/*
 * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/sort.h>
#include <drm/drm_mode.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_flip_work.h>

#include "mdp5_kms.h"

#define CURSOR_WIDTH	64
#define CURSOR_HEIGHT	64
struct mdp5_crtc {
	struct drm_crtc base;
	int id;
	bool enabled;

	spinlock_t lm_lock;	/* protect REG_MDP5_LM_* registers */

	/* if there is a pending flip, these will be non-null: */
	struct drm_pending_vblank_event *event;

	/* Bits have been flushed at the last commit,
	 * used to decide if a vsync has happened since last commit.
	 */
	u32 flushed_mask;

#define PENDING_CURSOR 0x1
#define PENDING_FLIP   0x2
	atomic_t pending;

	/* for unref'ing cursor bo's after scanout completes: */
	struct drm_flip_work unref_cursor_work;

	struct mdp_irq vblank;
	struct mdp_irq err;
	struct mdp_irq pp_done;

	struct completion pp_completion;

	struct {
		/* protect REG_MDP5_LM_CURSOR* registers and cursor scanout_bo*/
		spinlock_t lock;

		/* current cursor being scanned out: */
		struct drm_gem_object *scanout_bo;
		uint32_t width, height;
		uint32_t x, y;
	} cursor;
};
#define to_mdp5_crtc(x) container_of(x, struct mdp5_crtc, base)
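/* look up the mdp5_kms instance that owns this CRTC's drm_device */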
static struct mdp5_kms *get_kms(struct drm_crtc *crtc)
{
	struct msm_drm_private *priv = crtc->dev->dev_private;
	return to_mdp5_kms(to_mdp_kms(priv->kms));
}
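/*
 * note PENDING_* work for the next vblank, and register for the vblank
 * irq so the handler can retire it
 */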
static void request_pending(struct drm_crtc *crtc, uint32_t pending)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

	atomic_or(pending, &mdp5_crtc->pending);
	mdp_irq_register(&get_kms(crtc)->base, &mdp5_crtc->vblank);
}
static void request_pp_done_pending(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	reinit_completion(&mdp5_crtc->pp_completion);
}
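/*
 * program a CTL flush for the given mask; the returned mask is what
 * mdp5_crtc_wait_for_flush_done() later polls against the CTL commit
 * status
 */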
static u32 crtc_flush(struct drm_crtc *crtc, u32 flush_mask)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_ctl *ctl = mdp5_cstate->ctl;
	struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;

	DBG("%s: flush=%08x", crtc->name, flush_mask);
	return mdp5_ctl_commit(ctl, pipeline, flush_mask);
}
/*
 * flush updates, to make sure hw is updated to new scanout fb,
 * so that we can safely queue unref to current fb (ie. next
 * vblank we know hw is done w/ previous scanout_fb).
 */
static u32 crtc_flush_all(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_hw_mixer *mixer, *r_mixer;
	struct drm_plane *plane;
	uint32_t flush_mask = 0;

	/* this should not happen: */
	if (WARN_ON(!mdp5_cstate->ctl))
		return 0;

	drm_atomic_crtc_for_each_plane(plane, crtc) {
		flush_mask |= mdp5_plane_get_flush(plane);
	}

	mixer = mdp5_cstate->pipeline.mixer;
	flush_mask |= mdp_ctl_flush_mask_lm(mixer->lm);

	r_mixer = mdp5_cstate->pipeline.r_mixer;
	if (r_mixer)
		flush_mask |= mdp_ctl_flush_mask_lm(r_mixer->lm);

	return crtc_flush(crtc, flush_mask);
}
/* if file!=NULL, this is preclose potential cancel-flip path */
static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_ctl *ctl = mdp5_cstate->ctl;
	struct drm_device *dev = crtc->dev;
	struct drm_pending_vblank_event *event;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	event = mdp5_crtc->event;
	if (event) {
		mdp5_crtc->event = NULL;
		DBG("%s: send event: %p", crtc->name, event);
		drm_crtc_send_vblank_event(crtc, event);
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);

	if (ctl && !crtc->state->enable) {
		/* set STAGE_UNUSED for all layers */
		mdp5_ctl_blend(ctl, pipeline, NULL, NULL, 0, 0);
		/* XXX: What to do here? */
		/* mdp5_crtc->ctl = NULL; */
	}
}
static void unref_cursor_worker(struct drm_flip_work *work, void *val)
{
	struct mdp5_crtc *mdp5_crtc =
		container_of(work, struct mdp5_crtc, unref_cursor_work);
	struct mdp5_kms *mdp5_kms = get_kms(&mdp5_crtc->base);
	struct msm_kms *kms = &mdp5_kms->base.base;

	msm_gem_put_iova(val, kms->aspace);
	drm_gem_object_unreference_unlocked(val);
}
static void mdp5_crtc_destroy(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

	drm_crtc_cleanup(crtc);
	drm_flip_work_cleanup(&mdp5_crtc->unref_cursor_work);

	kfree(mdp5_crtc);
}
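/*
 * map a blend stage to the LM_BLEND_COLOR_OUT bit that selects foreground
 * alpha for that stage (0 for stages without such a bit)
 */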
static inline u32 mdp5_lm_use_fg_alpha_mask(enum mdp_mixer_stage_id stage)
{
	switch (stage) {
	case STAGE0: return MDP5_LM_BLEND_COLOR_OUT_STAGE0_FG_ALPHA;
	case STAGE1: return MDP5_LM_BLEND_COLOR_OUT_STAGE1_FG_ALPHA;
	case STAGE2: return MDP5_LM_BLEND_COLOR_OUT_STAGE2_FG_ALPHA;
	case STAGE3: return MDP5_LM_BLEND_COLOR_OUT_STAGE3_FG_ALPHA;
	case STAGE4: return MDP5_LM_BLEND_COLOR_OUT_STAGE4_FG_ALPHA;
	case STAGE5: return MDP5_LM_BLEND_COLOR_OUT_STAGE5_FG_ALPHA;
	case STAGE6: return MDP5_LM_BLEND_COLOR_OUT_STAGE6_FG_ALPHA;
	default:
		return 0;
	}
}
/*
 * left/right pipe offsets for the stage array used in blend_setup()
 */
#define PIPE_LEFT	0
#define PIPE_RIGHT	1

/*
 * blend_setup() - blend all the planes of a CRTC
 *
 * If no base layer is available, border will be enabled as the base layer.
 * Otherwise all layers will be blended based on their stage calculated
 * in mdp5_crtc_atomic_check.
 */
static void blend_setup(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct drm_plane *plane;
	const struct mdp5_cfg_hw *hw_cfg;
	struct mdp5_plane_state *pstate, *pstates[STAGE_MAX + 1] = {NULL};
	const struct mdp_format *format;
	struct mdp5_hw_mixer *mixer = pipeline->mixer;
	uint32_t lm = mixer->lm;
	struct mdp5_hw_mixer *r_mixer = pipeline->r_mixer;
	uint32_t r_lm = r_mixer ? r_mixer->lm : 0;
	struct mdp5_ctl *ctl = mdp5_cstate->ctl;
	uint32_t blend_op, fg_alpha, bg_alpha, ctl_blend_flags = 0;
	unsigned long flags;
	enum mdp5_pipe stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { SSPP_NONE };
	enum mdp5_pipe r_stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { SSPP_NONE };
	int i, plane_cnt = 0;
	bool bg_alpha_enabled = false;
	u32 mixer_op_mode = 0;
	u32 val;
#define blender(stage)	((stage) - STAGE0)

	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);

	spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);
	/* ctl could be released already when we are shutting down: */
	/* XXX: Can this happen now? */
	if (!ctl)
		goto out;

	/* Collect all plane information */
	drm_atomic_crtc_for_each_plane(plane, crtc) {
		enum mdp5_pipe right_pipe;

		pstate = to_mdp5_plane_state(plane->state);
		pstates[pstate->stage] = pstate;
		stage[pstate->stage][PIPE_LEFT] = mdp5_plane_pipe(plane);

		/*
		 * if we have a right mixer, stage the same pipe as we
		 * have on the left mixer
		 */
		if (r_mixer)
			r_stage[pstate->stage][PIPE_LEFT] =
				mdp5_plane_pipe(plane);
		/*
		 * if we have a right pipe (i.e., the plane comprises 2
		 * hwpipes), then stage the right pipe on the right side of
		 * both layer mixers
		 */
		right_pipe = mdp5_plane_right_pipe(plane);
		if (right_pipe != SSPP_NONE) {
			stage[pstate->stage][PIPE_RIGHT] = right_pipe;
			r_stage[pstate->stage][PIPE_RIGHT] = right_pipe;
		}

		plane_cnt++;
	}
	if (!pstates[STAGE_BASE]) {
		ctl_blend_flags |= MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT;
		DBG("Border Color is enabled");
	} else if (plane_cnt) {
		format = to_mdp_format(msm_framebuffer_format(pstates[STAGE_BASE]->base.fb));

		if (format->alpha_enable)
			bg_alpha_enabled = true;
	}

	/* The reset for blending */
	for (i = STAGE0; i <= STAGE_MAX; i++) {
		if (!pstates[i])
			continue;
		format = to_mdp_format(
			msm_framebuffer_format(pstates[i]->base.fb));
		plane = pstates[i]->base.plane;
		blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
			MDP5_LM_BLEND_OP_MODE_BG_ALPHA(BG_CONST);
		fg_alpha = pstates[i]->alpha;
		bg_alpha = 0xFF - pstates[i]->alpha;

		if (!format->alpha_enable && bg_alpha_enabled)
			mixer_op_mode = 0;
		else
			mixer_op_mode |= mdp5_lm_use_fg_alpha_mask(i);

		DBG("Stage %d fg_alpha %x bg_alpha %x", i, fg_alpha, bg_alpha);

		if (format->alpha_enable && pstates[i]->premultiplied) {
			blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
				MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL);
			if (fg_alpha != 0xff) {
				bg_alpha = fg_alpha;
				blend_op |=
					MDP5_LM_BLEND_OP_MODE_BG_MOD_ALPHA |
					MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA;
			} else {
				blend_op |= MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA;
			}
		} else if (format->alpha_enable) {
			blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_PIXEL) |
				MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL);
			if (fg_alpha != 0xff) {
				bg_alpha = fg_alpha;
				blend_op |=
					MDP5_LM_BLEND_OP_MODE_FG_MOD_ALPHA |
					MDP5_LM_BLEND_OP_MODE_FG_INV_MOD_ALPHA |
					MDP5_LM_BLEND_OP_MODE_BG_MOD_ALPHA |
					MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA;
			} else {
				blend_op |= MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA;
			}
		}
		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_OP_MODE(lm,
				blender(i)), blend_op);
		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(lm,
				blender(i)), fg_alpha);
		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(lm,
				blender(i)), bg_alpha);
		if (r_mixer) {
			mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_OP_MODE(r_lm,
					blender(i)), blend_op);
			mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(r_lm,
					blender(i)), fg_alpha);
			mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(r_lm,
					blender(i)), bg_alpha);
		}
	}

	val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm));
	mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm),
		   val | mixer_op_mode);
	if (r_mixer) {
		val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm));
		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm),
			   val | mixer_op_mode);
	}

	mdp5_ctl_blend(ctl, pipeline, stage, r_stage, plane_cnt,
		       ctl_blend_flags);

out:
	spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
}
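/*
 * program the LM output size from the adjusted mode; with a right mixer
 * each LM scans out half of the total width (source split)
 */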
static void mdp5_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct mdp5_hw_mixer *mixer = mdp5_cstate->pipeline.mixer;
	struct mdp5_hw_mixer *r_mixer = mdp5_cstate->pipeline.r_mixer;
	uint32_t lm = mixer->lm;
	u32 mixer_width, val;
	unsigned long flags;
	struct drm_display_mode *mode;

	if (WARN_ON(!crtc->state))
		return;

	mode = &crtc->state->adjusted_mode;

	DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
			crtc->name, mode->base.id, mode->name,
			mode->vrefresh, mode->clock,
			mode->hdisplay, mode->hsync_start,
			mode->hsync_end, mode->htotal,
			mode->vdisplay, mode->vsync_start,
			mode->vsync_end, mode->vtotal,
			mode->type, mode->flags);

	mixer_width = mode->hdisplay;
	if (r_mixer)
		mixer_width /= 2;
	spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);
	mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(lm),
			MDP5_LM_OUT_SIZE_WIDTH(mixer_width) |
			MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay));

	/* Assign mixer to LEFT side in source split mode */
	val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm));
	val &= ~MDP5_LM_BLEND_COLOR_OUT_SPLIT_LEFT_RIGHT;
	mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm), val);

	if (r_mixer) {
		u32 r_lm = r_mixer->lm;

		mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(r_lm),
			   MDP5_LM_OUT_SIZE_WIDTH(mixer_width) |
			   MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay));

		/* Assign mixer to RIGHT side in source split mode */
		val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm));
		val |= MDP5_LM_BLEND_COLOR_OUT_SPLIT_LEFT_RIGHT;
		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm), val);
	}

	spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
}
static void mdp5_crtc_disable(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);

	DBG("%s", crtc->name);

	if (WARN_ON(!mdp5_crtc->enabled))
		return;

	if (mdp5_cstate->cmd_mode)
		mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->pp_done);

	mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err);
	mdp5_disable(mdp5_kms);

	mdp5_crtc->enabled = false;
}
static void mdp5_crtc_enable(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);

	DBG("%s", crtc->name);

	if (WARN_ON(mdp5_crtc->enabled))
		return;

	mdp5_enable(mdp5_kms);
	mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err);

	if (mdp5_cstate->cmd_mode)
		mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->pp_done);

	mdp5_crtc->enabled = true;
}
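/*
 * (re)assign hw mixers for this CRTC and derive the vblank/err/pp_done
 * irq masks for the new pipeline; called from the atomic_check path, so
 * mixer allocation failures are reported before the state is committed
 */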
int mdp5_crtc_setup_pipeline(struct drm_crtc *crtc,
			     struct drm_crtc_state *new_crtc_state,
			     bool need_right_mixer)
{
	struct mdp5_crtc_state *mdp5_cstate =
			to_mdp5_crtc_state(new_crtc_state);
	struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
	struct mdp5_interface *intf;
	bool new_mixer = false;

	new_mixer = !pipeline->mixer;

	if ((need_right_mixer && !pipeline->r_mixer) ||
	    (!need_right_mixer && pipeline->r_mixer))
		new_mixer = true;

	if (new_mixer) {
		struct mdp5_hw_mixer *old_mixer = pipeline->mixer;
		struct mdp5_hw_mixer *old_r_mixer = pipeline->r_mixer;
		u32 caps;
		int ret;

		caps = MDP_LM_CAP_DISPLAY;
		if (need_right_mixer)
			caps |= MDP_LM_CAP_PAIR;

		ret = mdp5_mixer_assign(new_crtc_state->state, crtc, caps,
					&pipeline->mixer, need_right_mixer ?
					&pipeline->r_mixer : NULL);
		if (ret)
			return ret;

		mdp5_mixer_release(new_crtc_state->state, old_mixer);
		if (old_r_mixer) {
			mdp5_mixer_release(new_crtc_state->state, old_r_mixer);
			if (!need_right_mixer)
				pipeline->r_mixer = NULL;
		}
	}

	/*
	 * these should have been already set up in the encoder's atomic
	 * check (called by drm_atomic_helper_check_modeset)
	 */
	intf = pipeline->intf;

	mdp5_cstate->err_irqmask = intf2err(intf->num);
	mdp5_cstate->vblank_irqmask = intf2vblank(pipeline->mixer, intf);

	if ((intf->type == INTF_DSI) &&
	    (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)) {
		mdp5_cstate->pp_done_irqmask = lm2ppdone(pipeline->mixer);
		mdp5_cstate->cmd_mode = true;
	} else {
		mdp5_cstate->pp_done_irqmask = 0;
		mdp5_cstate->cmd_mode = false;
	}

	return 0;
}
struct plane_state {
	struct drm_plane *plane;
	struct mdp5_plane_state *state;
};
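/* sort helper for sort(): order plane states by zpos, bottom-most first */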
static int pstate_cmp(const void *a, const void *b)
{
	struct plane_state *pa = (struct plane_state *)a;
	struct plane_state *pb = (struct plane_state *)b;
	return pa->state->zpos - pb->state->zpos;
}
/* is there a helper for this? */
static bool is_fullscreen(struct drm_crtc_state *cstate,
		struct drm_plane_state *pstate)
{
	return (pstate->crtc_x <= 0) && (pstate->crtc_y <= 0) &&
		((pstate->crtc_x + pstate->crtc_w) >= cstate->mode.hdisplay) &&
		((pstate->crtc_y + pstate->crtc_h) >= cstate->mode.vdisplay);
}
static enum mdp_mixer_stage_id get_start_stage(struct drm_crtc *crtc,
					struct drm_crtc_state *new_crtc_state,
					struct drm_plane_state *bpstate)
{
	struct mdp5_crtc_state *mdp5_cstate =
			to_mdp5_crtc_state(new_crtc_state);

	/*
	 * if we're in source split mode, it's mandatory to have
	 * border out on the base stage
	 */
	if (mdp5_cstate->pipeline.r_mixer)
		return STAGE0;

	/* if the bottom-most layer is not fullscreen, we need to use
	 * it for solid-color:
	 */
	if (!is_fullscreen(new_crtc_state, bpstate))
		return STAGE0;

	return STAGE_BASE;
}
static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct drm_plane *plane;
	struct drm_device *dev = crtc->dev;
	struct plane_state pstates[STAGE_MAX + 1];
	const struct mdp5_cfg_hw *hw_cfg;
	const struct drm_plane_state *pstate;
	const struct drm_display_mode *mode = &state->adjusted_mode;
	bool cursor_plane = false;
	bool need_right_mixer = false;
	int cnt = 0, i;
	int ret;
	enum mdp_mixer_stage_id start;

	DBG("%s: check", crtc->name);
	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
		pstates[cnt].plane = plane;
		pstates[cnt].state = to_mdp5_plane_state(pstate);

		/*
		 * if any plane on this crtc uses 2 hwpipes, then we need
		 * the crtc to have a right hwmixer.
		 */
		if (pstates[cnt].state->r_hwpipe)
			need_right_mixer = true;
		cnt++;

		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			cursor_plane = true;
	}

	/* bail out early if there aren't any planes */
	if (!cnt)
		return 0;

	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
	/*
	 * we need a right hwmixer if the mode's width is greater than a single
	 * LM's max width
	 */
	if (mode->hdisplay > hw_cfg->lm.max_width)
		need_right_mixer = true;

	ret = mdp5_crtc_setup_pipeline(crtc, state, need_right_mixer);
	if (ret) {
		dev_err(dev->dev, "couldn't assign mixers %d\n", ret);
		return ret;
	}
	/* assign a stage based on sorted zpos property */
	sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL);

	/* trigger a warning if cursor isn't the highest zorder */
	WARN_ON(cursor_plane &&
		(pstates[cnt - 1].plane->type != DRM_PLANE_TYPE_CURSOR));

	start = get_start_stage(crtc, state, &pstates[0].state->base);

	/* verify that there are not too many planes attached to crtc
	 * and that we don't have conflicting mixer stages:
	 */
	if ((cnt + start - 1) >= hw_cfg->lm.nb_stages) {
		dev_err(dev->dev, "too many planes! cnt=%d, start stage=%d\n",
			cnt, start);
		return -EINVAL;
	}
	for (i = 0; i < cnt; i++) {
		if (cursor_plane && (i == (cnt - 1)))
			pstates[i].state->stage = hw_cfg->lm.nb_stages;
		else
			pstates[i].state->stage = start + i;
		DBG("%s: assign pipe %s on stage=%d", crtc->name,
		    pstates[i].plane->name,
		    pstates[i].state->stage);
	}

	return 0;
}
static void mdp5_crtc_atomic_begin(struct drm_crtc *crtc,
				   struct drm_crtc_state *old_crtc_state)
{
	DBG("%s: begin", crtc->name);
}
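/*
 * commit the new blend configuration and kick the CTL flush; the irq
 * masks computed at check time are copied into the mdp_irq structs here
 */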
static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc,
				   struct drm_crtc_state *old_crtc_state)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct drm_device *dev = crtc->dev;
	unsigned long flags;

	DBG("%s: event: %p", crtc->name, crtc->state->event);

	WARN_ON(mdp5_crtc->event);

	spin_lock_irqsave(&dev->event_lock, flags);
	mdp5_crtc->event = crtc->state->event;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	/*
	 * If no CTL has been allocated in mdp5_crtc_atomic_check(),
	 * it means we are trying to flush a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	/* XXX: Can this happen now ? */
	if (unlikely(!mdp5_cstate->ctl))
		return;

	blend_setup(crtc);

	/* PP_DONE irq is only used by command mode for now.
	 * It is better to request pending before FLUSH and START trigger
	 * to make sure no pp_done irq missed.
	 * This is safe because no pp_done will happen before SW trigger
	 * in command mode.
	 */
	if (mdp5_cstate->cmd_mode)
		request_pp_done_pending(crtc);

	mdp5_crtc->flushed_mask = crtc_flush_all(crtc);

	/* XXX are we leaking out state here? */
	mdp5_crtc->vblank.irqmask = mdp5_cstate->vblank_irqmask;
	mdp5_crtc->err.irqmask = mdp5_cstate->err_irqmask;
	mdp5_crtc->pp_done.irqmask = mdp5_cstate->pp_done_irqmask;

	request_pending(crtc, PENDING_FLIP);
}
static void get_roi(struct drm_crtc *crtc, uint32_t *roi_w, uint32_t *roi_h)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	uint32_t xres = crtc->mode.hdisplay;
	uint32_t yres = crtc->mode.vdisplay;

	/*
	 * Cursor Region Of Interest (ROI) is a plane read from cursor
	 * buffer to render. The ROI region is determined by the visibility of
	 * the cursor point. In the default Cursor image the cursor point will
	 * be at the top left of the cursor image, unless it is specified
	 * otherwise using hotspot feature.
	 *
	 * If the cursor point reaches the right (xres - x < cursor.width) or
	 * bottom (yres - y < cursor.height) boundary of the screen, then ROI
	 * width and ROI height need to be evaluated to crop the cursor image
	 * accordingly.
	 * (xres-x) will be new cursor width when x > (xres - cursor.width)
	 * (yres-y) will be new cursor height when y > (yres - cursor.height)
	 */
	*roi_w = min(mdp5_crtc->cursor.width, xres -
			mdp5_crtc->cursor.x);
	*roi_h = min(mdp5_crtc->cursor.height, yres -
			mdp5_crtc->cursor.y);
}
static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
		struct drm_file *file, uint32_t handle,
		uint32_t width, uint32_t height)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
	struct drm_device *dev = crtc->dev;
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct msm_kms *kms = &mdp5_kms->base.base;
	struct drm_gem_object *cursor_bo, *old_bo = NULL;
	uint32_t blendcfg, stride;
	uint64_t cursor_addr;
	struct mdp5_ctl *ctl;
	int ret, lm;
	enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL;
	uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
	uint32_t roi_w, roi_h;
	bool cursor_enable = true;
	unsigned long flags;

	if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
		dev_err(dev->dev, "bad cursor size: %dx%d\n", width, height);
		return -EINVAL;
	}

	ctl = mdp5_cstate->ctl;
	if (!ctl)
		return -EINVAL;

	/* don't support LM cursors when we have source split enabled */
	if (mdp5_cstate->pipeline.r_mixer)
		return -EINVAL;

	if (!handle) {
		DBG("Cursor off");
		cursor_enable = false;
		goto set_cursor;
	}
= drm_gem_object_lookup(file
, handle
);
763 ret
= msm_gem_get_iova(cursor_bo
, kms
->aspace
, &cursor_addr
);
767 lm
= mdp5_cstate
->pipeline
.mixer
->lm
;
768 stride
= width
* drm_format_plane_cpp(DRM_FORMAT_ARGB8888
, 0);
	spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
	old_bo = mdp5_crtc->cursor.scanout_bo;

	mdp5_crtc->cursor.scanout_bo = cursor_bo;
	mdp5_crtc->cursor.width = width;
	mdp5_crtc->cursor.height = height;

	get_roi(crtc, &roi_w, &roi_h);

	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride);
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm),
			MDP5_LM_CURSOR_FORMAT_FORMAT(CURSOR_FMT_ARGB8888));
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_IMG_SIZE(lm),
			MDP5_LM_CURSOR_IMG_SIZE_SRC_H(height) |
			MDP5_LM_CURSOR_IMG_SIZE_SRC_W(width));
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm),
			MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) |
			MDP5_LM_CURSOR_SIZE_ROI_W(roi_w));
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BASE_ADDR(lm), cursor_addr);

	blendcfg = MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_EN;
	blendcfg |= MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL(cur_alpha);
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BLEND_CONFIG(lm), blendcfg);

	spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);
set_cursor:
	ret = mdp5_ctl_set_cursor(ctl, pipeline, 0, cursor_enable);
	if (ret) {
		dev_err(dev->dev, "failed to %sable cursor: %d\n",
				cursor_enable ? "en" : "dis", ret);
		goto end;
	}

	crtc_flush(crtc, flush_mask);

end:
	if (old_bo) {
		drm_flip_work_queue(&mdp5_crtc->unref_cursor_work, old_bo);
		/* enable vblank to complete cursor work: */
		request_pending(crtc, PENDING_CURSOR);
	}

	return ret;
}
static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	uint32_t lm = mdp5_cstate->pipeline.mixer->lm;
	uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
	uint32_t roi_w;
	uint32_t roi_h;
	unsigned long flags;

	/* don't support LM cursors when we have source split enabled */
	if (mdp5_cstate->pipeline.r_mixer)
		return -EINVAL;

	/* In case the CRTC is disabled, just drop the cursor update */
	if (unlikely(!crtc->state->enable))
		return 0;

	mdp5_crtc->cursor.x = x = max(x, 0);
	mdp5_crtc->cursor.y = y = max(y, 0);

	get_roi(crtc, &roi_w, &roi_h);

	spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm),
			MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) |
			MDP5_LM_CURSOR_SIZE_ROI_W(roi_w));
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_START_XY(lm),
			MDP5_LM_CURSOR_START_XY_Y_START(y) |
			MDP5_LM_CURSOR_START_XY_X_START(x));
	spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);

	crtc_flush(crtc, flush_mask);

	return 0;
}
static void
mdp5_crtc_atomic_print_state(struct drm_printer *p,
			     const struct drm_crtc_state *state)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(state);
	struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
	struct mdp5_kms *mdp5_kms = get_kms(state->crtc);

	if (WARN_ON(!pipeline))
		return;

	drm_printf(p, "\thwmixer=%s\n", pipeline->mixer ?
			pipeline->mixer->name : "(null)");

	if (mdp5_kms->caps & MDP_CAP_SRC_SPLIT)
		drm_printf(p, "\tright hwmixer=%s\n", pipeline->r_mixer ?
			   pipeline->r_mixer->name : "(null)");
}
static void mdp5_crtc_reset(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *mdp5_cstate;

	if (crtc->state) {
		__drm_atomic_helper_crtc_destroy_state(crtc->state);
		kfree(to_mdp5_crtc_state(crtc->state));
	}

	mdp5_cstate = kzalloc(sizeof(*mdp5_cstate), GFP_KERNEL);

	if (mdp5_cstate) {
		mdp5_cstate->base.crtc = crtc;
		crtc->state = &mdp5_cstate->base;
	}
}
static struct drm_crtc_state *
mdp5_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *mdp5_cstate;

	if (WARN_ON(!crtc->state))
		return NULL;

	mdp5_cstate = kmemdup(to_mdp5_crtc_state(crtc->state),
			      sizeof(*mdp5_cstate), GFP_KERNEL);
	if (!mdp5_cstate)
		return NULL;

	__drm_atomic_helper_crtc_duplicate_state(crtc, &mdp5_cstate->base);

	return &mdp5_cstate->base;
}
static void mdp5_crtc_destroy_state(struct drm_crtc *crtc, struct drm_crtc_state *state)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(state);

	__drm_atomic_helper_crtc_destroy_state(state);

	kfree(mdp5_cstate);
}
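/*
 * two variants of the crtc funcs: when a drm cursor plane is used, the
 * LM-cursor cursor_set/cursor_move hooks are left out
 */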
static const struct drm_crtc_funcs mdp5_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.destroy = mdp5_crtc_destroy,
	.page_flip = drm_atomic_helper_page_flip,
	.set_property = drm_atomic_helper_crtc_set_property,
	.reset = mdp5_crtc_reset,
	.atomic_duplicate_state = mdp5_crtc_duplicate_state,
	.atomic_destroy_state = mdp5_crtc_destroy_state,
	.cursor_set = mdp5_crtc_cursor_set,
	.cursor_move = mdp5_crtc_cursor_move,
	.atomic_print_state = mdp5_crtc_atomic_print_state,
};
static const struct drm_crtc_funcs mdp5_crtc_no_lm_cursor_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.destroy = mdp5_crtc_destroy,
	.page_flip = drm_atomic_helper_page_flip,
	.set_property = drm_atomic_helper_crtc_set_property,
	.reset = mdp5_crtc_reset,
	.atomic_duplicate_state = mdp5_crtc_duplicate_state,
	.atomic_destroy_state = mdp5_crtc_destroy_state,
	.atomic_print_state = mdp5_crtc_atomic_print_state,
};
static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {
	.mode_set_nofb = mdp5_crtc_mode_set_nofb,
	.disable = mdp5_crtc_disable,
	.enable = mdp5_crtc_enable,
	.atomic_check = mdp5_crtc_atomic_check,
	.atomic_begin = mdp5_crtc_atomic_begin,
	.atomic_flush = mdp5_crtc_atomic_flush,
};
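/*
 * vblank irq handler: one-shot (unregisters itself) and retires the
 * PENDING_* work noted by request_pending()
 */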
static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
	struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, vblank);
	struct drm_crtc *crtc = &mdp5_crtc->base;
	struct msm_drm_private *priv = crtc->dev->dev_private;
	unsigned pending;

	mdp_irq_unregister(&get_kms(crtc)->base, &mdp5_crtc->vblank);

	pending = atomic_xchg(&mdp5_crtc->pending, 0);

	if (pending & PENDING_FLIP) {
		complete_flip(crtc, NULL);
	}

	if (pending & PENDING_CURSOR)
		drm_flip_work_commit(&mdp5_crtc->unref_cursor_work, priv->wq);
}
static void mdp5_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
	struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, err);

	DBG("%s: error: %08x", mdp5_crtc->base.name, irqstatus);
}
static void mdp5_crtc_pp_done_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
	struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc,
						   pp_done);

	complete(&mdp5_crtc->pp_completion);
}
static void mdp5_crtc_wait_for_pp_done(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	int ret;

	ret = wait_for_completion_timeout(&mdp5_crtc->pp_completion,
					  msecs_to_jiffies(50));
	if (ret == 0)
		dev_warn(dev->dev, "pp done time out, lm=%d\n",
			 mdp5_cstate->pipeline.mixer->lm);
}
static void mdp5_crtc_wait_for_flush_done(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_ctl *ctl = mdp5_cstate->ctl;
	int ret;

	/* Should not call this function if crtc is disabled. */
	if (!ctl)
		return;

	ret = drm_crtc_vblank_get(crtc);
	if (ret)
		return;

	ret = wait_event_timeout(dev->vblank[drm_crtc_index(crtc)].queue,
		((mdp5_ctl_get_commit_status(ctl) &
		mdp5_crtc->flushed_mask) == 0),
		msecs_to_jiffies(50));
	if (ret <= 0)
		dev_warn(dev->dev, "vblank time out, crtc=%d\n", mdp5_crtc->id);

	mdp5_crtc->flushed_mask = 0;

	drm_crtc_vblank_put(crtc);
}
uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	return mdp5_crtc->vblank.irqmask;
}
void mdp5_crtc_set_pipeline(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);

	/* should this be done elsewhere ? */
	mdp_irq_update(&mdp5_kms->base);

	mdp5_ctl_set_pipeline(mdp5_cstate->ctl, &mdp5_cstate->pipeline);
}
struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);

	return mdp5_cstate->ctl;
}
struct mdp5_hw_mixer *mdp5_crtc_get_mixer(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *mdp5_cstate;

	if (WARN_ON(!crtc))
		return ERR_PTR(-EINVAL);

	mdp5_cstate = to_mdp5_crtc_state(crtc->state);

	return WARN_ON(!mdp5_cstate->pipeline.mixer) ?
		ERR_PTR(-EINVAL) : mdp5_cstate->pipeline.mixer;
}
struct mdp5_pipeline *mdp5_crtc_get_pipeline(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *mdp5_cstate;

	if (WARN_ON(!crtc))
		return ERR_PTR(-EINVAL);

	mdp5_cstate = to_mdp5_crtc_state(crtc->state);

	return &mdp5_cstate->pipeline;
}
void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);

	if (mdp5_cstate->cmd_mode)
		mdp5_crtc_wait_for_pp_done(crtc);
	else
		mdp5_crtc_wait_for_flush_done(crtc);
}
/* initialize crtc */
struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
				struct drm_plane *plane,
				struct drm_plane *cursor_plane, int id)
{
	struct drm_crtc *crtc = NULL;
	struct mdp5_crtc *mdp5_crtc;

	mdp5_crtc = kzalloc(sizeof(*mdp5_crtc), GFP_KERNEL);
	if (!mdp5_crtc)
		return ERR_PTR(-ENOMEM);

	crtc = &mdp5_crtc->base;

	mdp5_crtc->id = id;

	spin_lock_init(&mdp5_crtc->lm_lock);
	spin_lock_init(&mdp5_crtc->cursor.lock);
	init_completion(&mdp5_crtc->pp_completion);

	mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq;
	mdp5_crtc->err.irq = mdp5_crtc_err_irq;
	mdp5_crtc->pp_done.irq = mdp5_crtc_pp_done_irq;

	if (cursor_plane)
		drm_crtc_init_with_planes(dev, crtc, plane, cursor_plane,
					  &mdp5_crtc_no_lm_cursor_funcs, NULL);
	else
		drm_crtc_init_with_planes(dev, crtc, plane, NULL,
					  &mdp5_crtc_funcs, NULL);

	drm_flip_work_init(&mdp5_crtc->unref_cursor_work,
			"unref cursor", unref_cursor_worker);

	drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs);

	return crtc;
}
);