]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
drm/msm/mdp5: Prepare Layer Mixers for source split
[mirror_ubuntu-artful-kernel.git] / drivers / gpu / drm / msm / mdp / mdp5 / mdp5_crtc.c
CommitLineData
06c0dd96 1/*
68cdbed9 2 * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
06c0dd96
RC
3 * Copyright (C) 2013 Red Hat
4 * Author: Rob Clark <robdclark@gmail.com>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published by
8 * the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19#include "mdp5_kms.h"
20
ed851963 21#include <linux/sort.h>
06c0dd96
RC
22#include <drm/drm_mode.h>
23#include "drm_crtc.h"
24#include "drm_crtc_helper.h"
25#include "drm_flip_work.h"
26
e172d10a
BG
27#define CURSOR_WIDTH 64
28#define CURSOR_HEIGHT 64
29
06c0dd96
RC
/*
 * Per-CRTC driver state for an MDP5 display controller CRTC.
 * Wraps the base drm_crtc plus the irq handlers, pending-work
 * bookkeeping and hardware-cursor scanout state the driver needs.
 */
struct mdp5_crtc {
	struct drm_crtc base;

	int id;
	bool enabled;

	spinlock_t lm_lock;     /* protect REG_MDP5_LM_* registers */

	/* if there is a pending flip, these will be non-null: */
	struct drm_pending_vblank_event *event;

	/* Bits have been flushed at the last commit,
	 * used to decide if a vsync has happened since last commit.
	 */
	u32 flushed_mask;

#define PENDING_CURSOR 0x1
#define PENDING_FLIP   0x2
	/* bitmask of PENDING_* work to be completed at the next vblank */
	atomic_t pending;

	/* for unref'ing cursor bo's after scanout completes: */
	struct drm_flip_work unref_cursor_work;

	struct mdp_irq vblank;
	struct mdp_irq err;
	/* pp_done is only registered while the CRTC is in DSI command mode */
	struct mdp_irq pp_done;

	struct completion pp_completion;

	struct {
		/* protect REG_MDP5_LM_CURSOR* registers and cursor scanout_bo*/
		spinlock_t lock;

		/* current cursor being scanned out: */
		struct drm_gem_object *scanout_bo;
		uint32_t width, height;
		uint32_t x, y;
	} cursor;
};
#define to_mdp5_crtc(x) container_of(x, struct mdp5_crtc, base)
69
70static struct mdp5_kms *get_kms(struct drm_crtc *crtc)
71{
72 struct msm_drm_private *priv = crtc->dev->dev_private;
73 return to_mdp5_kms(to_mdp_kms(priv->kms));
74}
75
76static void request_pending(struct drm_crtc *crtc, uint32_t pending)
77{
78 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
79
80 atomic_or(pending, &mdp5_crtc->pending);
81 mdp_irq_register(&get_kms(crtc)->base, &mdp5_crtc->vblank);
82}
83
68cdbed9
HL
84static void request_pp_done_pending(struct drm_crtc *crtc)
85{
86 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
87 reinit_completion(&mdp5_crtc->pp_completion);
88}
89
0a5c9aad 90static u32 crtc_flush(struct drm_crtc *crtc, u32 flush_mask)
0deed25b 91{
0ddc3a63
AT
92 struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
93 struct mdp5_ctl *ctl = mdp5_cstate->ctl;
f316b25a 94 struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
0deed25b 95
cee26588 96 DBG("%s: flush=%08x", crtc->name, flush_mask);
f316b25a 97 return mdp5_ctl_commit(ctl, pipeline, flush_mask);
0deed25b
SV
98}
99
100/*
101 * flush updates, to make sure hw is updated to new scanout fb,
102 * so that we can safely queue unref to current fb (ie. next
103 * vblank we know hw is done w/ previous scanout_fb).
104 */
0a5c9aad 105static u32 crtc_flush_all(struct drm_crtc *crtc)
06c0dd96 106{
0ddc3a63 107 struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
b7621b2a 108 struct mdp5_hw_mixer *mixer, *r_mixer;
a8cecf33 109 struct drm_plane *plane;
0deed25b
SV
110 uint32_t flush_mask = 0;
111
ba0312a6 112 /* this should not happen: */
0ddc3a63 113 if (WARN_ON(!mdp5_cstate->ctl))
0a5c9aad 114 return 0;
06c0dd96 115
93b02beb 116 drm_atomic_crtc_for_each_plane(plane, crtc) {
0deed25b 117 flush_mask |= mdp5_plane_get_flush(plane);
06c0dd96 118 }
389b09a1 119
0ddc3a63 120 mixer = mdp5_cstate->pipeline.mixer;
adfc0e63 121 flush_mask |= mdp_ctl_flush_mask_lm(mixer->lm);
a8cecf33 122
b7621b2a
AT
123 r_mixer = mdp5_cstate->pipeline.r_mixer;
124 if (r_mixer)
125 flush_mask |= mdp_ctl_flush_mask_lm(r_mixer->lm);
126
0a5c9aad 127 return crtc_flush(crtc, flush_mask);
06c0dd96
RC
128}
129
06c0dd96
RC
/*
 * Deliver the pending vblank event (if any) for this CRTC and, when the
 * CRTC state is disabled, clear all layers from the CTL blend config.
 *
 * if file!=NULL, this is preclose potential cancel-flip path
 * (NOTE(review): @file is not referenced in this implementation — confirm
 * whether per-file event filtering is still required here)
 */
static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_ctl *ctl = mdp5_cstate->ctl;
	struct drm_device *dev = crtc->dev;
	struct drm_pending_vblank_event *event;
	unsigned long flags;

	/* event is consumed under the dev-wide event_lock */
	spin_lock_irqsave(&dev->event_lock, flags);
	event = mdp5_crtc->event;
	if (event) {
		mdp5_crtc->event = NULL;
		DBG("%s: send event: %p", crtc->name, event);
		drm_crtc_send_vblank_event(crtc, event);
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);

	if (ctl && !crtc->state->enable) {
		/* set STAGE_UNUSED for all layers */
		mdp5_ctl_blend(ctl, pipeline, NULL, NULL, 0, 0);
		/* XXX: What to do here? */
		/* mdp5_crtc->ctl = NULL; */
	}
}
157
e172d10a
BG
158static void unref_cursor_worker(struct drm_flip_work *work, void *val)
159{
160 struct mdp5_crtc *mdp5_crtc =
161 container_of(work, struct mdp5_crtc, unref_cursor_work);
162 struct mdp5_kms *mdp5_kms = get_kms(&mdp5_crtc->base);
163
164 msm_gem_put_iova(val, mdp5_kms->id);
165 drm_gem_object_unreference_unlocked(val);
166}
167
06c0dd96
RC
/* drm_crtc_funcs::destroy - tear down the CRTC and free driver state */
static void mdp5_crtc_destroy(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

	drm_crtc_cleanup(crtc);
	/* release the cursor-bo unref flip-work before freeing */
	drm_flip_work_cleanup(&mdp5_crtc->unref_cursor_work);

	kfree(mdp5_crtc);
}
177
829200ac
AT
178static inline u32 mdp5_lm_use_fg_alpha_mask(enum mdp_mixer_stage_id stage)
179{
180 switch (stage) {
181 case STAGE0: return MDP5_LM_BLEND_COLOR_OUT_STAGE0_FG_ALPHA;
182 case STAGE1: return MDP5_LM_BLEND_COLOR_OUT_STAGE1_FG_ALPHA;
183 case STAGE2: return MDP5_LM_BLEND_COLOR_OUT_STAGE2_FG_ALPHA;
184 case STAGE3: return MDP5_LM_BLEND_COLOR_OUT_STAGE3_FG_ALPHA;
185 case STAGE4: return MDP5_LM_BLEND_COLOR_OUT_STAGE4_FG_ALPHA;
186 case STAGE5: return MDP5_LM_BLEND_COLOR_OUT_STAGE5_FG_ALPHA;
187 case STAGE6: return MDP5_LM_BLEND_COLOR_OUT_STAGE6_FG_ALPHA;
188 default:
189 return 0;
190 }
191}
192
b7621b2a
AT
193/*
194 * left/right pipe offsets for the stage array used in blend_setup()
195 */
196#define PIPE_LEFT 0
197#define PIPE_RIGHT 1
198
0deed25b
SV
/*
 * blend_setup() - blend all the planes of a CRTC
 *
 * If no base layer is available, border will be enabled as the base layer.
 * Otherwise all layers will be blended based on their stage calculated
 * in mdp5_crtc_atomic_check.
 *
 * Programs the LM_BLEND_* registers of the left (and, for source split,
 * right) layer mixer under lm_lock, then pushes the stage arrays to the CTL.
 */
static void blend_setup(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct drm_plane *plane;
	const struct mdp5_cfg_hw *hw_cfg;
	struct mdp5_plane_state *pstate, *pstates[STAGE_MAX + 1] = {NULL};
	const struct mdp_format *format;
	struct mdp5_hw_mixer *mixer = pipeline->mixer;
	uint32_t lm = mixer->lm;
	struct mdp5_hw_mixer *r_mixer = pipeline->r_mixer;
	uint32_t r_lm = r_mixer ? r_mixer->lm : 0;
	struct mdp5_ctl *ctl = mdp5_cstate->ctl;
	uint32_t blend_op, fg_alpha, bg_alpha, ctl_blend_flags = 0;
	unsigned long flags;
	enum mdp5_pipe stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { SSPP_NONE };
	enum mdp5_pipe r_stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { SSPP_NONE };
	int i, plane_cnt = 0;
	bool bg_alpha_enabled = false;
	u32 mixer_op_mode = 0;
	u32 val;
/* convert a mdp_mixer_stage_id to a 0-based blender index */
#define blender(stage)	((stage) - STAGE0)

	/* NOTE(review): hw_cfg is fetched but never referenced below —
	 * candidate for removal */
	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);

	spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);

	/* ctl could be released already when we are shutting down: */
	/* XXX: Can this happen now? */
	if (!ctl)
		goto out;

	/* Collect all plane information, indexed by blend stage */
	drm_atomic_crtc_for_each_plane(plane, crtc) {
		pstate = to_mdp5_plane_state(plane->state);
		pstates[pstate->stage] = pstate;
		stage[pstate->stage][PIPE_LEFT] = mdp5_plane_pipe(plane);
		/*
		 * if we have a right mixer, stage the same pipe as we
		 * have on the left mixer
		 */
		if (r_mixer)
			r_stage[pstate->stage][PIPE_LEFT] =
				mdp5_plane_pipe(plane);

		plane_cnt++;
	}

	if (!pstates[STAGE_BASE]) {
		/* no base layer: use border color as the base instead */
		ctl_blend_flags |= MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT;
		DBG("Border Color is enabled");
	} else if (plane_cnt) {
		format = to_mdp_format(msm_framebuffer_format(pstates[STAGE_BASE]->base.fb));

		if (format->alpha_enable)
			bg_alpha_enabled = true;
	}

	/* The reset for blending */
	for (i = STAGE0; i <= STAGE_MAX; i++) {
		if (!pstates[i])
			continue;

		format = to_mdp_format(
			msm_framebuffer_format(pstates[i]->base.fb));
		plane = pstates[i]->base.plane;
		/* default: constant fg/bg alpha from the plane property */
		blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
			MDP5_LM_BLEND_OP_MODE_BG_ALPHA(BG_CONST);
		fg_alpha = pstates[i]->alpha;
		bg_alpha = 0xFF - pstates[i]->alpha;

		if (!format->alpha_enable && bg_alpha_enabled)
			mixer_op_mode = 0;
		else
			mixer_op_mode |= mdp5_lm_use_fg_alpha_mask(i);

		DBG("Stage %d fg_alpha %x bg_alpha %x", i, fg_alpha, bg_alpha);

		if (format->alpha_enable && pstates[i]->premultiplied) {
			/* premultiplied per-pixel alpha */
			blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
				MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL);
			if (fg_alpha != 0xff) {
				bg_alpha = fg_alpha;
				blend_op |=
					MDP5_LM_BLEND_OP_MODE_BG_MOD_ALPHA |
					MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA;
			} else {
				blend_op |= MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA;
			}
		} else if (format->alpha_enable) {
			/* non-premultiplied per-pixel alpha */
			blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_PIXEL) |
				MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL);
			if (fg_alpha != 0xff) {
				bg_alpha = fg_alpha;
				blend_op |=
					MDP5_LM_BLEND_OP_MODE_FG_MOD_ALPHA |
					MDP5_LM_BLEND_OP_MODE_FG_INV_MOD_ALPHA |
					MDP5_LM_BLEND_OP_MODE_BG_MOD_ALPHA |
					MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA;
			} else {
				blend_op |= MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA;
			}
		}

		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_OP_MODE(lm,
				blender(i)), blend_op);
		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(lm,
				blender(i)), fg_alpha);
		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(lm,
				blender(i)), bg_alpha);
		/* mirror the blend config on the right mixer, if present */
		if (r_mixer) {
			mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_OP_MODE(r_lm,
					blender(i)), blend_op);
			mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(r_lm,
					blender(i)), fg_alpha);
			mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(r_lm,
					blender(i)), bg_alpha);
		}
	}

	/* OR the accumulated FG_ALPHA bits into BLEND_COLOR_OUT */
	val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm));
	mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm),
		   val | mixer_op_mode);
	if (r_mixer) {
		val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm));
		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm),
			   val | mixer_op_mode);
	}

	mdp5_ctl_blend(ctl, pipeline, stage, r_stage, plane_cnt,
		       ctl_blend_flags);
out:
	spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
}
342
/*
 * drm_crtc_helper_funcs::mode_set_nofb - program the layer mixer output
 * size from the adjusted mode. With a right mixer (source split) each
 * mixer covers half of the horizontal resolution, and the mixers are
 * flagged as the LEFT/RIGHT halves in BLEND_COLOR_OUT.
 */
static void mdp5_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct mdp5_hw_mixer *mixer = mdp5_cstate->pipeline.mixer;
	struct mdp5_hw_mixer *r_mixer = mdp5_cstate->pipeline.r_mixer;
	uint32_t lm = mixer->lm;
	u32 mixer_width, val;
	unsigned long flags;
	struct drm_display_mode *mode;

	if (WARN_ON(!crtc->state))
		return;

	mode = &crtc->state->adjusted_mode;

	DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
	    crtc->name, mode->base.id, mode->name,
	    mode->vrefresh, mode->clock,
	    mode->hdisplay, mode->hsync_start,
	    mode->hsync_end, mode->htotal,
	    mode->vdisplay, mode->vsync_start,
	    mode->vsync_end, mode->vtotal,
	    mode->type, mode->flags);

	/* in source split mode each mixer drives half the width */
	mixer_width = mode->hdisplay;
	if (r_mixer)
		mixer_width /= 2;

	spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);
	mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(lm),
		   MDP5_LM_OUT_SIZE_WIDTH(mixer_width) |
		   MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay));

	/* Assign mixer to LEFT side in source split mode */
	val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm));
	val &= ~MDP5_LM_BLEND_COLOR_OUT_SPLIT_LEFT_RIGHT;
	mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm), val);

	if (r_mixer) {
		u32 r_lm = r_mixer->lm;

		mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(r_lm),
			   MDP5_LM_OUT_SIZE_WIDTH(mixer_width) |
			   MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay));

		/* Assign mixer to RIGHT side in source split mode */
		val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm));
		val |= MDP5_LM_BLEND_COLOR_OUT_SPLIT_LEFT_RIGHT;
		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm), val);
	}

	spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
}
398
/* drm_crtc_helper_funcs::disable - unregister irqs and power down */
static void mdp5_crtc_disable(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);

	DBG("%s", crtc->name);

	if (WARN_ON(!mdp5_crtc->enabled))
		return;

	/* pp_done irq is only registered while in DSI command mode */
	if (mdp5_cstate->cmd_mode)
		mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->pp_done);

	mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err);
	mdp5_disable(mdp5_kms);

	mdp5_crtc->enabled = false;
}
418
/* drm_crtc_helper_funcs::enable - power up and register irqs */
static void mdp5_crtc_enable(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);

	DBG("%s", crtc->name);

	if (WARN_ON(mdp5_crtc->enabled))
		return;

	mdp5_enable(mdp5_kms);
	mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err);

	/* pp_done irq is only needed in DSI command mode */
	if (mdp5_cstate->cmd_mode)
		mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->pp_done);

	mdp5_crtc->enabled = true;
}
438
894558ec
AT
/*
 * Assign a hardware pipeline (layer mixer + derived irq masks) to the
 * new CRTC state. Returns 0 on success or a negative errno if no mixer
 * could be assigned.
 */
int mdp5_crtc_setup_pipeline(struct drm_crtc *crtc,
			     struct drm_crtc_state *new_crtc_state)
{
	struct mdp5_crtc_state *mdp5_cstate =
			to_mdp5_crtc_state(new_crtc_state);
	struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
	struct mdp5_interface *intf;
	bool new_mixer = false;

	/* only assign a mixer if the state doesn't have one yet */
	new_mixer = !pipeline->mixer;

	if (new_mixer) {
		/* NOTE(review): new_mixer is only true when pipeline->mixer
		 * is NULL, so old_mixer is always NULL here and the release
		 * below is effectively a no-op — confirm intent. */
		struct mdp5_hw_mixer *old_mixer = pipeline->mixer;

		pipeline->mixer = mdp5_mixer_assign(new_crtc_state->state, crtc,
						    MDP_LM_CAP_DISPLAY);
		if (IS_ERR(pipeline->mixer))
			return PTR_ERR(pipeline->mixer);

		mdp5_mixer_release(new_crtc_state->state, old_mixer);
	}

	/*
	 * these should have been already set up in the encoder's atomic
	 * check (called by drm_atomic_helper_check_modeset)
	 */
	intf = pipeline->intf;

	mdp5_cstate->err_irqmask = intf2err(intf->num);
	mdp5_cstate->vblank_irqmask = intf2vblank(pipeline->mixer, intf);

	/* ping-pong done irq is only meaningful for DSI command mode */
	if ((intf->type == INTF_DSI) &&
	    (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)) {
		mdp5_cstate->pp_done_irqmask = lm2ppdone(pipeline->mixer);
		mdp5_cstate->cmd_mode = true;
	} else {
		mdp5_cstate->pp_done_irqmask = 0;
		mdp5_cstate->cmd_mode = false;
	}

	return 0;
}
481
ed851963
RC
482struct plane_state {
483 struct drm_plane *plane;
484 struct mdp5_plane_state *state;
485};
486
487static int pstate_cmp(const void *a, const void *b)
06c0dd96 488{
ed851963
RC
489 struct plane_state *pa = (struct plane_state *)a;
490 struct plane_state *pb = (struct plane_state *)b;
491 return pa->state->zpos - pb->state->zpos;
06c0dd96
RC
492}
493
1455adbd
RC
494/* is there a helper for this? */
495static bool is_fullscreen(struct drm_crtc_state *cstate,
496 struct drm_plane_state *pstate)
497{
498 return (pstate->crtc_x <= 0) && (pstate->crtc_y <= 0) &&
499 ((pstate->crtc_x + pstate->crtc_w) >= cstate->mode.hdisplay) &&
500 ((pstate->crtc_y + pstate->crtc_h) >= cstate->mode.vdisplay);
501}
502
ed851963
RC
/*
 * drm_crtc_helper_funcs::atomic_check - validate the new CRTC state:
 * assign a mixer pipeline, sort the attached planes by zpos and give
 * each one a blend stage, rejecting configurations that need more
 * stages than the hardware provides.
 */
static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct drm_plane *plane;
	struct drm_device *dev = crtc->dev;
	struct plane_state pstates[STAGE_MAX + 1];
	const struct mdp5_cfg_hw *hw_cfg;
	const struct drm_plane_state *pstate;
	bool cursor_plane = false;
	int cnt = 0, base = 0, i;
	int ret;

	DBG("%s: check", crtc->name);

	/* collect the planes attached to this crtc in the new state */
	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
		pstates[cnt].plane = plane;
		pstates[cnt].state = to_mdp5_plane_state(pstate);

		cnt++;

		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			cursor_plane = true;
	}

	ret = mdp5_crtc_setup_pipeline(crtc, state);
	if (ret) {
		dev_err(dev->dev, "couldn't assign mixers %d\n", ret);
		return ret;
	}

	/* assign a stage based on sorted zpos property */
	sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL);

	/* if the bottom-most layer is not fullscreen, we need to use
	 * it for solid-color:
	 */
	if ((cnt > 0) && !is_fullscreen(state, &pstates[0].state->base))
		base++;

	/* trigger a warning if cursor isn't the highest zorder */
	WARN_ON(cursor_plane &&
		(pstates[cnt - 1].plane->type != DRM_PLANE_TYPE_CURSOR));

	/* verify that there are not too many planes attached to crtc
	 * and that we don't have conflicting mixer stages:
	 */
	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);

	if ((cnt + base) >= hw_cfg->lm.nb_stages) {
		dev_err(dev->dev, "too many planes! cnt=%d, base=%d\n", cnt, base);
		return -EINVAL;
	}

	for (i = 0; i < cnt; i++) {
		/* the cursor plane (sorted last) always takes the top stage */
		if (cursor_plane && (i == (cnt - 1)))
			pstates[i].state->stage = hw_cfg->lm.nb_stages;
		else
			pstates[i].state->stage = STAGE_BASE + i + base;
		DBG("%s: assign pipe %s on stage=%d", crtc->name,
		    pstates[i].plane->name,
		    pstates[i].state->stage);
	}

	return 0;
}
569
613d2b27
ML
/* drm_crtc_helper_funcs::atomic_begin - nothing to do but debug logging */
static void mdp5_crtc_atomic_begin(struct drm_crtc *crtc,
				   struct drm_crtc_state *old_crtc_state)
{
	DBG("%s: begin", crtc->name);
}
0deed25b 575
613d2b27
ML
/*
 * drm_crtc_helper_funcs::atomic_flush - latch the pending vblank event,
 * program blending, flush all double-buffered registers and arm the
 * vblank irq to complete the flip.
 */
static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc,
				   struct drm_crtc_state *old_crtc_state)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct drm_device *dev = crtc->dev;
	unsigned long flags;

	DBG("%s: event: %p", crtc->name, crtc->state->event);

	WARN_ON(mdp5_crtc->event);

	spin_lock_irqsave(&dev->event_lock, flags);
	mdp5_crtc->event = crtc->state->event;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	/*
	 * If no CTL has been allocated in mdp5_crtc_atomic_check(),
	 * it means we are trying to flush a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	/* XXX: Can this happen now ? */
	if (unlikely(!mdp5_cstate->ctl))
		return;

	blend_setup(crtc);

	/* PP_DONE irq is only used by command mode for now.
	 * It is better to request pending before FLUSH and START trigger
	 * to make sure no pp_done irq missed.
	 * This is safe because no pp_done will happen before SW trigger
	 * in command mode.
	 */
	if (mdp5_cstate->cmd_mode)
		request_pp_done_pending(crtc);

	mdp5_crtc->flushed_mask = crtc_flush_all(crtc);

	/* XXX are we leaking out state here? */
	mdp5_crtc->vblank.irqmask = mdp5_cstate->vblank_irqmask;
	mdp5_crtc->err.irqmask = mdp5_cstate->err_irqmask;
	mdp5_crtc->pp_done.irqmask = mdp5_cstate->pp_done_irqmask;

	request_pending(crtc, PENDING_FLIP);
}
621
58560890
RC
/* Compute the visible cursor region (ROI) for the current cursor x/y. */
static void get_roi(struct drm_crtc *crtc, uint32_t *roi_w, uint32_t *roi_h)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	uint32_t xres = crtc->mode.hdisplay;
	uint32_t yres = crtc->mode.vdisplay;

	/*
	 * Cursor Region Of Interest (ROI) is a plane read from cursor
	 * buffer to render. The ROI region is determined by the visibility of
	 * the cursor point. In the default Cursor image the cursor point will
	 * be at the top left of the cursor image, unless it is specified
	 * otherwise using hotspot feature.
	 *
	 * If the cursor point reaches the right (xres - x < cursor.width) or
	 * bottom (yres - y < cursor.height) boundary of the screen, then ROI
	 * width and ROI height need to be evaluated to crop the cursor image
	 * accordingly.
	 * (xres-x) will be new cursor width when x > (xres - cursor.width)
	 * (yres-y) will be new cursor height when y > (yres - cursor.height)
	 */
	*roi_w = min(mdp5_crtc->cursor.width, xres -
			mdp5_crtc->cursor.x);
	*roi_h = min(mdp5_crtc->cursor.height, yres -
			mdp5_crtc->cursor.y);
}
647
e172d10a
BG
648static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
649 struct drm_file *file, uint32_t handle,
650 uint32_t width, uint32_t height)
651{
652 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
0ddc3a63 653 struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
f316b25a 654 struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
e172d10a
BG
655 struct drm_device *dev = crtc->dev;
656 struct mdp5_kms *mdp5_kms = get_kms(crtc);
389b09a1 657 struct drm_gem_object *cursor_bo, *old_bo = NULL;
78babc16
RC
658 uint32_t blendcfg, stride;
659 uint64_t cursor_addr;
0ddc3a63 660 struct mdp5_ctl *ctl;
d13b33fa 661 int ret, lm;
e172d10a
BG
662 enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL;
663 uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
58560890 664 uint32_t roi_w, roi_h;
389b09a1 665 bool cursor_enable = true;
e172d10a
BG
666 unsigned long flags;
667
668 if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
669 dev_err(dev->dev, "bad cursor size: %dx%d\n", width, height);
670 return -EINVAL;
671 }
672
0ddc3a63
AT
673 ctl = mdp5_cstate->ctl;
674 if (!ctl)
e172d10a
BG
675 return -EINVAL;
676
b7621b2a
AT
677 /* don't support LM cursors when we we have source split enabled */
678 if (mdp5_cstate->pipeline.r_mixer)
679 return -EINVAL;
680
e172d10a
BG
681 if (!handle) {
682 DBG("Cursor off");
389b09a1
SV
683 cursor_enable = false;
684 goto set_cursor;
e172d10a
BG
685 }
686
a8ad0bd8 687 cursor_bo = drm_gem_object_lookup(file, handle);
e172d10a
BG
688 if (!cursor_bo)
689 return -ENOENT;
690
691 ret = msm_gem_get_iova(cursor_bo, mdp5_kms->id, &cursor_addr);
692 if (ret)
693 return -EINVAL;
694
0ddc3a63 695 lm = mdp5_cstate->pipeline.mixer->lm;
d13b33fa 696 stride = width * drm_format_plane_cpp(DRM_FORMAT_ARGB8888, 0);
e172d10a
BG
697
698 spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
699 old_bo = mdp5_crtc->cursor.scanout_bo;
700
58560890
RC
701 mdp5_crtc->cursor.scanout_bo = cursor_bo;
702 mdp5_crtc->cursor.width = width;
703 mdp5_crtc->cursor.height = height;
704
705 get_roi(crtc, &roi_w, &roi_h);
706
e172d10a
BG
707 mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride);
708 mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm),
709 MDP5_LM_CURSOR_FORMAT_FORMAT(CURSOR_FMT_ARGB8888));
710 mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_IMG_SIZE(lm),
711 MDP5_LM_CURSOR_IMG_SIZE_SRC_H(height) |
712 MDP5_LM_CURSOR_IMG_SIZE_SRC_W(width));
713 mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm),
58560890
RC
714 MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) |
715 MDP5_LM_CURSOR_SIZE_ROI_W(roi_w));
e172d10a
BG
716 mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BASE_ADDR(lm), cursor_addr);
717
e172d10a 718 blendcfg = MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_EN;
e172d10a
BG
719 blendcfg |= MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL(cur_alpha);
720 mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BLEND_CONFIG(lm), blendcfg);
721
e172d10a
BG
722 spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);
723
389b09a1 724set_cursor:
f316b25a 725 ret = mdp5_ctl_set_cursor(ctl, pipeline, 0, cursor_enable);
389b09a1
SV
726 if (ret) {
727 dev_err(dev->dev, "failed to %sable cursor: %d\n",
728 cursor_enable ? "en" : "dis", ret);
e172d10a 729 goto end;
389b09a1 730 }
e172d10a 731
e172d10a
BG
732 crtc_flush(crtc, flush_mask);
733
734end:
735 if (old_bo) {
736 drm_flip_work_queue(&mdp5_crtc->unref_cursor_work, old_bo);
737 /* enable vblank to complete cursor work: */
738 request_pending(crtc, PENDING_CURSOR);
739 }
740 return ret;
741}
742
/*
 * drm_crtc_funcs::cursor_move - reposition the LM hardware cursor,
 * clamping to the screen and re-deriving the visible ROI.
 */
static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	uint32_t lm = mdp5_cstate->pipeline.mixer->lm;
	uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
	uint32_t roi_w;
	uint32_t roi_h;
	unsigned long flags;

	/* don't support LM cursors when we have source split enabled */
	if (mdp5_cstate->pipeline.r_mixer)
		return -EINVAL;

	/* In case the CRTC is disabled, just drop the cursor update */
	if (unlikely(!crtc->state->enable))
		return 0;

	/* clamp to the top/left edges; the ROI handles right/bottom crop */
	mdp5_crtc->cursor.x = x = max(x, 0);
	mdp5_crtc->cursor.y = y = max(y, 0);

	get_roi(crtc, &roi_w, &roi_h);

	spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm),
		   MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) |
		   MDP5_LM_CURSOR_SIZE_ROI_W(roi_w));
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_START_XY(lm),
		   MDP5_LM_CURSOR_START_XY_Y_START(y) |
		   MDP5_LM_CURSOR_START_XY_X_START(x));
	spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);

	crtc_flush(crtc, flush_mask);

	return 0;
}
780
c1e2a130
AT
781static void
782mdp5_crtc_atomic_print_state(struct drm_printer *p,
783 const struct drm_crtc_state *state)
784{
785 struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(state);
786 struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
b7621b2a 787 struct mdp5_kms *mdp5_kms = get_kms(state->crtc);
c1e2a130
AT
788
789 if (WARN_ON(!pipeline))
790 return;
791
792 drm_printf(p, "\thwmixer=%s\n", pipeline->mixer ?
793 pipeline->mixer->name : "(null)");
b7621b2a
AT
794
795 if (mdp5_kms->caps & MDP_CAP_SRC_SPLIT)
796 drm_printf(p, "\tright hwmixer=%s\n", pipeline->r_mixer ?
797 pipeline->r_mixer->name : "(null)");
c1e2a130
AT
798}
799
800static void mdp5_crtc_reset(struct drm_crtc *crtc)
801{
802 struct mdp5_crtc_state *mdp5_cstate;
803
804 if (crtc->state) {
805 __drm_atomic_helper_crtc_destroy_state(crtc->state);
806 kfree(to_mdp5_crtc_state(crtc->state));
807 }
808
809 mdp5_cstate = kzalloc(sizeof(*mdp5_cstate), GFP_KERNEL);
810
811 if (mdp5_cstate) {
812 mdp5_cstate->base.crtc = crtc;
813 crtc->state = &mdp5_cstate->base;
814 }
815}
816
817static struct drm_crtc_state *
818mdp5_crtc_duplicate_state(struct drm_crtc *crtc)
819{
820 struct mdp5_crtc_state *mdp5_cstate;
821
822 if (WARN_ON(!crtc->state))
823 return NULL;
824
825 mdp5_cstate = kmemdup(to_mdp5_crtc_state(crtc->state),
826 sizeof(*mdp5_cstate), GFP_KERNEL);
827 if (!mdp5_cstate)
828 return NULL;
829
830 __drm_atomic_helper_crtc_duplicate_state(crtc, &mdp5_cstate->base);
831
832 return &mdp5_cstate->base;
833}
834
/* drm_crtc_funcs::atomic_destroy_state - free a duplicated crtc state */
static void mdp5_crtc_destroy_state(struct drm_crtc *crtc, struct drm_crtc_state *state)
{
	struct mdp5_crtc_state *cstate = to_mdp5_crtc_state(state);

	/* let the helper tear down the embedded base state first */
	__drm_atomic_helper_crtc_destroy_state(state);
	kfree(cstate);
}
843
/* CRTC funcs used when the CRTC drives a layer-mixer hardware cursor */
static const struct drm_crtc_funcs mdp5_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.destroy = mdp5_crtc_destroy,
	.page_flip = drm_atomic_helper_page_flip,
	.set_property = drm_atomic_helper_crtc_set_property,
	.reset = mdp5_crtc_reset,
	.atomic_duplicate_state = mdp5_crtc_duplicate_state,
	.atomic_destroy_state = mdp5_crtc_destroy_state,
	.cursor_set = mdp5_crtc_cursor_set,
	.cursor_move = mdp5_crtc_cursor_move,
	.atomic_print_state = mdp5_crtc_atomic_print_state,
};
856
5798c8e0
AT
/* Same as mdp5_crtc_funcs, minus the LM cursor_set/cursor_move hooks */
static const struct drm_crtc_funcs mdp5_crtc_no_lm_cursor_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.destroy = mdp5_crtc_destroy,
	.page_flip = drm_atomic_helper_page_flip,
	.set_property = drm_atomic_helper_crtc_set_property,
	.reset = mdp5_crtc_reset,
	.atomic_duplicate_state = mdp5_crtc_duplicate_state,
	.atomic_destroy_state = mdp5_crtc_destroy_state,
	.atomic_print_state = mdp5_crtc_atomic_print_state,
};
867
06c0dd96 868static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {
ed851963 869 .mode_set_nofb = mdp5_crtc_mode_set_nofb,
5db0f6e8
SV
870 .disable = mdp5_crtc_disable,
871 .enable = mdp5_crtc_enable,
ed851963
RC
872 .atomic_check = mdp5_crtc_atomic_check,
873 .atomic_begin = mdp5_crtc_atomic_begin,
874 .atomic_flush = mdp5_crtc_atomic_flush,
06c0dd96
RC
875};
876
877static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
878{
879 struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, vblank);
880 struct drm_crtc *crtc = &mdp5_crtc->base;
e172d10a 881 struct msm_drm_private *priv = crtc->dev->dev_private;
06c0dd96
RC
882 unsigned pending;
883
884 mdp_irq_unregister(&get_kms(crtc)->base, &mdp5_crtc->vblank);
885
886 pending = atomic_xchg(&mdp5_crtc->pending, 0);
887
888 if (pending & PENDING_FLIP) {
889 complete_flip(crtc, NULL);
06c0dd96 890 }
e172d10a
BG
891
892 if (pending & PENDING_CURSOR)
893 drm_flip_work_commit(&mdp5_crtc->unref_cursor_work, priv->wq);
06c0dd96
RC
894}
895
896static void mdp5_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
897{
898 struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, err);
0deed25b 899
cee26588 900 DBG("%s: error: %08x", mdp5_crtc->base.name, irqstatus);
06c0dd96
RC
901}
902
68cdbed9
HL
903static void mdp5_crtc_pp_done_irq(struct mdp_irq *irq, uint32_t irqstatus)
904{
905 struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc,
906 pp_done);
907
908 complete(&mdp5_crtc->pp_completion);
909}
910
911static void mdp5_crtc_wait_for_pp_done(struct drm_crtc *crtc)
912{
913 struct drm_device *dev = crtc->dev;
914 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
0ddc3a63 915 struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
68cdbed9
HL
916 int ret;
917
918 ret = wait_for_completion_timeout(&mdp5_crtc->pp_completion,
919 msecs_to_jiffies(50));
920 if (ret == 0)
adfc0e63 921 dev_warn(dev->dev, "pp done time out, lm=%d\n",
0ddc3a63 922 mdp5_cstate->pipeline.mixer->lm);
68cdbed9
HL
923}
924
0a5c9aad
HL
925static void mdp5_crtc_wait_for_flush_done(struct drm_crtc *crtc)
926{
927 struct drm_device *dev = crtc->dev;
928 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
0ddc3a63
AT
929 struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
930 struct mdp5_ctl *ctl = mdp5_cstate->ctl;
0a5c9aad
HL
931 int ret;
932
933 /* Should not call this function if crtc is disabled. */
0ddc3a63 934 if (!ctl)
0a5c9aad
HL
935 return;
936
937 ret = drm_crtc_vblank_get(crtc);
938 if (ret)
939 return;
940
941 ret = wait_event_timeout(dev->vblank[drm_crtc_index(crtc)].queue,
0ddc3a63 942 ((mdp5_ctl_get_commit_status(ctl) &
0a5c9aad
HL
943 mdp5_crtc->flushed_mask) == 0),
944 msecs_to_jiffies(50));
945 if (ret <= 0)
946 dev_warn(dev->dev, "vblank time out, crtc=%d\n", mdp5_crtc->id);
947
948 mdp5_crtc->flushed_mask = 0;
949
950 drm_crtc_vblank_put(crtc);
951}
952
06c0dd96
RC
953uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc)
954{
955 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
956 return mdp5_crtc->vblank.irqmask;
957}
958
f316b25a 959void mdp5_crtc_set_pipeline(struct drm_crtc *crtc)
06c0dd96 960{
0ddc3a63 961 struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
06c0dd96 962 struct mdp5_kms *mdp5_kms = get_kms(crtc);
0a5c9aad 963
0ddc3a63 964 /* should this be done elsewhere ? */
8bc1fe92 965 mdp_irq_update(&mdp5_kms->base);
06c0dd96 966
f316b25a 967 mdp5_ctl_set_pipeline(mdp5_cstate->ctl, &mdp5_cstate->pipeline);
0deed25b 968}
06c0dd96 969
10967a06
AT
970struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc)
971{
0ddc3a63 972 struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
10967a06 973
0ddc3a63 974 return mdp5_cstate->ctl;
10967a06
AT
975}
976
adfc0e63 977struct mdp5_hw_mixer *mdp5_crtc_get_mixer(struct drm_crtc *crtc)
0deed25b 978{
0ddc3a63
AT
979 struct mdp5_crtc_state *mdp5_cstate;
980
981 if (WARN_ON(!crtc))
982 return ERR_PTR(-EINVAL);
983
984 mdp5_cstate = to_mdp5_crtc_state(crtc->state);
985
986 return WARN_ON(!mdp5_cstate->pipeline.mixer) ?
987 ERR_PTR(-EINVAL) : mdp5_cstate->pipeline.mixer;
389b09a1 988}
0deed25b 989
f316b25a
AT
990struct mdp5_pipeline *mdp5_crtc_get_pipeline(struct drm_crtc *crtc)
991{
992 struct mdp5_crtc_state *mdp5_cstate;
993
994 if (WARN_ON(!crtc))
995 return ERR_PTR(-EINVAL);
996
997 mdp5_cstate = to_mdp5_crtc_state(crtc->state);
998
999 return &mdp5_cstate->pipeline;
1000}
1001
0a5c9aad
HL
1002void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc)
1003{
0ddc3a63 1004 struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
68cdbed9 1005
0ddc3a63 1006 if (mdp5_cstate->cmd_mode)
68cdbed9
HL
1007 mdp5_crtc_wait_for_pp_done(crtc);
1008 else
1009 mdp5_crtc_wait_for_flush_done(crtc);
0a5c9aad
HL
1010}
1011
06c0dd96
RC
1012/* initialize crtc */
1013struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
5798c8e0
AT
1014 struct drm_plane *plane,
1015 struct drm_plane *cursor_plane, int id)
06c0dd96
RC
1016{
1017 struct drm_crtc *crtc = NULL;
1018 struct mdp5_crtc *mdp5_crtc;
06c0dd96
RC
1019
1020 mdp5_crtc = kzalloc(sizeof(*mdp5_crtc), GFP_KERNEL);
d7f8db53
BB
1021 if (!mdp5_crtc)
1022 return ERR_PTR(-ENOMEM);
06c0dd96
RC
1023
1024 crtc = &mdp5_crtc->base;
1025
06c0dd96 1026 mdp5_crtc->id = id;
0deed25b
SV
1027
1028 spin_lock_init(&mdp5_crtc->lm_lock);
e172d10a 1029 spin_lock_init(&mdp5_crtc->cursor.lock);
68cdbed9 1030 init_completion(&mdp5_crtc->pp_completion);
06c0dd96
RC
1031
1032 mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq;
1033 mdp5_crtc->err.irq = mdp5_crtc_err_irq;
0ddc3a63 1034 mdp5_crtc->pp_done.irq = mdp5_crtc_pp_done_irq;
06c0dd96 1035
5798c8e0
AT
1036 if (cursor_plane)
1037 drm_crtc_init_with_planes(dev, crtc, plane, cursor_plane,
1038 &mdp5_crtc_no_lm_cursor_funcs, NULL);
1039 else
1040 drm_crtc_init_with_planes(dev, crtc, plane, NULL,
1041 &mdp5_crtc_funcs, NULL);
e172d10a
BG
1042
1043 drm_flip_work_init(&mdp5_crtc->unref_cursor_work,
1044 "unref cursor", unref_cursor_worker);
1045
06c0dd96 1046 drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs);
a8cecf33 1047 plane->crtc = crtc;
06c0dd96 1048
06c0dd96 1049 return crtc;
06c0dd96 1050}