drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
/*
 * Copyright (c) 2014 The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "mdp5_kms.h"

#include <linux/sort.h>
#include <drm/drm_mode.h>
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
#include "drm_flip_work.h"

#define CURSOR_WIDTH	64
#define CURSOR_HEIGHT	64

#define SSPP_MAX	(SSPP_RGB3 + 1) /* TODO: Add SSPP_MAX in mdp5.xml.h */

struct mdp5_crtc {
	struct drm_crtc base;
	char name[8];
	int id;
	bool enabled;

	/* layer mixer used for this CRTC (+ its lock): */
#define GET_LM_ID(crtc_id)	((crtc_id == 3) ? 5 : crtc_id)
	int lm;
	spinlock_t lm_lock;	/* protect REG_MDP5_LM_* registers */

	/* CTL used for this CRTC: */
	struct mdp5_ctl *ctl;

	/* if there is a pending flip, these will be non-null: */
	struct drm_pending_vblank_event *event;

#define PENDING_CURSOR 0x1
#define PENDING_FLIP   0x2
	atomic_t pending;

	/* for unref'ing cursor bo's after scanout completes: */
	struct drm_flip_work unref_cursor_work;

	struct mdp_irq vblank;
	struct mdp_irq err;

	struct {
		/* protect REG_MDP5_LM_CURSOR* registers and cursor scanout_bo */
		spinlock_t lock;

		/* current cursor being scanned out: */
		struct drm_gem_object *scanout_bo;
		uint32_t width;
		uint32_t height;
	} cursor;
};
#define to_mdp5_crtc(x) container_of(x, struct mdp5_crtc, base)

static struct mdp5_kms *get_kms(struct drm_crtc *crtc)
{
	struct msm_drm_private *priv = crtc->dev->dev_private;
	return to_mdp5_kms(to_mdp_kms(priv->kms));
}

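/*
 * Accumulate pending completion flags (PENDING_FLIP / PENDING_CURSOR) and
 * register the vblank irq, so the pending work is picked up in
 * mdp5_crtc_vblank_irq() at the next vblank.
 */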
static void request_pending(struct drm_crtc *crtc, uint32_t pending)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

	atomic_or(pending, &mdp5_crtc->pending);
	mdp_irq_register(&get_kms(crtc)->base, &mdp5_crtc->vblank);
}

#define mdp5_lm_get_flush(lm)	mdp_ctl_flush_mask_lm(lm)

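/*
 * Commit the given flush mask to this CRTC's CTL, so the staged register
 * updates for those blocks take effect.
 */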
static void crtc_flush(struct drm_crtc *crtc, u32 flush_mask)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

	DBG("%s: flush=%08x", mdp5_crtc->name, flush_mask);
	mdp5_ctl_commit(mdp5_crtc->ctl, flush_mask);
}

/*
 * Flush updates to make sure the hw picks up the new scanout fb, so that
 * we can safely queue an unref of the current fb (i.e. by the next vblank
 * we know the hw is done with the previous scanout_fb).
 */
static void crtc_flush_all(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct drm_plane *plane;
	uint32_t flush_mask = 0;

	/* we could have already released CTL in the disable path: */
	if (!mdp5_crtc->ctl)
		return;

	drm_atomic_crtc_for_each_plane(plane, crtc) {
		flush_mask |= mdp5_plane_get_flush(plane);
	}
	flush_mask |= mdp5_ctl_get_flush(mdp5_crtc->ctl);
	flush_mask |= mdp5_lm_get_flush(mdp5_crtc->lm);

	crtc_flush(crtc, flush_mask);
}

/* if file != NULL, this is the potential cancel-flip path from preclose */
static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct drm_pending_vblank_event *event;
	struct drm_plane *plane;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	event = mdp5_crtc->event;
	if (event) {
		/* if regular vblank case (!file) or if cancel-flip from
		 * preclose on file that requested flip, then send the
		 * event:
		 */
		if (!file || (event->base.file_priv == file)) {
			mdp5_crtc->event = NULL;
			DBG("%s: send event: %p", mdp5_crtc->name, event);
			drm_send_vblank_event(dev, mdp5_crtc->id, event);
		}
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);

	drm_atomic_crtc_for_each_plane(plane, crtc) {
		mdp5_plane_complete_flip(plane);
	}
}

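/*
 * Deferred-work callback: drop the iova and the GEM reference of a cursor
 * bo once it is no longer being scanned out (committed from the vblank irq).
 */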
static void unref_cursor_worker(struct drm_flip_work *work, void *val)
{
	struct mdp5_crtc *mdp5_crtc =
		container_of(work, struct mdp5_crtc, unref_cursor_work);
	struct mdp5_kms *mdp5_kms = get_kms(&mdp5_crtc->base);

	msm_gem_put_iova(val, mdp5_kms->id);
	drm_gem_object_unreference_unlocked(val);
}

static void mdp5_crtc_destroy(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

	drm_crtc_cleanup(crtc);
	drm_flip_work_cleanup(&mdp5_crtc->unref_cursor_work);

	kfree(mdp5_crtc);
}

static bool mdp5_crtc_mode_fixup(struct drm_crtc *crtc,
		const struct drm_display_mode *mode,
		struct drm_display_mode *adjusted_mode)
{
	return true;
}

/*
 * blend_setup() - blend all the planes of a CRTC
 *
 * When border is enabled, the border color will ALWAYS be the base layer.
 * Therefore, the first plane (private RGB pipe) will start at STAGE0.
 * If border is disabled, the first plane starts at STAGE_BASE.
 *
 * Note:
 * Border is not enabled here because the private plane matches the CRTC
 * resolution exactly.
 */
static void blend_setup(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct drm_plane *plane;
	const struct mdp5_cfg_hw *hw_cfg;
	uint32_t lm = mdp5_crtc->lm, blend_cfg = 0;
	unsigned long flags;
#define blender(stage)	((stage) - STAGE_BASE)

	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);

	spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);

	/* ctl could be released already when we are shutting down: */
	if (!mdp5_crtc->ctl)
		goto out;

	drm_atomic_crtc_for_each_plane(plane, crtc) {
		enum mdp_mixer_stage_id stage =
			to_mdp5_plane_state(plane->state)->stage;

		/*
		 * Note: This cannot happen with current implementation but
		 * we need to check this condition once z property is added
		 */
		BUG_ON(stage > hw_cfg->lm.nb_stages);

		/* LM */
		mdp5_write(mdp5_kms,
				REG_MDP5_LM_BLEND_OP_MODE(lm, blender(stage)),
				MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
				MDP5_LM_BLEND_OP_MODE_BG_ALPHA(BG_CONST));
		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(lm,
				blender(stage)), 0xff);
		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(lm,
				blender(stage)), 0x00);
		/* CTL */
		blend_cfg |= mdp_ctl_blend_mask(mdp5_plane_pipe(plane), stage);
		DBG("%s: blending pipe %s on stage=%d", mdp5_crtc->name,
				pipe2name(mdp5_plane_pipe(plane)), stage);
	}

	DBG("%s: lm%d: blend config = 0x%08x", mdp5_crtc->name, lm, blend_cfg);
	mdp5_ctl_blend(mdp5_crtc->ctl, lm, blend_cfg);

out:
	spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
}

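/*
 * Program the layer mixer output size from the CRTC's adjusted mode; no
 * framebuffer is touched here (hence the _nofb variant).
 */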
static void mdp5_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	unsigned long flags;
	struct drm_display_mode *mode;

	if (WARN_ON(!crtc->state))
		return;

	mode = &crtc->state->adjusted_mode;

	DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
			mdp5_crtc->name, mode->base.id, mode->name,
			mode->vrefresh, mode->clock,
			mode->hdisplay, mode->hsync_start,
			mode->hsync_end, mode->htotal,
			mode->vdisplay, mode->vsync_start,
			mode->vsync_end, mode->vtotal,
			mode->type, mode->flags);

	spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);
	mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(mdp5_crtc->lm),
			MDP5_LM_OUT_SIZE_WIDTH(mode->hdisplay) |
			MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay));
	spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
}

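/*
 * Stop scanning out: stage nothing on the layer mixer, unregister the error
 * irq and balance the mdp5_enable() done in mdp5_crtc_enable().
 */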
static void mdp5_crtc_disable(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);

	DBG("%s", mdp5_crtc->name);

	if (WARN_ON(!mdp5_crtc->enabled))
		return;

	/* set STAGE_UNUSED for all layers */
	mdp5_ctl_blend(mdp5_crtc->ctl, mdp5_crtc->lm, 0x00000000);

	mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err);
	mdp5_disable(mdp5_kms);

	mdp5_crtc->enabled = false;
}

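/*
 * Bring the CRTC path up: take an mdp5_enable() reference, register the
 * error irq, and flush the current configuration to hardware.
 */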
static void mdp5_crtc_enable(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);

	DBG("%s", mdp5_crtc->name);

	if (WARN_ON(mdp5_crtc->enabled))
		return;

	mdp5_enable(mdp5_kms);
	mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err);

	crtc_flush_all(crtc);

	mdp5_crtc->enabled = true;
}

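/*
 * Helpers used by mdp5_crtc_atomic_check() to sort the attached planes by
 * zpos before assigning mixer stages.
 */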
struct plane_state {
	struct drm_plane *plane;
	struct mdp5_plane_state *state;
};

static int pstate_cmp(const void *a, const void *b)
{
	struct plane_state *pa = (struct plane_state *)a;
	struct plane_state *pb = (struct plane_state *)b;
	return pa->state->zpos - pb->state->zpos;
}

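/*
 * Validate the new CRTC state: grab a free CTL if the CRTC is being
 * enabled, reject configurations with more planes than there are mixer
 * stages, and assign each plane a mixer stage in zpos order.
 */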
static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct drm_plane *plane;
	struct drm_device *dev = crtc->dev;
	struct plane_state pstates[STAGE3 + 1];
	int cnt = 0, i;

	DBG("%s: check", mdp5_crtc->name);

	/* request a free CTL, if none is already allocated for this CRTC */
	if (state->enable && !mdp5_crtc->ctl) {
		mdp5_crtc->ctl = mdp5_ctlm_request(mdp5_kms->ctlm, crtc);
		if (WARN_ON(!mdp5_crtc->ctl))
			return -EINVAL;
	}

	/* verify that there are not too many planes attached to crtc
	 * and that we don't have conflicting mixer stages:
	 */
	drm_atomic_crtc_state_for_each_plane(plane, state) {
		struct drm_plane_state *pstate;

		if (cnt >= ARRAY_SIZE(pstates)) {
			dev_err(dev->dev, "too many planes!\n");
			return -EINVAL;
		}

		pstate = state->state->plane_states[drm_plane_index(plane)];

		/* plane might not have changed, in which case take
		 * current state:
		 */
		if (!pstate)
			pstate = plane->state;

		pstates[cnt].plane = plane;
		pstates[cnt].state = to_mdp5_plane_state(pstate);

		cnt++;
	}

	sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL);

	for (i = 0; i < cnt; i++) {
		pstates[i].state->stage = STAGE_BASE + i;
		DBG("%s: assign pipe %s on stage=%d", mdp5_crtc->name,
				pipe2name(mdp5_plane_pipe(pstates[i].plane)),
				pstates[i].state->stage);
	}

	return 0;
}

static void mdp5_crtc_atomic_begin(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	DBG("%s: begin", mdp5_crtc->name);
}

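/*
 * Commit the new state to hardware: stash the pageflip event, program the
 * mixer blend configuration, flush everything, and arm PENDING_FLIP so the
 * event is sent from the vblank irq.  The CTL is released again if the
 * CRTC is being disabled.
 */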
static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	unsigned long flags;

	DBG("%s: event: %p", mdp5_crtc->name, crtc->state->event);

	WARN_ON(mdp5_crtc->event);

	spin_lock_irqsave(&dev->event_lock, flags);
	mdp5_crtc->event = crtc->state->event;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	blend_setup(crtc);
	crtc_flush_all(crtc);
	request_pending(crtc, PENDING_FLIP);

	if (mdp5_crtc->ctl && !crtc->state->enable) {
		mdp5_ctl_release(mdp5_crtc->ctl);
		mdp5_crtc->ctl = NULL;
	}
}

static int mdp5_crtc_set_property(struct drm_crtc *crtc,
		struct drm_property *property, uint64_t val)
{
	// XXX
	return -EINVAL;
}

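/*
 * Set (or clear, when handle == 0) the HW cursor: pin the new cursor bo,
 * program the LM cursor registers, and queue the previous bo for unref
 * once the update has taken effect.
 */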
static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
		struct drm_file *file, uint32_t handle,
		uint32_t width, uint32_t height)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct drm_gem_object *cursor_bo, *old_bo;
	uint32_t blendcfg, cursor_addr, stride;
	int ret, bpp, lm;
	unsigned int depth;
	enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL;
	uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
	unsigned long flags;

	if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
		dev_err(dev->dev, "bad cursor size: %dx%d\n", width, height);
		return -EINVAL;
	}

	if (NULL == mdp5_crtc->ctl)
		return -EINVAL;

	if (!handle) {
		DBG("Cursor off");
		return mdp5_ctl_set_cursor(mdp5_crtc->ctl, false);
	}

	cursor_bo = drm_gem_object_lookup(dev, file, handle);
	if (!cursor_bo)
		return -ENOENT;

	ret = msm_gem_get_iova(cursor_bo, mdp5_kms->id, &cursor_addr);
	if (ret)
		return -EINVAL;

	lm = mdp5_crtc->lm;
	drm_fb_get_bpp_depth(DRM_FORMAT_ARGB8888, &depth, &bpp);
	stride = width * (bpp >> 3);

	spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
	old_bo = mdp5_crtc->cursor.scanout_bo;

	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride);
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm),
			MDP5_LM_CURSOR_FORMAT_FORMAT(CURSOR_FMT_ARGB8888));
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_IMG_SIZE(lm),
			MDP5_LM_CURSOR_IMG_SIZE_SRC_H(height) |
			MDP5_LM_CURSOR_IMG_SIZE_SRC_W(width));
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm),
			MDP5_LM_CURSOR_SIZE_ROI_H(height) |
			MDP5_LM_CURSOR_SIZE_ROI_W(width));
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BASE_ADDR(lm), cursor_addr);

	blendcfg = MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_EN;
	blendcfg |= MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_TRANSP_EN;
	blendcfg |= MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL(cur_alpha);
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BLEND_CONFIG(lm), blendcfg);

	mdp5_crtc->cursor.scanout_bo = cursor_bo;
	mdp5_crtc->cursor.width = width;
	mdp5_crtc->cursor.height = height;
	spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);

	ret = mdp5_ctl_set_cursor(mdp5_crtc->ctl, true);
	if (ret)
		goto end;

	flush_mask |= mdp5_ctl_get_flush(mdp5_crtc->ctl);
	crtc_flush(crtc, flush_mask);

end:
	if (old_bo) {
		drm_flip_work_queue(&mdp5_crtc->unref_cursor_work, old_bo);
		/* enable vblank to complete cursor work: */
		request_pending(crtc, PENDING_CURSOR);
	}
	return ret;
}

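/*
 * Move the HW cursor: clamp the position to the active area, shrink the
 * ROI when the cursor crosses the right/bottom edge, then flush.
 */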
static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
	uint32_t xres = crtc->mode.hdisplay;
	uint32_t yres = crtc->mode.vdisplay;
	uint32_t roi_w;
	uint32_t roi_h;
	unsigned long flags;

	x = (x > 0) ? x : 0;
	y = (y > 0) ? y : 0;

	/*
	 * The cursor Region Of Interest (ROI) is the part of the cursor
	 * buffer that is read out and rendered.  The ROI is determined by
	 * the visibility of the cursor point.  In the default cursor image
	 * the cursor point is at the top left of the image, unless specified
	 * otherwise using the hotspot feature.
	 *
	 * If the cursor point reaches the right (xres - x < cursor.width) or
	 * bottom (yres - y < cursor.height) boundary of the screen, then ROI
	 * width and ROI height need to be evaluated to crop the cursor image
	 * accordingly.
	 * (xres - x) will be the new cursor width when x > (xres - cursor.width)
	 * (yres - y) will be the new cursor height when y > (yres - cursor.height)
	 */
	roi_w = min(mdp5_crtc->cursor.width, xres - x);
	roi_h = min(mdp5_crtc->cursor.height, yres - y);

	spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(mdp5_crtc->lm),
			MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) |
			MDP5_LM_CURSOR_SIZE_ROI_W(roi_w));
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_START_XY(mdp5_crtc->lm),
			MDP5_LM_CURSOR_START_XY_Y_START(y) |
			MDP5_LM_CURSOR_START_XY_X_START(x));
	spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);

	crtc_flush(crtc, flush_mask);

	return 0;
}

static const struct drm_crtc_funcs mdp5_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.destroy = mdp5_crtc_destroy,
	.page_flip = drm_atomic_helper_page_flip,
	.set_property = mdp5_crtc_set_property,
	.reset = drm_atomic_helper_crtc_reset,
	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
	.cursor_set = mdp5_crtc_cursor_set,
	.cursor_move = mdp5_crtc_cursor_move,
};

static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {
	.mode_fixup = mdp5_crtc_mode_fixup,
	.mode_set_nofb = mdp5_crtc_mode_set_nofb,
	.prepare = mdp5_crtc_disable,
	.commit = mdp5_crtc_enable,
	.atomic_check = mdp5_crtc_atomic_check,
	.atomic_begin = mdp5_crtc_atomic_begin,
	.atomic_flush = mdp5_crtc_atomic_flush,
};

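/*
 * Vblank irq handler: unregister the (one-shot) vblank irq, then complete
 * whatever was armed via request_pending() - send the pageflip event and/or
 * commit the deferred cursor-bo unref work.
 */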
static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
	struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, vblank);
	struct drm_crtc *crtc = &mdp5_crtc->base;
	struct msm_drm_private *priv = crtc->dev->dev_private;
	unsigned pending;

	mdp_irq_unregister(&get_kms(crtc)->base, &mdp5_crtc->vblank);

	pending = atomic_xchg(&mdp5_crtc->pending, 0);

	if (pending & PENDING_FLIP) {
		complete_flip(crtc, NULL);
	}

	if (pending & PENDING_CURSOR)
		drm_flip_work_commit(&mdp5_crtc->unref_cursor_work, priv->wq);
}

static void mdp5_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
	struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, err);

	DBG("%s: error: %08x", mdp5_crtc->name, irqstatus);
}

uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	return mdp5_crtc->vblank.irqmask;
}

void mdp5_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file)
{
	DBG("cancel: %p", file);
	complete_flip(crtc, file);
}

/* set interface for routing crtc->encoder: */
void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf,
		enum mdp5_intf intf_id)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	uint32_t flush_mask = 0;
	uint32_t intf_sel;
	unsigned long flags;

	/* now that we know what irq's we want: */
	mdp5_crtc->err.irqmask = intf2err(intf);
	mdp5_crtc->vblank.irqmask = intf2vblank(intf);
	mdp_irq_update(&mdp5_kms->base);

	spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
	intf_sel = mdp5_read(mdp5_kms, REG_MDP5_DISP_INTF_SEL);

	switch (intf) {
	case 0:
		intf_sel &= ~MDP5_DISP_INTF_SEL_INTF0__MASK;
		intf_sel |= MDP5_DISP_INTF_SEL_INTF0(intf_id);
		break;
	case 1:
		intf_sel &= ~MDP5_DISP_INTF_SEL_INTF1__MASK;
		intf_sel |= MDP5_DISP_INTF_SEL_INTF1(intf_id);
		break;
	case 2:
		intf_sel &= ~MDP5_DISP_INTF_SEL_INTF2__MASK;
		intf_sel |= MDP5_DISP_INTF_SEL_INTF2(intf_id);
		break;
	case 3:
		intf_sel &= ~MDP5_DISP_INTF_SEL_INTF3__MASK;
		intf_sel |= MDP5_DISP_INTF_SEL_INTF3(intf_id);
		break;
	default:
		BUG();
		break;
	}

	mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, intf_sel);
	spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);

	DBG("%s: intf_sel=%08x", mdp5_crtc->name, intf_sel);
	mdp5_ctl_set_intf(mdp5_crtc->ctl, intf);
	flush_mask |= mdp5_ctl_get_flush(mdp5_crtc->ctl);
	flush_mask |= mdp5_lm_get_flush(mdp5_crtc->lm);

	crtc_flush(crtc, flush_mask);
}

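/* return the layer mixer id used by this CRTC */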
int mdp5_crtc_get_lm(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

	if (WARN_ON(!crtc))
		return -EINVAL;

	return mdp5_crtc->lm;
}

/* initialize crtc */
struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
		struct drm_plane *plane, int id)
{
	struct drm_crtc *crtc = NULL;
	struct mdp5_crtc *mdp5_crtc;

	mdp5_crtc = kzalloc(sizeof(*mdp5_crtc), GFP_KERNEL);
	if (!mdp5_crtc)
		return ERR_PTR(-ENOMEM);

	crtc = &mdp5_crtc->base;

	mdp5_crtc->id = id;
	mdp5_crtc->lm = GET_LM_ID(id);

	spin_lock_init(&mdp5_crtc->lm_lock);
	spin_lock_init(&mdp5_crtc->cursor.lock);

	mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq;
	mdp5_crtc->err.irq = mdp5_crtc_err_irq;

	snprintf(mdp5_crtc->name, sizeof(mdp5_crtc->name), "%s:%d",
			pipe2name(mdp5_plane_pipe(plane)), id);

	drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp5_crtc_funcs);

	drm_flip_work_init(&mdp5_crtc->unref_cursor_work,
			"unref cursor", unref_cursor_worker);

	drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs);
	plane->crtc = crtc;

	mdp5_plane_install_properties(plane, &crtc->base);

	return crtc;
}