// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/of_irq.h>

#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_mmu.h"
#include "mdp5_kms.h"

static const char *iommu_ports[] = {
		"mdp_0",
};

static int mdp5_hw_init(struct msm_kms *kms)
{
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
	struct device *dev = &mdp5_kms->pdev->dev;
	unsigned long flags;

	pm_runtime_get_sync(dev);

	/* Magic unknown register writes:
	 *
	 *    W VBIF:0x004 00000001      (mdss_mdp.c:839)
	 *    W MDP5:0x2e0 0xe9          (mdss_mdp.c:839)
	 *    W MDP5:0x2e4 0x55          (mdss_mdp.c:839)
	 *    W MDP5:0x3ac 0xc0000ccc    (mdss_mdp.c:839)
	 *    W MDP5:0x3b4 0xc0000ccc    (mdss_mdp.c:839)
	 *    W MDP5:0x3bc 0xcccccc      (mdss_mdp.c:839)
	 *    W MDP5:0x4a8 0xcccc0c0     (mdss_mdp.c:839)
	 *    W MDP5:0x4b0 0xccccc0c0   (mdss_mdp.c:839)
	 *    W MDP5:0x4b8 0xccccc000   (mdss_mdp.c:839)
	 *
	 * Downstream fbdev driver gets these register offsets/values
	 * from DT.. not really sure what these registers are, or whether
	 * different values are needed for different boards/SoCs, etc.
	 * I guess they are the golden registers.
	 *
	 * Not setting these does not seem to cause any problem.  But
	 * we may be getting lucky with the bootloader initializing
	 * them for us.  OTOH, if we can always count on the bootloader
	 * setting the golden registers, then perhaps we don't need to
	 * care.
	 */

	spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
	mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, 0);
	spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);

	mdp5_ctlm_hw_reset(mdp5_kms->ctlm);

	pm_runtime_put_sync(dev);

	return 0;
}

/* Global/shared object state funcs */

/*
 * This is a helper that returns the private state currently in operation.
 * Note that this would return the "old_state" if called in the atomic check
 * path, and the "new_state" after the atomic swap has been done.
 */
struct mdp5_global_state *
mdp5_get_existing_global_state(struct mdp5_kms *mdp5_kms)
{
	return to_mdp5_global_state(mdp5_kms->glob_state.state);
}

/*
 * This acquires the modeset lock set aside for global state and returns a
 * duplicated private object state.
 */
struct mdp5_global_state *mdp5_get_global_state(struct drm_atomic_state *s)
{
	struct msm_drm_private *priv = s->dev->dev_private;
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
	struct drm_private_state *priv_state;
	int ret;

	ret = drm_modeset_lock(&mdp5_kms->glob_state_lock, s->acquire_ctx);
	if (ret)
		return ERR_PTR(ret);

	priv_state = drm_atomic_get_private_obj_state(s, &mdp5_kms->glob_state);
	if (IS_ERR(priv_state))
		return ERR_CAST(priv_state);

	return to_mdp5_global_state(priv_state);
}
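
/*
 * Typical usage from an atomic_check path (illustrative sketch only): grab
 * the global state through the in-flight atomic state, so that shared
 * resource assignments are duplicated, checked and swapped along with the
 * rest of the commit:
 *
 *	global_state = mdp5_get_global_state(state);
 *	if (IS_ERR(global_state))
 *		return PTR_ERR(global_state);
 */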

static struct drm_private_state *
mdp5_global_duplicate_state(struct drm_private_obj *obj)
{
	struct mdp5_global_state *state;

	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

	return &state->base;
}

static void mdp5_global_destroy_state(struct drm_private_obj *obj,
				      struct drm_private_state *state)
{
	struct mdp5_global_state *mdp5_state = to_mdp5_global_state(state);

	kfree(mdp5_state);
}

static const struct drm_private_state_funcs mdp5_global_state_funcs = {
	.atomic_duplicate_state = mdp5_global_duplicate_state,
	.atomic_destroy_state = mdp5_global_destroy_state,
};

static int mdp5_global_obj_init(struct mdp5_kms *mdp5_kms)
{
	struct mdp5_global_state *state;

	drm_modeset_lock_init(&mdp5_kms->glob_state_lock);

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	state->mdp5_kms = mdp5_kms;

	drm_atomic_private_obj_init(mdp5_kms->dev, &mdp5_kms->glob_state,
				    &state->base,
				    &mdp5_global_state_funcs);
	return 0;
}

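/*
 * Start of an atomic commit: take a runtime PM reference for the duration of
 * the commit and, on hardware with a shared memory pool, let the SMP code
 * apply the pending allocations for the new state.
 */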
static void mdp5_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *state)
{
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
	struct device *dev = &mdp5_kms->pdev->dev;
	struct mdp5_global_state *global_state;

	global_state = mdp5_get_existing_global_state(mdp5_kms);

	pm_runtime_get_sync(dev);

	if (mdp5_kms->smp)
		mdp5_smp_prepare_commit(mdp5_kms->smp, &global_state->smp);
}

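/*
 * End of an atomic commit: wait for vblank so the new state is actually
 * scanned out, let the SMP code finish its part of the transition, and drop
 * the runtime PM reference taken in mdp5_prepare_commit().
 */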
static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state)
{
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
	struct device *dev = &mdp5_kms->pdev->dev;
	struct mdp5_global_state *global_state;

	drm_atomic_helper_wait_for_vblanks(mdp5_kms->dev, state);

	global_state = mdp5_get_existing_global_state(mdp5_kms);

	if (mdp5_kms->smp)
		mdp5_smp_complete_commit(mdp5_kms->smp, &global_state->smp);

	pm_runtime_put_sync(dev);
}

static void mdp5_wait_for_crtc_commit_done(struct msm_kms *kms,
					   struct drm_crtc *crtc)
{
	mdp5_crtc_wait_for_commit_done(crtc);
}

static long mdp5_round_pixclk(struct msm_kms *kms, unsigned long rate,
		struct drm_encoder *encoder)
{
	return rate;
}

static int mdp5_set_split_display(struct msm_kms *kms,
		struct drm_encoder *encoder,
		struct drm_encoder *slave_encoder,
		bool is_cmd_mode)
{
	if (is_cmd_mode)
		return mdp5_cmd_encoder_set_split_display(encoder,
							slave_encoder);
	else
		return mdp5_vid_encoder_set_split_display(encoder,
							  slave_encoder);
}

static void mdp5_set_encoder_mode(struct msm_kms *kms,
				  struct drm_encoder *encoder,
				  bool cmd_mode)
{
	mdp5_encoder_set_intf_mode(encoder, cmd_mode);
}

static void mdp5_kms_destroy(struct msm_kms *kms)
{
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
	struct msm_gem_address_space *aspace = kms->aspace;
	int i;

	for (i = 0; i < mdp5_kms->num_hwmixers; i++)
		mdp5_mixer_destroy(mdp5_kms->hwmixers[i]);

	for (i = 0; i < mdp5_kms->num_hwpipes; i++)
		mdp5_pipe_destroy(mdp5_kms->hwpipes[i]);

	if (aspace) {
		aspace->mmu->funcs->detach(aspace->mmu,
				iommu_ports, ARRAY_SIZE(iommu_ports));
		msm_gem_address_space_put(aspace);
	}
}

#ifdef CONFIG_DEBUG_FS
static int smp_show(struct seq_file *m, void *arg)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
	struct drm_printer p = drm_seq_file_printer(m);

	if (!mdp5_kms->smp) {
		drm_printf(&p, "no SMP pool\n");
		return 0;
	}

	mdp5_smp_dump(mdp5_kms->smp, &p);

	return 0;
}

static struct drm_info_list mdp5_debugfs_list[] = {
		{"smp", smp_show },
};

static int mdp5_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	int ret;

	ret = drm_debugfs_create_files(mdp5_debugfs_list,
			ARRAY_SIZE(mdp5_debugfs_list),
			minor->debugfs_root, minor);

	if (ret) {
		DRM_DEV_ERROR(dev->dev, "could not install mdp5_debugfs_list\n");
		return ret;
	}

	return 0;
}
#endif

static const struct mdp_kms_funcs kms_funcs = {
	.base = {
		.hw_init         = mdp5_hw_init,
		.irq_preinstall  = mdp5_irq_preinstall,
		.irq_postinstall = mdp5_irq_postinstall,
		.irq_uninstall   = mdp5_irq_uninstall,
		.irq             = mdp5_irq,
		.enable_vblank   = mdp5_enable_vblank,
		.disable_vblank  = mdp5_disable_vblank,
		.prepare_commit  = mdp5_prepare_commit,
		.complete_commit = mdp5_complete_commit,
		.wait_for_crtc_commit_done = mdp5_wait_for_crtc_commit_done,
		.get_format      = mdp_get_format,
		.round_pixclk    = mdp5_round_pixclk,
		.set_split_display = mdp5_set_split_display,
		.set_encoder_mode = mdp5_set_encoder_mode,
		.destroy         = mdp5_kms_destroy,
#ifdef CONFIG_DEBUG_FS
		.debugfs_init    = mdp5_kms_debugfs_init,
#endif
	},
	.set_irqmask         = mdp5_set_irqmask,
};

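/*
 * Clock gating helpers, used by the runtime PM callbacks at the bottom of
 * this file. enable_count tracks the enable/disable nesting; an unbalanced
 * disable trips the WARN_ON() below.
 */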
int mdp5_disable(struct mdp5_kms *mdp5_kms)
{
	DBG("");

	mdp5_kms->enable_count--;
	WARN_ON(mdp5_kms->enable_count < 0);

	clk_disable_unprepare(mdp5_kms->ahb_clk);
	clk_disable_unprepare(mdp5_kms->axi_clk);
	clk_disable_unprepare(mdp5_kms->core_clk);
	if (mdp5_kms->lut_clk)
		clk_disable_unprepare(mdp5_kms->lut_clk);

	return 0;
}

int mdp5_enable(struct mdp5_kms *mdp5_kms)
{
	DBG("");

	mdp5_kms->enable_count++;

	clk_prepare_enable(mdp5_kms->ahb_clk);
	clk_prepare_enable(mdp5_kms->axi_clk);
	clk_prepare_enable(mdp5_kms->core_clk);
	if (mdp5_kms->lut_clk)
		clk_prepare_enable(mdp5_kms->lut_clk);

	return 0;
}

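/*
 * Construct an encoder for the given interface/CTL pair and track it in the
 * top-level msm_drm_private encoder list.
 */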
static struct drm_encoder *construct_encoder(struct mdp5_kms *mdp5_kms,
					     struct mdp5_interface *intf,
					     struct mdp5_ctl *ctl)
{
	struct drm_device *dev = mdp5_kms->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_encoder *encoder;

	encoder = mdp5_encoder_init(dev, intf, ctl);
	if (IS_ERR(encoder)) {
		DRM_DEV_ERROR(dev->dev, "failed to construct encoder\n");
		return encoder;
	}

	priv->encoders[priv->num_encoders++] = encoder;

	return encoder;
}

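/*
 * Map a hardware interface number to its index in priv->dsi[] by counting
 * how many DSI interfaces appear before it in the hw config's connect table.
 */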
static int get_dsi_id_from_intf(const struct mdp5_cfg_hw *hw_cfg, int intf_num)
{
	const enum mdp5_intf_type *intfs = hw_cfg->intf.connect;
	const int intf_cnt = ARRAY_SIZE(hw_cfg->intf.connect);
	int id = 0, i;

	for (i = 0; i < intf_cnt; i++) {
		if (intfs[i] == INTF_DSI) {
			if (intf_num == i)
				return id;

			id++;
		}
	}

	return -EINVAL;
}

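/*
 * For one physical interface: request a CTL, construct the encoder, and let
 * the matching connector driver (eDP/HDMI/DSI) finish its modeset
 * initialization against that encoder. Interfaces whose connector device is
 * not present are silently skipped.
 */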
static int modeset_init_intf(struct mdp5_kms *mdp5_kms,
			     struct mdp5_interface *intf)
{
	struct drm_device *dev = mdp5_kms->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct mdp5_ctl_manager *ctlm = mdp5_kms->ctlm;
	struct mdp5_ctl *ctl;
	struct drm_encoder *encoder;
	int ret = 0;

	switch (intf->type) {
	case INTF_eDP:
		if (!priv->edp)
			break;

		ctl = mdp5_ctlm_request(ctlm, intf->num);
		if (!ctl) {
			ret = -EINVAL;
			break;
		}

		encoder = construct_encoder(mdp5_kms, intf, ctl);
		if (IS_ERR(encoder)) {
			ret = PTR_ERR(encoder);
			break;
		}

		ret = msm_edp_modeset_init(priv->edp, dev, encoder);
		break;
	case INTF_HDMI:
		if (!priv->hdmi)
			break;

		ctl = mdp5_ctlm_request(ctlm, intf->num);
		if (!ctl) {
			ret = -EINVAL;
			break;
		}

		encoder = construct_encoder(mdp5_kms, intf, ctl);
		if (IS_ERR(encoder)) {
			ret = PTR_ERR(encoder);
			break;
		}

		ret = msm_hdmi_modeset_init(priv->hdmi, dev, encoder);
		break;
	case INTF_DSI:
	{
		const struct mdp5_cfg_hw *hw_cfg =
			mdp5_cfg_get_hw_config(mdp5_kms->cfg);
		int dsi_id = get_dsi_id_from_intf(hw_cfg, intf->num);

		if ((dsi_id >= ARRAY_SIZE(priv->dsi)) || (dsi_id < 0)) {
			DRM_DEV_ERROR(dev->dev, "failed to find dsi from intf %d\n",
				intf->num);
			ret = -EINVAL;
			break;
		}

		if (!priv->dsi[dsi_id])
			break;

		ctl = mdp5_ctlm_request(ctlm, intf->num);
		if (!ctl) {
			ret = -EINVAL;
			break;
		}

		encoder = construct_encoder(mdp5_kms, intf, ctl);
		if (IS_ERR(encoder)) {
			ret = PTR_ERR(encoder);
			break;
		}

		ret = msm_dsi_modeset_init(priv->dsi[dsi_id], dev, encoder);
		break;
	}
	default:
		DRM_DEV_ERROR(dev->dev, "unknown intf: %d\n", intf->type);
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int modeset_init(struct mdp5_kms *mdp5_kms)
{
	struct drm_device *dev = mdp5_kms->dev;
	struct msm_drm_private *priv = dev->dev_private;
	const struct mdp5_cfg_hw *hw_cfg;
	unsigned int num_crtcs;
	int i, ret, pi = 0, ci = 0;
	struct drm_plane *primary[MAX_BASES] = { NULL };
	struct drm_plane *cursor[MAX_BASES] = { NULL };

	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);

	/*
	 * Construct encoders and perform modeset initialization of the
	 * connector devices for each external display interface.
	 */
	for (i = 0; i < mdp5_kms->num_intfs; i++) {
		ret = modeset_init_intf(mdp5_kms, mdp5_kms->intfs[i]);
		if (ret)
			goto fail;
	}

	/*
	 * Ideally we should have fewer encoders (set up by parsing the MDP5
	 * interfaces) than layer mixers present in HW, but let's be safe
	 * here anyway.
	 */
	num_crtcs = min(priv->num_encoders, mdp5_kms->num_hwmixers);

	/*
	 * Construct planes equaling the number of hw pipes, and CRTCs for the
	 * N encoders set up by the driver. The first N planes become primary
	 * planes for the CRTCs, with the remainder as overlay planes:
	 */
	for (i = 0; i < mdp5_kms->num_hwpipes; i++) {
		struct mdp5_hw_pipe *hwpipe = mdp5_kms->hwpipes[i];
		struct drm_plane *plane;
		enum drm_plane_type type;

		if (i < num_crtcs)
			type = DRM_PLANE_TYPE_PRIMARY;
		else if (hwpipe->caps & MDP_PIPE_CAP_CURSOR)
			type = DRM_PLANE_TYPE_CURSOR;
		else
			type = DRM_PLANE_TYPE_OVERLAY;

		plane = mdp5_plane_init(dev, type);
		if (IS_ERR(plane)) {
			ret = PTR_ERR(plane);
			DRM_DEV_ERROR(dev->dev, "failed to construct plane %d (%d)\n", i, ret);
			goto fail;
		}
		priv->planes[priv->num_planes++] = plane;

		if (type == DRM_PLANE_TYPE_PRIMARY)
			primary[pi++] = plane;
		if (type == DRM_PLANE_TYPE_CURSOR)
			cursor[ci++] = plane;
	}

	for (i = 0; i < num_crtcs; i++) {
		struct drm_crtc *crtc;

		crtc = mdp5_crtc_init(dev, primary[i], cursor[i], i);
		if (IS_ERR(crtc)) {
			ret = PTR_ERR(crtc);
			DRM_DEV_ERROR(dev->dev, "failed to construct crtc %d (%d)\n", i, ret);
			goto fail;
		}
		priv->crtcs[priv->num_crtcs++] = crtc;
	}

	/*
	 * Now that we know the number of crtcs we've created, set the
	 * possible crtcs for the encoders.
	 */
	for (i = 0; i < priv->num_encoders; i++) {
		struct drm_encoder *encoder = priv->encoders[i];

		encoder->possible_crtcs = (1 << priv->num_crtcs) - 1;
	}

	return 0;

fail:
	return ret;
}

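/*
 * Read major/minor from the HW_VERSION register. The register access needs
 * the device powered, hence the runtime PM get/put around it.
 */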
static void read_mdp_hw_revision(struct mdp5_kms *mdp5_kms,
				 u32 *major, u32 *minor)
{
	struct device *dev = &mdp5_kms->pdev->dev;
	u32 version;

	pm_runtime_get_sync(dev);
	version = mdp5_read(mdp5_kms, REG_MDP5_HW_VERSION);
	pm_runtime_put_sync(dev);

	*major = FIELD(version, MDP5_HW_VERSION_MAJOR);
	*minor = FIELD(version, MDP5_HW_VERSION_MINOR);

	DRM_DEV_INFO(dev, "MDP5 version v%d.%d", *major, *minor);
}

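/*
 * Look up a named clock for the MDP5 platform device. A missing mandatory
 * clock is a hard error; missing optional clocks are just skipped.
 */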
static int get_clk(struct platform_device *pdev, struct clk **clkp,
		const char *name, bool mandatory)
{
	struct device *dev = &pdev->dev;
	struct clk *clk = msm_clk_get(pdev, name);
	if (IS_ERR(clk) && mandatory) {
		DRM_DEV_ERROR(dev, "failed to get %s (%ld)\n", name, PTR_ERR(clk));
		return PTR_ERR(clk);
	}
	if (IS_ERR(clk))
		DBG("skipping %s", name);
	else
		*clkp = clk;

	return 0;
}

static struct drm_encoder *get_encoder_from_crtc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_encoder *encoder;

	drm_for_each_encoder(encoder, dev)
		if (encoder->crtc == crtc)
			return encoder;

	return NULL;
}

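/*
 * Scanout position query used by the DRM core for high-precision vblank
 * timestamps: translate the encoder's line counter (which starts counting at
 * the VSYNC pulse) into a position relative to the start of active video.
 */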
static bool mdp5_get_scanoutpos(struct drm_device *dev, unsigned int pipe,
				bool in_vblank_irq, int *vpos, int *hpos,
				ktime_t *stime, ktime_t *etime,
				const struct drm_display_mode *mode)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_crtc *crtc;
	struct drm_encoder *encoder;
	int line, vsw, vbp, vactive_start, vactive_end, vfp_end;

	crtc = priv->crtcs[pipe];
	if (!crtc) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return false;
	}

	encoder = get_encoder_from_crtc(crtc);
	if (!encoder) {
		DRM_ERROR("no encoder found for crtc %d\n", pipe);
		return false;
	}

	vsw = mode->crtc_vsync_end - mode->crtc_vsync_start;
	vbp = mode->crtc_vtotal - mode->crtc_vsync_end;

	/*
	 * the line counter is 1 at the start of the VSYNC pulse and VTOTAL at
	 * the end of VFP. Translate the porch values relative to the line
	 * counter positions.
	 */

	vactive_start = vsw + vbp + 1;

	vactive_end = vactive_start + mode->crtc_vdisplay;

	/* last scan line before VSYNC */
	vfp_end = mode->crtc_vtotal;

	if (stime)
		*stime = ktime_get();

	line = mdp5_encoder_get_linecount(encoder);

	if (line < vactive_start) {
		line -= vactive_start;
	} else if (line > vactive_end) {
		line = line - vfp_end - vactive_start;
	} else {
		line -= vactive_start;
	}

	*vpos = line;
	*hpos = 0;

	if (etime)
		*etime = ktime_get();

	return true;
}

static u32 mdp5_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_crtc *crtc;
	struct drm_encoder *encoder;

	if (pipe >= priv->num_crtcs)
		return 0;

	crtc = priv->crtcs[pipe];
	if (!crtc)
		return 0;

	encoder = get_encoder_from_crtc(crtc);
	if (!encoder)
		return 0;

	return mdp5_encoder_get_framecount(encoder);
}

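/*
 * Second-stage KMS setup, called from the core msm driver once mdp5_init()
 * below has populated priv->kms: hook up the IRQ, attach the IOMMU, and
 * build the modeset objects.
 */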
struct msm_kms *mdp5_kms_init(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct platform_device *pdev;
	struct mdp5_kms *mdp5_kms;
	struct mdp5_cfg *config;
	struct msm_kms *kms;
	struct msm_gem_address_space *aspace;
	int irq, i, ret;

	/* priv->kms would have been populated by the MDP5 driver */
	kms = priv->kms;
	if (!kms)
		return NULL;

	mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));

	mdp_kms_init(&mdp5_kms->base, &kms_funcs);

	pdev = mdp5_kms->pdev;

	irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
	if (irq < 0) {
		ret = irq;
		DRM_DEV_ERROR(&pdev->dev, "failed to get irq: %d\n", ret);
		goto fail;
	}

	kms->irq = irq;

	config = mdp5_cfg_get_config(mdp5_kms->cfg);

	/* make sure things are off before attaching iommu (bootloader could
	 * have left things on, in which case we'll start getting faults if
	 * we don't disable):
	 */
	pm_runtime_get_sync(&pdev->dev);
	for (i = 0; i < MDP5_INTF_NUM_MAX; i++) {
		if (mdp5_cfg_intf_is_virtual(config->hw->intf.connect[i]) ||
		    !config->hw->intf.base[i])
			continue;
		mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(i), 0);

		mdp5_write(mdp5_kms, REG_MDP5_INTF_FRAME_LINE_COUNT_EN(i), 0x3);
	}
	mdelay(16);

	if (config->platform.iommu) {
		aspace = msm_gem_address_space_create(&pdev->dev,
				config->platform.iommu, "mdp5");
		if (IS_ERR(aspace)) {
			ret = PTR_ERR(aspace);
			goto fail;
		}

		kms->aspace = aspace;

		ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports,
				ARRAY_SIZE(iommu_ports));
		if (ret) {
			DRM_DEV_ERROR(&pdev->dev, "failed to attach iommu: %d\n",
				ret);
			goto fail;
		}
	} else {
		DRM_DEV_INFO(&pdev->dev,
			 "no iommu, fallback to phys contig buffers for scanout\n");
		aspace = NULL;
	}

	pm_runtime_put_sync(&pdev->dev);

	ret = modeset_init(mdp5_kms);
	if (ret) {
		DRM_DEV_ERROR(&pdev->dev, "modeset_init failed: %d\n", ret);
		goto fail;
	}

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;
	dev->mode_config.max_width = 0xffff;
	dev->mode_config.max_height = 0xffff;

	dev->driver->get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos;
	dev->driver->get_scanout_position = mdp5_get_scanoutpos;
	dev->driver->get_vblank_counter = mdp5_get_vblank_counter;
	dev->max_vblank_count = 0xffffffff;
	dev->vblank_disable_immediate = true;

	return kms;
fail:
	if (kms)
		mdp5_kms_destroy(kms);
	return ERR_PTR(ret);
}

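/*
 * Platform-device-level teardown, also used as the error path of mdp5_init()
 * below.
 */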
static void mdp5_destroy(struct platform_device *pdev)
{
	struct mdp5_kms *mdp5_kms = platform_get_drvdata(pdev);
	int i;

	if (mdp5_kms->ctlm)
		mdp5_ctlm_destroy(mdp5_kms->ctlm);
	if (mdp5_kms->smp)
		mdp5_smp_destroy(mdp5_kms->smp);
	if (mdp5_kms->cfg)
		mdp5_cfg_destroy(mdp5_kms->cfg);

	for (i = 0; i < mdp5_kms->num_intfs; i++)
		kfree(mdp5_kms->intfs[i]);

	if (mdp5_kms->rpm_enabled)
		pm_runtime_disable(&pdev->dev);

	drm_atomic_private_obj_fini(&mdp5_kms->glob_state);
	drm_modeset_lock_fini(&mdp5_kms->glob_state_lock);
}

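/*
 * Wrap each source pipe (SSPP) described by the hw config in a mdp5_hw_pipe
 * object and add it to the kms hwpipe table.
 */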
static int construct_pipes(struct mdp5_kms *mdp5_kms, int cnt,
		const enum mdp5_pipe *pipes, const uint32_t *offsets,
		uint32_t caps)
{
	struct drm_device *dev = mdp5_kms->dev;
	int i, ret;

	for (i = 0; i < cnt; i++) {
		struct mdp5_hw_pipe *hwpipe;

		hwpipe = mdp5_pipe_init(pipes[i], offsets[i], caps);
		if (IS_ERR(hwpipe)) {
			ret = PTR_ERR(hwpipe);
			DRM_DEV_ERROR(dev->dev, "failed to construct pipe for %s (%d)\n",
					pipe2name(pipes[i]), ret);
			return ret;
		}
		hwpipe->idx = mdp5_kms->num_hwpipes;
		mdp5_kms->hwpipes[mdp5_kms->num_hwpipes++] = hwpipe;
	}

	return 0;
}

static int hwpipe_init(struct mdp5_kms *mdp5_kms)
{
	static const enum mdp5_pipe rgb_planes[] = {
			SSPP_RGB0, SSPP_RGB1, SSPP_RGB2, SSPP_RGB3,
	};
	static const enum mdp5_pipe vig_planes[] = {
			SSPP_VIG0, SSPP_VIG1, SSPP_VIG2, SSPP_VIG3,
	};
	static const enum mdp5_pipe dma_planes[] = {
			SSPP_DMA0, SSPP_DMA1,
	};
	static const enum mdp5_pipe cursor_planes[] = {
			SSPP_CURSOR0, SSPP_CURSOR1,
	};
	const struct mdp5_cfg_hw *hw_cfg;
	int ret;

	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);

	/* Construct RGB pipes: */
	ret = construct_pipes(mdp5_kms, hw_cfg->pipe_rgb.count, rgb_planes,
			hw_cfg->pipe_rgb.base, hw_cfg->pipe_rgb.caps);
	if (ret)
		return ret;

	/* Construct video (VIG) pipes: */
	ret = construct_pipes(mdp5_kms, hw_cfg->pipe_vig.count, vig_planes,
			hw_cfg->pipe_vig.base, hw_cfg->pipe_vig.caps);
	if (ret)
		return ret;

	/* Construct DMA pipes: */
	ret = construct_pipes(mdp5_kms, hw_cfg->pipe_dma.count, dma_planes,
			hw_cfg->pipe_dma.base, hw_cfg->pipe_dma.caps);
	if (ret)
		return ret;

	/* Construct cursor pipes: */
	ret = construct_pipes(mdp5_kms, hw_cfg->pipe_cursor.count,
			cursor_planes, hw_cfg->pipe_cursor.base,
			hw_cfg->pipe_cursor.caps);
	if (ret)
		return ret;

	return 0;
}

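/* Create a mdp5_hw_mixer object for each layer mixer (LM) in the hw config. */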
static int hwmixer_init(struct mdp5_kms *mdp5_kms)
{
	struct drm_device *dev = mdp5_kms->dev;
	const struct mdp5_cfg_hw *hw_cfg;
	int i, ret;

	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);

	for (i = 0; i < hw_cfg->lm.count; i++) {
		struct mdp5_hw_mixer *mixer;

		mixer = mdp5_mixer_init(&hw_cfg->lm.instances[i]);
		if (IS_ERR(mixer)) {
			ret = PTR_ERR(mixer);
			DRM_DEV_ERROR(dev->dev, "failed to construct LM%d (%d)\n",
				i, ret);
			return ret;
		}

		mixer->idx = mdp5_kms->num_hwmixers;
		mdp5_kms->hwmixers[mdp5_kms->num_hwmixers++] = mixer;
	}

	return 0;
}

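/*
 * Create a mdp5_interface object for every interface in the hw config's
 * connect table that is not marked INTF_DISABLED.
 */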
static int interface_init(struct mdp5_kms *mdp5_kms)
{
	struct drm_device *dev = mdp5_kms->dev;
	const struct mdp5_cfg_hw *hw_cfg;
	const enum mdp5_intf_type *intf_types;
	int i;

	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
	intf_types = hw_cfg->intf.connect;

	for (i = 0; i < ARRAY_SIZE(hw_cfg->intf.connect); i++) {
		struct mdp5_interface *intf;

		if (intf_types[i] == INTF_DISABLED)
			continue;

		intf = kzalloc(sizeof(*intf), GFP_KERNEL);
		if (!intf) {
			DRM_DEV_ERROR(dev->dev, "failed to construct INTF%d\n", i);
			return -ENOMEM;
		}

		intf->num = i;
		intf->type = intf_types[i];
		intf->mode = MDP5_INTF_MODE_NONE;
		intf->idx = mdp5_kms->num_intfs;
		mdp5_kms->intfs[mdp5_kms->num_intfs++] = intf;
	}

	return 0;
}

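/*
 * First-stage init, run at component bind time: map registers, acquire
 * clocks, read the hardware revision, and instantiate the cfg/SMP/CTL/
 * pipe/mixer/interface objects. The kms pointer published at the end is
 * picked up later by mdp5_kms_init().
 */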
static int mdp5_init(struct platform_device *pdev, struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct mdp5_kms *mdp5_kms;
	struct mdp5_cfg *config;
	u32 major, minor;
	int ret;

	mdp5_kms = devm_kzalloc(&pdev->dev, sizeof(*mdp5_kms), GFP_KERNEL);
	if (!mdp5_kms) {
		ret = -ENOMEM;
		goto fail;
	}

	platform_set_drvdata(pdev, mdp5_kms);

	spin_lock_init(&mdp5_kms->resource_lock);

	mdp5_kms->dev = dev;
	mdp5_kms->pdev = pdev;

	ret = mdp5_global_obj_init(mdp5_kms);
	if (ret)
		goto fail;

	mdp5_kms->mmio = msm_ioremap(pdev, "mdp_phys", "MDP5");
	if (IS_ERR(mdp5_kms->mmio)) {
		ret = PTR_ERR(mdp5_kms->mmio);
		goto fail;
	}

	/* mandatory clocks: */
	ret = get_clk(pdev, &mdp5_kms->axi_clk, "bus", true);
	if (ret)
		goto fail;
	ret = get_clk(pdev, &mdp5_kms->ahb_clk, "iface", true);
	if (ret)
		goto fail;
	ret = get_clk(pdev, &mdp5_kms->core_clk, "core", true);
	if (ret)
		goto fail;
	ret = get_clk(pdev, &mdp5_kms->vsync_clk, "vsync", true);
	if (ret)
		goto fail;

	/* optional clocks: */
	get_clk(pdev, &mdp5_kms->lut_clk, "lut", false);

	/* we need to set a default rate before enabling.  Set a safe
	 * rate first, then figure out hw revision, and then set a
	 * more optimal rate:
	 */
	clk_set_rate(mdp5_kms->core_clk, 200000000);

	pm_runtime_enable(&pdev->dev);
	mdp5_kms->rpm_enabled = true;

	read_mdp_hw_revision(mdp5_kms, &major, &minor);

	mdp5_kms->cfg = mdp5_cfg_init(mdp5_kms, major, minor);
	if (IS_ERR(mdp5_kms->cfg)) {
		ret = PTR_ERR(mdp5_kms->cfg);
		mdp5_kms->cfg = NULL;
		goto fail;
	}

	config = mdp5_cfg_get_config(mdp5_kms->cfg);
	mdp5_kms->caps = config->hw->mdp.caps;

	/* TODO: compute core clock rate at runtime */
	clk_set_rate(mdp5_kms->core_clk, config->hw->max_clk);

	/*
	 * Some chipsets have a Shared Memory Pool (SMP), while others
	 * have dedicated latency buffering per source pipe instead;
	 * this section initializes the SMP:
	 */
	if (mdp5_kms->caps & MDP_CAP_SMP) {
		mdp5_kms->smp = mdp5_smp_init(mdp5_kms, &config->hw->smp);
		if (IS_ERR(mdp5_kms->smp)) {
			ret = PTR_ERR(mdp5_kms->smp);
			mdp5_kms->smp = NULL;
			goto fail;
		}
	}

	mdp5_kms->ctlm = mdp5_ctlm_init(dev, mdp5_kms->mmio, mdp5_kms->cfg);
	if (IS_ERR(mdp5_kms->ctlm)) {
		ret = PTR_ERR(mdp5_kms->ctlm);
		mdp5_kms->ctlm = NULL;
		goto fail;
	}

	ret = hwpipe_init(mdp5_kms);
	if (ret)
		goto fail;

	ret = hwmixer_init(mdp5_kms);
	if (ret)
		goto fail;

	ret = interface_init(mdp5_kms);
	if (ret)
		goto fail;

	/* set uninit-ed kms */
	priv->kms = &mdp5_kms->base.base;

	return 0;
fail:
	mdp5_destroy(pdev);
	return ret;
}

static int mdp5_bind(struct device *dev, struct device *master, void *data)
{
	struct drm_device *ddev = dev_get_drvdata(master);
	struct platform_device *pdev = to_platform_device(dev);

	DBG("");

	return mdp5_init(pdev, ddev);
}

static void mdp5_unbind(struct device *dev, struct device *master,
			void *data)
{
	struct platform_device *pdev = to_platform_device(dev);

	mdp5_destroy(pdev);
}

static const struct component_ops mdp5_ops = {
	.bind   = mdp5_bind,
	.unbind = mdp5_unbind,
};

static int mdp5_dev_probe(struct platform_device *pdev)
{
	DBG("");
	return component_add(&pdev->dev, &mdp5_ops);
}

static int mdp5_dev_remove(struct platform_device *pdev)
{
	DBG("");
	component_del(&pdev->dev, &mdp5_ops);
	return 0;
}

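/*
 * Runtime PM callbacks: gate/ungate the MDP clocks via mdp5_disable() and
 * mdp5_enable() above.
 */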
static __maybe_unused int mdp5_runtime_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct mdp5_kms *mdp5_kms = platform_get_drvdata(pdev);

	DBG("");

	return mdp5_disable(mdp5_kms);
}

static __maybe_unused int mdp5_runtime_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct mdp5_kms *mdp5_kms = platform_get_drvdata(pdev);

	DBG("");

	return mdp5_enable(mdp5_kms);
}

static const struct dev_pm_ops mdp5_pm_ops = {
	SET_RUNTIME_PM_OPS(mdp5_runtime_suspend, mdp5_runtime_resume, NULL)
};

static const struct of_device_id mdp5_dt_match[] = {
	{ .compatible = "qcom,mdp5", },
	/* to support downstream DT files */
	{ .compatible = "qcom,mdss_mdp", },
	{}
};
MODULE_DEVICE_TABLE(of, mdp5_dt_match);

static struct platform_driver mdp5_driver = {
	.probe = mdp5_dev_probe,
	.remove = mdp5_dev_remove,
	.driver = {
		.name = "msm_mdp",
		.of_match_table = mdp5_dt_match,
		.pm = &mdp5_pm_ops,
	},
};

void __init msm_mdp_register(void)
{
	DBG("");
	platform_driver_register(&mdp5_driver);
}

void __exit msm_mdp_unregister(void)
{
	DBG("");
	platform_driver_unregister(&mdp5_driver);
}