/*
 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/of_irq.h>

#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_mmu.h"
#include "mdp5_kms.h"

static const char *iommu_ports[] = {
		"mdp_0",
};

static int mdp5_hw_init(struct msm_kms *kms)
{
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
	struct platform_device *pdev = mdp5_kms->pdev;
	unsigned long flags;

	pm_runtime_get_sync(&pdev->dev);
	mdp5_enable(mdp5_kms);

	/* Magic unknown register writes:
	 *
	 *    W VBIF:0x004 00000001      (mdss_mdp.c:839)
	 *    W MDP5:0x2e0 0xe9          (mdss_mdp.c:839)
	 *    W MDP5:0x2e4 0x55          (mdss_mdp.c:839)
	 *    W MDP5:0x3ac 0xc0000ccc    (mdss_mdp.c:839)
	 *    W MDP5:0x3b4 0xc0000ccc    (mdss_mdp.c:839)
	 *    W MDP5:0x3bc 0xcccccc      (mdss_mdp.c:839)
	 *    W MDP5:0x4a8 0xcccc0c0     (mdss_mdp.c:839)
	 *    W MDP5:0x4b0 0xccccc0c0   (mdss_mdp.c:839)
	 *    W MDP5:0x4b8 0xccccc000   (mdss_mdp.c:839)
	 *
	 * Downstream fbdev driver gets these register offsets/values
	 * from DT.. not really sure what these registers are or if
	 * different values for different boards/SoC's, etc.  I guess
	 * they are the golden registers.
	 *
	 * Not setting these does not seem to cause any problem.  But
	 * we may be getting lucky with the bootloader initializing
	 * them for us.  OTOH, if we can always count on the bootloader
	 * setting the golden registers, then perhaps we don't need to
	 * care.
	 */

	spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
	mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, 0);
	spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);

	mdp5_ctlm_hw_reset(mdp5_kms->ctlm);

	mdp5_disable(mdp5_kms);
	pm_runtime_put_sync(&pdev->dev);

	return 0;
}

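/*
 * Global driver state (shared across CRTCs/planes) is duplicated into each
 * drm_atomic_state on first use, under state_lock, so that a commit can be
 * validated against the copy and then swapped in atomically by
 * mdp5_swap_state() below.
 */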
struct mdp5_state *mdp5_get_state(struct drm_atomic_state *s)
{
	struct msm_drm_private *priv = s->dev->dev_private;
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
	struct msm_kms_state *state = to_kms_state(s);
	struct mdp5_state *new_state;
	int ret;

	if (state->state)
		return state->state;

	ret = drm_modeset_lock(&mdp5_kms->state_lock, s->acquire_ctx);
	if (ret)
		return ERR_PTR(ret);

	new_state = kmalloc(sizeof(*mdp5_kms->state), GFP_KERNEL);
	if (!new_state)
		return ERR_PTR(-ENOMEM);

	/* Copy state: */
	new_state->hwpipe = mdp5_kms->state->hwpipe;
	if (mdp5_kms->smp)
		new_state->smp = mdp5_kms->state->smp;

	state->state = new_state;

	return new_state;
}

static void mdp5_swap_state(struct msm_kms *kms, struct drm_atomic_state *state)
{
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
	swap(to_kms_state(state)->state, mdp5_kms->state);
}

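/*
 * prepare_commit/complete_commit bracket the atomic flush: the MDP clocks are
 * held for the duration, and on SMP hardware the staged shared-memory-pool
 * allocation is programmed before the flush and cleaned up afterwards.
 */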
static void mdp5_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *state)
{
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));

	mdp5_enable(mdp5_kms);

	if (mdp5_kms->smp)
		mdp5_smp_prepare_commit(mdp5_kms->smp, &mdp5_kms->state->smp);
}

static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state)
{
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));

	if (mdp5_kms->smp)
		mdp5_smp_complete_commit(mdp5_kms->smp, &mdp5_kms->state->smp);

	mdp5_disable(mdp5_kms);
}

static void mdp5_wait_for_crtc_commit_done(struct msm_kms *kms,
						struct drm_crtc *crtc)
{
	mdp5_crtc_wait_for_commit_done(crtc);
}

static long mdp5_round_pixclk(struct msm_kms *kms, unsigned long rate,
		struct drm_encoder *encoder)
{
	return rate;
}

static int mdp5_set_split_display(struct msm_kms *kms,
		struct drm_encoder *encoder,
		struct drm_encoder *slave_encoder,
		bool is_cmd_mode)
{
	if (is_cmd_mode)
		return mdp5_cmd_encoder_set_split_display(encoder,
							slave_encoder);
	else
		return mdp5_vid_encoder_set_split_display(encoder,
							  slave_encoder);
}

static void mdp5_set_encoder_mode(struct msm_kms *kms,
				  struct drm_encoder *encoder,
				  bool cmd_mode)
{
	mdp5_encoder_set_intf_mode(encoder, cmd_mode);
}

static void mdp5_kms_destroy(struct msm_kms *kms)
{
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
	struct msm_gem_address_space *aspace = mdp5_kms->aspace;
	int i;

	for (i = 0; i < mdp5_kms->num_hwpipes; i++)
		mdp5_pipe_destroy(mdp5_kms->hwpipes[i]);

	if (aspace) {
		aspace->mmu->funcs->detach(aspace->mmu,
				iommu_ports, ARRAY_SIZE(iommu_ports));
		msm_gem_address_space_destroy(aspace);
	}
}

#ifdef CONFIG_DEBUG_FS
static int smp_show(struct seq_file *m, void *arg)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
	struct drm_printer p = drm_seq_file_printer(m);

	if (!mdp5_kms->smp) {
		drm_printf(&p, "no SMP pool\n");
		return 0;
	}

	mdp5_smp_dump(mdp5_kms->smp, &p);

	return 0;
}

static struct drm_info_list mdp5_debugfs_list[] = {
		{"smp", smp_show },
};

static int mdp5_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	int ret;

	ret = drm_debugfs_create_files(mdp5_debugfs_list,
			ARRAY_SIZE(mdp5_debugfs_list),
			minor->debugfs_root, minor);

	if (ret) {
		dev_err(dev->dev, "could not install mdp5_debugfs_list\n");
		return ret;
	}

	return 0;
}

static void mdp5_kms_debugfs_cleanup(struct msm_kms *kms, struct drm_minor *minor)
{
	drm_debugfs_remove_files(mdp5_debugfs_list,
			ARRAY_SIZE(mdp5_debugfs_list), minor);
}
#endif

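/* KMS entrypoints handed to the core msm driver via mdp_kms_init() */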
static const struct mdp_kms_funcs kms_funcs = {
	.base = {
		.hw_init         = mdp5_hw_init,
		.irq_preinstall  = mdp5_irq_preinstall,
		.irq_postinstall = mdp5_irq_postinstall,
		.irq_uninstall   = mdp5_irq_uninstall,
		.irq             = mdp5_irq,
		.enable_vblank   = mdp5_enable_vblank,
		.disable_vblank  = mdp5_disable_vblank,
		.swap_state      = mdp5_swap_state,
		.prepare_commit  = mdp5_prepare_commit,
		.complete_commit = mdp5_complete_commit,
		.wait_for_crtc_commit_done = mdp5_wait_for_crtc_commit_done,
		.get_format      = mdp_get_format,
		.round_pixclk    = mdp5_round_pixclk,
		.set_split_display = mdp5_set_split_display,
		.set_encoder_mode = mdp5_set_encoder_mode,
		.destroy         = mdp5_kms_destroy,
#ifdef CONFIG_DEBUG_FS
		.debugfs_init    = mdp5_kms_debugfs_init,
		.debugfs_cleanup = mdp5_kms_debugfs_cleanup,
#endif
	},
	.set_irqmask         = mdp5_set_irqmask,
};

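/*
 * Manual clock gating for the MDP5 block: the AHB (register interface), AXI
 * (bus) and core clocks are always present, while lut_clk exists only on some
 * SoCs, hence the NULL check.
 */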
int mdp5_disable(struct mdp5_kms *mdp5_kms)
{
	DBG("");

	clk_disable_unprepare(mdp5_kms->ahb_clk);
	clk_disable_unprepare(mdp5_kms->axi_clk);
	clk_disable_unprepare(mdp5_kms->core_clk);
	if (mdp5_kms->lut_clk)
		clk_disable_unprepare(mdp5_kms->lut_clk);

	return 0;
}

int mdp5_enable(struct mdp5_kms *mdp5_kms)
{
	DBG("");

	clk_prepare_enable(mdp5_kms->ahb_clk);
	clk_prepare_enable(mdp5_kms->axi_clk);
	clk_prepare_enable(mdp5_kms->core_clk);
	if (mdp5_kms->lut_clk)
		clk_prepare_enable(mdp5_kms->lut_clk);

	return 0;
}

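/*
 * Construct a drm_encoder for one hardware interface (INTF) with the given
 * CTL, and track it in the msm driver's encoder list.
 */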
static struct drm_encoder *construct_encoder(struct mdp5_kms *mdp5_kms,
		enum mdp5_intf_type intf_type, int intf_num,
		struct mdp5_ctl *ctl)
{
	struct drm_device *dev = mdp5_kms->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_encoder *encoder;
	struct mdp5_interface intf = {
			.num = intf_num,
			.type = intf_type,
			.mode = MDP5_INTF_MODE_NONE,
	};

	encoder = mdp5_encoder_init(dev, &intf, ctl);
	if (IS_ERR(encoder)) {
		dev_err(dev->dev, "failed to construct encoder\n");
		return encoder;
	}

	priv->encoders[priv->num_encoders++] = encoder;

	return encoder;
}

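/*
 * Translate an INTF index into a DSI instance id by counting how many DSI
 * interfaces appear before it in the hw config's connect table.
 */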
static int get_dsi_id_from_intf(const struct mdp5_cfg_hw *hw_cfg, int intf_num)
{
	const enum mdp5_intf_type *intfs = hw_cfg->intf.connect;
	const int intf_cnt = ARRAY_SIZE(hw_cfg->intf.connect);
	int id = 0, i;

	for (i = 0; i < intf_cnt; i++) {
		if (intfs[i] == INTF_DSI) {
			if (intf_num == i)
				return id;

			id++;
		}
	}

	return -EINVAL;
}

static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num)
{
	struct drm_device *dev = mdp5_kms->dev;
	struct msm_drm_private *priv = dev->dev_private;
	const struct mdp5_cfg_hw *hw_cfg =
					mdp5_cfg_get_hw_config(mdp5_kms->cfg);
	enum mdp5_intf_type intf_type = hw_cfg->intf.connect[intf_num];
	struct mdp5_ctl_manager *ctlm = mdp5_kms->ctlm;
	struct mdp5_ctl *ctl;
	struct drm_encoder *encoder;
	int ret = 0;

	switch (intf_type) {
	case INTF_DISABLED:
		break;
	case INTF_eDP:
		if (!priv->edp)
			break;

		ctl = mdp5_ctlm_request(ctlm, intf_num);
		if (!ctl) {
			ret = -EINVAL;
			break;
		}

		encoder = construct_encoder(mdp5_kms, INTF_eDP, intf_num, ctl);
		if (IS_ERR(encoder)) {
			ret = PTR_ERR(encoder);
			break;
		}

		ret = msm_edp_modeset_init(priv->edp, dev, encoder);
		break;
	case INTF_HDMI:
		if (!priv->hdmi)
			break;

		ctl = mdp5_ctlm_request(ctlm, intf_num);
		if (!ctl) {
			ret = -EINVAL;
			break;
		}

		encoder = construct_encoder(mdp5_kms, INTF_HDMI, intf_num, ctl);
		if (IS_ERR(encoder)) {
			ret = PTR_ERR(encoder);
			break;
		}

		ret = msm_hdmi_modeset_init(priv->hdmi, dev, encoder);
		break;
	case INTF_DSI:
	{
		int dsi_id = get_dsi_id_from_intf(hw_cfg, intf_num);

		if ((dsi_id >= ARRAY_SIZE(priv->dsi)) || (dsi_id < 0)) {
			dev_err(dev->dev, "failed to find dsi from intf %d\n",
				intf_num);
			ret = -EINVAL;
			break;
		}

		if (!priv->dsi[dsi_id])
			break;

		ctl = mdp5_ctlm_request(ctlm, intf_num);
		if (!ctl) {
			ret = -EINVAL;
			break;
		}

		encoder = construct_encoder(mdp5_kms, INTF_DSI, intf_num, ctl);
		if (IS_ERR(encoder)) {
			ret = PTR_ERR(encoder);
			break;
		}

		ret = msm_dsi_modeset_init(priv->dsi[dsi_id], dev, encoder);
		break;
	}
	default:
		dev_err(dev->dev, "unknown intf: %d\n", intf_type);
		ret = -EINVAL;
		break;
	}

	return ret;
}

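/*
 * Modeset object construction order: encoders/connectors for each external
 * interface first, then one plane per hardware pipe (the first N doubling as
 * primary planes for N CRTCs), and finally the encoders' possible_crtcs masks.
 */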
static int modeset_init(struct mdp5_kms *mdp5_kms)
{
	struct drm_device *dev = mdp5_kms->dev;
	struct msm_drm_private *priv = dev->dev_private;
	const struct mdp5_cfg_hw *hw_cfg;
	unsigned int num_crtcs;
	int i, ret;

	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);

	/*
	 * Construct encoders and modeset initialize connector devices
	 * for each external display interface.
	 */
	for (i = 0; i < ARRAY_SIZE(hw_cfg->intf.connect); i++) {
		ret = modeset_init_intf(mdp5_kms, i);
		if (ret)
			goto fail;
	}

	/*
	 * We should ideally have fewer encoders (set up by parsing the MDP5
	 * interfaces) than the number of layer mixers present in HW, but
	 * let's be safe here anyway.
	 */
	num_crtcs = min(priv->num_encoders, mdp5_cfg->lm.count);

	/*
	 * Construct planes equaling the number of hw pipes, and CRTCs for the
	 * N encoders set up by the driver. The first N planes become primary
	 * planes for the CRTCs, with the remainder as overlay planes:
	 */
	for (i = 0; i < mdp5_kms->num_hwpipes; i++) {
		bool primary = i < num_crtcs;
		struct drm_plane *plane;
		struct drm_crtc *crtc;
		enum drm_plane_type type;

		if (primary)
			type = DRM_PLANE_TYPE_PRIMARY;
		else
			type = DRM_PLANE_TYPE_OVERLAY;

		plane = mdp5_plane_init(dev, type);
		if (IS_ERR(plane)) {
			ret = PTR_ERR(plane);
			dev_err(dev->dev, "failed to construct plane %d (%d)\n", i, ret);
			goto fail;
		}
		priv->planes[priv->num_planes++] = plane;

		if (!primary)
			continue;

		crtc = mdp5_crtc_init(dev, plane, NULL, i);
		if (IS_ERR(crtc)) {
			ret = PTR_ERR(crtc);
			dev_err(dev->dev, "failed to construct crtc %d (%d)\n", i, ret);
			goto fail;
		}
		priv->crtcs[priv->num_crtcs++] = crtc;
	}

	/*
	 * Now that we know the number of crtcs we've created, set the possible
	 * crtcs for the encoders
	 */
	for (i = 0; i < priv->num_encoders; i++) {
		struct drm_encoder *encoder = priv->encoders[i];

		encoder->possible_crtcs = (1 << priv->num_crtcs) - 1;
	}

	return 0;

fail:
	return ret;
}

static void read_mdp_hw_revision(struct mdp5_kms *mdp5_kms,
				 u32 *major, u32 *minor)
{
	u32 version;

	mdp5_enable(mdp5_kms);
	version = mdp5_read(mdp5_kms, REG_MDP5_HW_VERSION);
	mdp5_disable(mdp5_kms);

	*major = FIELD(version, MDP5_HW_VERSION_MAJOR);
	*minor = FIELD(version, MDP5_HW_VERSION_MINOR);

	DBG("MDP5 version v%d.%d", *major, *minor);
}

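/*
 * Look up a clock by name: a missing mandatory clock is a hard error, while a
 * missing optional one is just logged and skipped (*clkp is left untouched).
 */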
static int get_clk(struct platform_device *pdev, struct clk **clkp,
		const char *name, bool mandatory)
{
	struct device *dev = &pdev->dev;
	struct clk *clk = devm_clk_get(dev, name);
	if (IS_ERR(clk) && mandatory) {
		dev_err(dev, "failed to get %s (%ld)\n", name, PTR_ERR(clk));
		return PTR_ERR(clk);
	}
	if (IS_ERR(clk))
		DBG("skipping %s", name);
	else
		*clkp = clk;

	return 0;
}

static struct drm_encoder *get_encoder_from_crtc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_encoder *encoder;

	drm_for_each_encoder(encoder, dev)
		if (encoder->crtc == crtc)
			return encoder;

	return NULL;
}

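/*
 * Scanout position for vblank timestamping: read the encoder's line counter
 * and express it relative to the start of the active region, flagging lines
 * that fall inside vblank.
 */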
static int mdp5_get_scanoutpos(struct drm_device *dev, unsigned int pipe,
			       unsigned int flags, int *vpos, int *hpos,
			       ktime_t *stime, ktime_t *etime,
			       const struct drm_display_mode *mode)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_crtc *crtc;
	struct drm_encoder *encoder;
	int line, vsw, vbp, vactive_start, vactive_end, vfp_end;
	int ret = 0;

	crtc = priv->crtcs[pipe];
	if (!crtc) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return 0;
	}

	encoder = get_encoder_from_crtc(crtc);
	if (!encoder) {
		DRM_ERROR("no encoder found for crtc %d\n", pipe);
		return 0;
	}

	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	vsw = mode->crtc_vsync_end - mode->crtc_vsync_start;
	vbp = mode->crtc_vtotal - mode->crtc_vsync_end;

	/*
	 * the line counter is 1 at the start of the VSYNC pulse and VTOTAL at
	 * the end of VFP. Translate the porch values relative to the line
	 * counter positions.
	 */

	vactive_start = vsw + vbp + 1;

	vactive_end = vactive_start + mode->crtc_vdisplay;

	/* last scan line before VSYNC */
	vfp_end = mode->crtc_vtotal;

	if (stime)
		*stime = ktime_get();

	line = mdp5_encoder_get_linecount(encoder);

	if (line < vactive_start) {
		line -= vactive_start;
		ret |= DRM_SCANOUTPOS_IN_VBLANK;
	} else if (line > vactive_end) {
		line = line - vfp_end - vactive_start;
		ret |= DRM_SCANOUTPOS_IN_VBLANK;
	} else {
		line -= vactive_start;
	}

	*vpos = line;
	*hpos = 0;

	if (etime)
		*etime = ktime_get();

	return ret;
}

static int mdp5_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= priv->num_crtcs) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	crtc = priv->crtcs[pipe];
	if (!crtc) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     &crtc->mode);
}

static u32 mdp5_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_crtc *crtc;
	struct drm_encoder *encoder;

	if (pipe < 0 || pipe >= priv->num_crtcs)
		return 0;

	crtc = priv->crtcs[pipe];
	if (!crtc)
		return 0;

	encoder = get_encoder_from_crtc(crtc);
	if (!encoder)
		return 0;

	return mdp5_encoder_get_framecount(encoder);
}

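/*
 * Second stage of initialization, called from the core msm driver once
 * mdp5_init() (below) has populated priv->kms at component bind time: set up
 * the irq, iommu address space and modeset objects.
 */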
struct msm_kms *mdp5_kms_init(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct platform_device *pdev;
	struct mdp5_kms *mdp5_kms;
	struct mdp5_cfg *config;
	struct msm_kms *kms;
	struct msm_gem_address_space *aspace;
	int irq, i, ret;

	/* priv->kms would have been populated by the MDP5 driver */
	kms = priv->kms;
	if (!kms)
		return NULL;

	mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));

	mdp_kms_init(&mdp5_kms->base, &kms_funcs);

	pdev = mdp5_kms->pdev;

	irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
	if (irq < 0) {
		ret = irq;
		dev_err(&pdev->dev, "failed to get irq: %d\n", ret);
		goto fail;
	}

	kms->irq = irq;

	config = mdp5_cfg_get_config(mdp5_kms->cfg);

	/* make sure things are off before attaching iommu (bootloader could
	 * have left things on, in which case we'll start getting faults if
	 * we don't disable):
	 */
	mdp5_enable(mdp5_kms);
	for (i = 0; i < MDP5_INTF_NUM_MAX; i++) {
		if (mdp5_cfg_intf_is_virtual(config->hw->intf.connect[i]) ||
		    !config->hw->intf.base[i])
			continue;
		mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(i), 0);

		mdp5_write(mdp5_kms, REG_MDP5_INTF_FRAME_LINE_COUNT_EN(i), 0x3);
	}
	mdp5_disable(mdp5_kms);
	mdelay(16);

	if (config->platform.iommu) {
		aspace = msm_gem_address_space_create(&pdev->dev,
				config->platform.iommu, "mdp5");
		if (IS_ERR(aspace)) {
			ret = PTR_ERR(aspace);
			goto fail;
		}

		mdp5_kms->aspace = aspace;

		ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports,
				ARRAY_SIZE(iommu_ports));
		if (ret) {
			dev_err(&pdev->dev, "failed to attach iommu: %d\n",
				ret);
			goto fail;
		}
	} else {
		dev_info(&pdev->dev,
			 "no iommu, fallback to phys contig buffers for scanout\n");
		aspace = NULL;
	}

	mdp5_kms->id = msm_register_address_space(dev, aspace);
	if (mdp5_kms->id < 0) {
		ret = mdp5_kms->id;
		dev_err(&pdev->dev, "failed to register mdp5 iommu: %d\n", ret);
		goto fail;
	}

	ret = modeset_init(mdp5_kms);
	if (ret) {
		dev_err(&pdev->dev, "modeset_init failed: %d\n", ret);
		goto fail;
	}

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;
	dev->mode_config.max_width = 0xffff;
	dev->mode_config.max_height = 0xffff;

	dev->driver->get_vblank_timestamp = mdp5_get_vblank_timestamp;
	dev->driver->get_scanout_position = mdp5_get_scanoutpos;
	dev->driver->get_vblank_counter = mdp5_get_vblank_counter;
	dev->max_vblank_count = 0xffffffff;
	dev->vblank_disable_immediate = true;

	return kms;
fail:
	if (kms)
		mdp5_kms_destroy(kms);
	return ERR_PTR(ret);
}

static void mdp5_destroy(struct platform_device *pdev)
{
	struct mdp5_kms *mdp5_kms = platform_get_drvdata(pdev);

	if (mdp5_kms->ctlm)
		mdp5_ctlm_destroy(mdp5_kms->ctlm);
	if (mdp5_kms->smp)
		mdp5_smp_destroy(mdp5_kms->smp);
	if (mdp5_kms->cfg)
		mdp5_cfg_destroy(mdp5_kms->cfg);

	if (mdp5_kms->rpm_enabled)
		pm_runtime_disable(&pdev->dev);

	kfree(mdp5_kms->state);
}

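/*
 * Allocate mdp5_hw_pipe objects for a set of source pipes (SSPPs) described
 * by the hw config, recording each one's register offset and capabilities.
 */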
static int construct_pipes(struct mdp5_kms *mdp5_kms, int cnt,
		const enum mdp5_pipe *pipes, const uint32_t *offsets,
		uint32_t caps)
{
	struct drm_device *dev = mdp5_kms->dev;
	int i, ret;

	for (i = 0; i < cnt; i++) {
		struct mdp5_hw_pipe *hwpipe;

		hwpipe = mdp5_pipe_init(pipes[i], offsets[i], caps);
		if (IS_ERR(hwpipe)) {
			ret = PTR_ERR(hwpipe);
			dev_err(dev->dev, "failed to construct pipe for %s (%d)\n",
					pipe2name(pipes[i]), ret);
			return ret;
		}
		hwpipe->idx = mdp5_kms->num_hwpipes;
		mdp5_kms->hwpipes[mdp5_kms->num_hwpipes++] = hwpipe;
	}

	return 0;
}

static int hwpipe_init(struct mdp5_kms *mdp5_kms)
{
	static const enum mdp5_pipe rgb_planes[] = {
			SSPP_RGB0, SSPP_RGB1, SSPP_RGB2, SSPP_RGB3,
	};
	static const enum mdp5_pipe vig_planes[] = {
			SSPP_VIG0, SSPP_VIG1, SSPP_VIG2, SSPP_VIG3,
	};
	static const enum mdp5_pipe dma_planes[] = {
			SSPP_DMA0, SSPP_DMA1,
	};
	const struct mdp5_cfg_hw *hw_cfg;
	int ret;

	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);

	/* Construct RGB pipes: */
	ret = construct_pipes(mdp5_kms, hw_cfg->pipe_rgb.count, rgb_planes,
			hw_cfg->pipe_rgb.base, hw_cfg->pipe_rgb.caps);
	if (ret)
		return ret;

	/* Construct video (VIG) pipes: */
	ret = construct_pipes(mdp5_kms, hw_cfg->pipe_vig.count, vig_planes,
			hw_cfg->pipe_vig.base, hw_cfg->pipe_vig.caps);
	if (ret)
		return ret;

	/* Construct DMA pipes: */
	ret = construct_pipes(mdp5_kms, hw_cfg->pipe_dma.count, dma_planes,
			hw_cfg->pipe_dma.base, hw_cfg->pipe_dma.caps);
	if (ret)
		return ret;

	return 0;
}

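/*
 * First stage of initialization, run at component bind time: map registers,
 * acquire clocks, read the hw revision and build the cfg/CTL-manager/hwpipe
 * state, leaving the modeset objects for mdp5_kms_init().
 */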
static int mdp5_init(struct platform_device *pdev, struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct mdp5_kms *mdp5_kms;
	struct mdp5_cfg *config;
	u32 major, minor;
	int ret;

	mdp5_kms = devm_kzalloc(&pdev->dev, sizeof(*mdp5_kms), GFP_KERNEL);
	if (!mdp5_kms) {
		ret = -ENOMEM;
		goto fail;
	}

	platform_set_drvdata(pdev, mdp5_kms);

	spin_lock_init(&mdp5_kms->resource_lock);

	mdp5_kms->dev = dev;
	mdp5_kms->pdev = pdev;

	drm_modeset_lock_init(&mdp5_kms->state_lock);
	mdp5_kms->state = kzalloc(sizeof(*mdp5_kms->state), GFP_KERNEL);
	if (!mdp5_kms->state) {
		ret = -ENOMEM;
		goto fail;
	}

	mdp5_kms->mmio = msm_ioremap(pdev, "mdp_phys", "MDP5");
	if (IS_ERR(mdp5_kms->mmio)) {
		ret = PTR_ERR(mdp5_kms->mmio);
		goto fail;
	}

	/* mandatory clocks: */
	ret = get_clk(pdev, &mdp5_kms->axi_clk, "bus_clk", true);
	if (ret)
		goto fail;
	ret = get_clk(pdev, &mdp5_kms->ahb_clk, "iface_clk", true);
	if (ret)
		goto fail;
	ret = get_clk(pdev, &mdp5_kms->core_clk, "core_clk", true);
	if (ret)
		goto fail;
	ret = get_clk(pdev, &mdp5_kms->vsync_clk, "vsync_clk", true);
	if (ret)
		goto fail;

	/* optional clocks: */
	get_clk(pdev, &mdp5_kms->lut_clk, "lut_clk", false);

	/* we need to set a default rate before enabling.  Set a safe
	 * rate first, then figure out hw revision, and then set a
	 * more optimal rate:
	 */
	clk_set_rate(mdp5_kms->core_clk, 200000000);

	pm_runtime_enable(&pdev->dev);
	mdp5_kms->rpm_enabled = true;

	read_mdp_hw_revision(mdp5_kms, &major, &minor);

	mdp5_kms->cfg = mdp5_cfg_init(mdp5_kms, major, minor);
	if (IS_ERR(mdp5_kms->cfg)) {
		ret = PTR_ERR(mdp5_kms->cfg);
		mdp5_kms->cfg = NULL;
		goto fail;
	}

	config = mdp5_cfg_get_config(mdp5_kms->cfg);
	mdp5_kms->caps = config->hw->mdp.caps;

	/* TODO: compute core clock rate at runtime */
	clk_set_rate(mdp5_kms->core_clk, config->hw->max_clk);

	/*
	 * Some chipsets have a Shared Memory Pool (SMP), while others
	 * have dedicated latency buffering per source pipe instead;
	 * this section initializes the SMP:
	 */
	if (mdp5_kms->caps & MDP_CAP_SMP) {
		mdp5_kms->smp = mdp5_smp_init(mdp5_kms, &config->hw->smp);
		if (IS_ERR(mdp5_kms->smp)) {
			ret = PTR_ERR(mdp5_kms->smp);
			mdp5_kms->smp = NULL;
			goto fail;
		}
	}

	mdp5_kms->ctlm = mdp5_ctlm_init(dev, mdp5_kms->mmio, mdp5_kms->cfg);
	if (IS_ERR(mdp5_kms->ctlm)) {
		ret = PTR_ERR(mdp5_kms->ctlm);
		mdp5_kms->ctlm = NULL;
		goto fail;
	}

	ret = hwpipe_init(mdp5_kms);
	if (ret)
		goto fail;

	/* set uninit-ed kms */
	priv->kms = &mdp5_kms->base.base;

	return 0;
fail:
	mdp5_destroy(pdev);
	return ret;
}

static int mdp5_bind(struct device *dev, struct device *master, void *data)
{
	struct drm_device *ddev = dev_get_drvdata(master);
	struct platform_device *pdev = to_platform_device(dev);

	DBG("");

	return mdp5_init(pdev, ddev);
}

static void mdp5_unbind(struct device *dev, struct device *master,
			void *data)
{
	struct platform_device *pdev = to_platform_device(dev);

	mdp5_destroy(pdev);
}

static const struct component_ops mdp5_ops = {
	.bind   = mdp5_bind,
	.unbind = mdp5_unbind,
};

static int mdp5_dev_probe(struct platform_device *pdev)
{
	DBG("");
	return component_add(&pdev->dev, &mdp5_ops);
}

static int mdp5_dev_remove(struct platform_device *pdev)
{
	DBG("");
	component_del(&pdev->dev, &mdp5_ops);
	return 0;
}

static const struct of_device_id mdp5_dt_match[] = {
	{ .compatible = "qcom,mdp5", },
	/* to support downstream DT files */
	{ .compatible = "qcom,mdss_mdp", },
	{}
};
MODULE_DEVICE_TABLE(of, mdp5_dt_match);

static struct platform_driver mdp5_driver = {
	.probe = mdp5_dev_probe,
	.remove = mdp5_dev_remove,
	.driver = {
		.name = "msm_mdp",
		.of_match_table = mdp5_dt_match,
	},
};

void __init msm_mdp_register(void)
{
	DBG("");
	platform_driver_register(&mdp5_driver);
}

void __exit msm_mdp_unregister(void)
{
	DBG("");
	platform_driver_unregister(&mdp5_driver);
}