/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */


#include "msm_drv.h"
#include "msm_mmu.h"
#include "mdp4_kms.h"

static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev);

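/*
 * One-time hardware setup: read and sanity-check the MDP version register,
 * bump the DSI PLL regulator voltages on the revisions that need it, and
 * program the portmap, fetch and CSC defaults used by the rest of the driver.
 */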
static int mdp4_hw_init(struct msm_kms *kms)
{
        struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
        struct drm_device *dev = mdp4_kms->dev;
        uint32_t version, major, minor, dmap_cfg, vg_cfg;
        unsigned long clk;
        int ret = 0;

        pm_runtime_get_sync(dev->dev);

        mdp4_enable(mdp4_kms);
        version = mdp4_read(mdp4_kms, REG_MDP4_VERSION);
        mdp4_disable(mdp4_kms);

        major = FIELD(version, MDP4_VERSION_MAJOR);
        minor = FIELD(version, MDP4_VERSION_MINOR);

        DBG("found MDP4 version v%d.%d", major, minor);

        if (major != 4) {
                dev_err(dev->dev, "unexpected MDP version: v%d.%d\n",
                                major, minor);
                ret = -ENXIO;
                goto out;
        }

        mdp4_kms->rev = minor;

        if (mdp4_kms->dsi_pll_vdda) {
                if ((mdp4_kms->rev == 2) || (mdp4_kms->rev == 4)) {
                        ret = regulator_set_voltage(mdp4_kms->dsi_pll_vdda,
                                        1200000, 1200000);
                        if (ret) {
                                dev_err(dev->dev,
                                        "failed to set dsi_pll_vdda voltage: %d\n", ret);
                                goto out;
                        }
                }
        }

        if (mdp4_kms->dsi_pll_vddio) {
                if (mdp4_kms->rev == 2) {
                        ret = regulator_set_voltage(mdp4_kms->dsi_pll_vddio,
                                        1800000, 1800000);
                        if (ret) {
                                dev_err(dev->dev,
                                        "failed to set dsi_pll_vddio voltage: %d\n", ret);
                                goto out;
                        }
                }
        }

        if (mdp4_kms->rev > 1) {
                mdp4_write(mdp4_kms, REG_MDP4_CS_CONTROLLER0, 0x0707ffff);
                mdp4_write(mdp4_kms, REG_MDP4_CS_CONTROLLER1, 0x03073f3f);
        }

        mdp4_write(mdp4_kms, REG_MDP4_PORTMAP_MODE, 0x3);

        /* max read pending cmd config, 3 pending requests: */
        mdp4_write(mdp4_kms, REG_MDP4_READ_CNFG, 0x02222);

        clk = clk_get_rate(mdp4_kms->clk);

        if ((mdp4_kms->rev >= 1) || (clk >= 90000000)) {
                dmap_cfg = 0x47;     /* 16 bytes-burst x 8 req */
                vg_cfg = 0x47;       /* 16 bytes-burst x 8 req */
        } else {
                dmap_cfg = 0x27;     /* 8 bytes-burst x 8 req */
                vg_cfg = 0x43;       /* 16 bytes-burst x 4 req */
        }

        DBG("fetch config: dmap=%02x, vg=%02x", dmap_cfg, vg_cfg);

        mdp4_write(mdp4_kms, REG_MDP4_DMA_FETCH_CONFIG(DMA_P), dmap_cfg);
        mdp4_write(mdp4_kms, REG_MDP4_DMA_FETCH_CONFIG(DMA_E), dmap_cfg);

        mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(VG1), vg_cfg);
        mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(VG2), vg_cfg);
        mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(RGB1), vg_cfg);
        mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(RGB2), vg_cfg);

        if (mdp4_kms->rev >= 2)
                mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG_UPDATE_METHOD, 1);

        /* disable CSC matrix / YUV by default: */
        mdp4_write(mdp4_kms, REG_MDP4_PIPE_OP_MODE(VG1), 0);
        mdp4_write(mdp4_kms, REG_MDP4_PIPE_OP_MODE(VG2), 0);
        mdp4_write(mdp4_kms, REG_MDP4_DMA_P_OP_MODE, 0);
        mdp4_write(mdp4_kms, REG_MDP4_DMA_S_OP_MODE, 0);
        mdp4_write(mdp4_kms, REG_MDP4_OVLP_CSC_CONFIG(1), 0);
        mdp4_write(mdp4_kms, REG_MDP4_OVLP_CSC_CONFIG(2), 0);

        if (mdp4_kms->rev > 1)
                mdp4_write(mdp4_kms, REG_MDP4_RESET_STATUS, 1);

out:
        pm_runtime_put_sync(dev->dev);

        return ret;
}

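/* pixel clock rounding is delegated to the (only) DTV encoder: */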
static long mdp4_round_pixclk(struct msm_kms *kms, unsigned long rate,
                struct drm_encoder *encoder)
{
        /* if we had >1 encoder, we'd need something more clever: */
        return mdp4_dtv_round_pixclk(encoder, rate);
}

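/* cancel, on every CRTC, any page flip events still pending for the
 * closing file:
 */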
static void mdp4_preclose(struct msm_kms *kms, struct drm_file *file)
{
        struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
        struct msm_drm_private *priv = mdp4_kms->dev->dev_private;
        unsigned i;

        for (i = 0; i < priv->num_crtcs; i++)
                mdp4_crtc_cancel_pending_flip(priv->crtcs[i], file);
}

static void mdp4_destroy(struct msm_kms *kms)
{
        struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
        kfree(mdp4_kms);
}

static const struct mdp_kms_funcs kms_funcs = {
        .base = {
                .hw_init         = mdp4_hw_init,
                .irq_preinstall  = mdp4_irq_preinstall,
                .irq_postinstall = mdp4_irq_postinstall,
                .irq_uninstall   = mdp4_irq_uninstall,
                .irq             = mdp4_irq,
                .enable_vblank   = mdp4_enable_vblank,
                .disable_vblank  = mdp4_disable_vblank,
                .get_format      = mdp_get_format,
                .round_pixclk    = mdp4_round_pixclk,
                .preclose        = mdp4_preclose,
                .destroy         = mdp4_destroy,
        },
        .set_irqmask = mdp4_set_irqmask,
};

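/* clock on/off helpers: core clock, optional interface clock, and LUT clock: */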
int mdp4_disable(struct mdp4_kms *mdp4_kms)
{
        DBG("");

        clk_disable_unprepare(mdp4_kms->clk);
        if (mdp4_kms->pclk)
                clk_disable_unprepare(mdp4_kms->pclk);
        clk_disable_unprepare(mdp4_kms->lut_clk);

        return 0;
}

int mdp4_enable(struct mdp4_kms *mdp4_kms)
{
        DBG("");

        clk_prepare_enable(mdp4_kms->clk);
        if (mdp4_kms->pclk)
                clk_prepare_enable(mdp4_kms->pclk);
        clk_prepare_enable(mdp4_kms->lut_clk);

        return 0;
}

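/* construct the kms objects for the (currently fixed) display pipeline:
 * VG1/VG2 overlay planes, one CRTC on DMA_E with a private RGB1 plane,
 * the DTV encoder, and the HDMI output attached to it:
 */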
static int modeset_init(struct mdp4_kms *mdp4_kms)
{
        struct drm_device *dev = mdp4_kms->dev;
        struct msm_drm_private *priv = dev->dev_private;
        struct drm_plane *plane;
        struct drm_crtc *crtc;
        struct drm_encoder *encoder;
        struct hdmi *hdmi;
        int ret;

        /*
         * NOTE: this is a bit simplistic until we add support
         * for more than just RGB1->DMA_E->DTV->HDMI
         */

        /* construct non-private planes: */
        plane = mdp4_plane_init(dev, VG1, false);
        if (IS_ERR(plane)) {
                dev_err(dev->dev, "failed to construct plane for VG1\n");
                ret = PTR_ERR(plane);
                goto fail;
        }
        priv->planes[priv->num_planes++] = plane;

        plane = mdp4_plane_init(dev, VG2, false);
        if (IS_ERR(plane)) {
                dev_err(dev->dev, "failed to construct plane for VG2\n");
                ret = PTR_ERR(plane);
                goto fail;
        }
        priv->planes[priv->num_planes++] = plane;

        /* the CRTCs get constructed with a private plane: */
        plane = mdp4_plane_init(dev, RGB1, true);
        if (IS_ERR(plane)) {
                dev_err(dev->dev, "failed to construct plane for RGB1\n");
                ret = PTR_ERR(plane);
                goto fail;
        }

        crtc = mdp4_crtc_init(dev, plane, priv->num_crtcs, 1, DMA_E);
        if (IS_ERR(crtc)) {
                dev_err(dev->dev, "failed to construct crtc for DMA_E\n");
                ret = PTR_ERR(crtc);
                goto fail;
        }
        priv->crtcs[priv->num_crtcs++] = crtc;

        encoder = mdp4_dtv_encoder_init(dev);
        if (IS_ERR(encoder)) {
                dev_err(dev->dev, "failed to construct DTV encoder\n");
                ret = PTR_ERR(encoder);
                goto fail;
        }
        encoder->possible_crtcs = 0x1;     /* DTV can be hooked to DMA_E */
        priv->encoders[priv->num_encoders++] = encoder;

        hdmi = hdmi_init(dev, encoder);
        if (IS_ERR(hdmi)) {
                ret = PTR_ERR(hdmi);
                dev_err(dev->dev, "failed to initialize HDMI: %d\n", ret);
                goto fail;
        }

        return 0;

fail:
        return ret;
}

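/* iommu ports that the mmu is attached to for scanout: */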
static const char *iommu_ports[] = {
        "mdp_port0_cb0", "mdp_port1_cb0",
};

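/*
 * Construct the mdp4 kms: map registers, grab (optional) regulators and
 * clocks, make sure the interfaces the bootloader may have left running
 * are shut off, attach the iommu, and build the modeset objects.
 */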
struct msm_kms *mdp4_kms_init(struct drm_device *dev)
{
        struct platform_device *pdev = dev->platformdev;
        struct mdp4_platform_config *config = mdp4_get_config(pdev);
        struct mdp4_kms *mdp4_kms;
        struct msm_kms *kms = NULL;
        struct msm_mmu *mmu;
        int ret;

        mdp4_kms = kzalloc(sizeof(*mdp4_kms), GFP_KERNEL);
        if (!mdp4_kms) {
                dev_err(dev->dev, "failed to allocate kms\n");
                ret = -ENOMEM;
                goto fail;
        }

        mdp_kms_init(&mdp4_kms->base, &kms_funcs);

        kms = &mdp4_kms->base.base;

        mdp4_kms->dev = dev;

        mdp4_kms->mmio = msm_ioremap(pdev, NULL, "MDP4");
        if (IS_ERR(mdp4_kms->mmio)) {
                ret = PTR_ERR(mdp4_kms->mmio);
                goto fail;
        }

        mdp4_kms->dsi_pll_vdda = devm_regulator_get(&pdev->dev, "dsi_pll_vdda");
        if (IS_ERR(mdp4_kms->dsi_pll_vdda))
                mdp4_kms->dsi_pll_vdda = NULL;

        mdp4_kms->dsi_pll_vddio = devm_regulator_get(&pdev->dev, "dsi_pll_vddio");
        if (IS_ERR(mdp4_kms->dsi_pll_vddio))
                mdp4_kms->dsi_pll_vddio = NULL;

        mdp4_kms->vdd = devm_regulator_get(&pdev->dev, "vdd");
        if (IS_ERR(mdp4_kms->vdd))
                mdp4_kms->vdd = NULL;

        if (mdp4_kms->vdd) {
                ret = regulator_enable(mdp4_kms->vdd);
                if (ret) {
                        dev_err(dev->dev, "failed to enable regulator vdd: %d\n", ret);
                        goto fail;
                }
        }

        mdp4_kms->clk = devm_clk_get(&pdev->dev, "core_clk");
        if (IS_ERR(mdp4_kms->clk)) {
                dev_err(dev->dev, "failed to get core_clk\n");
                ret = PTR_ERR(mdp4_kms->clk);
                goto fail;
        }

        mdp4_kms->pclk = devm_clk_get(&pdev->dev, "iface_clk");
        if (IS_ERR(mdp4_kms->pclk))
                mdp4_kms->pclk = NULL;

        // XXX if (rev >= MDP_REV_42) { ???
        mdp4_kms->lut_clk = devm_clk_get(&pdev->dev, "lut_clk");
        if (IS_ERR(mdp4_kms->lut_clk)) {
                dev_err(dev->dev, "failed to get lut_clk\n");
                ret = PTR_ERR(mdp4_kms->lut_clk);
                goto fail;
        }

        clk_set_rate(mdp4_kms->clk, config->max_clk);
        clk_set_rate(mdp4_kms->lut_clk, config->max_clk);

        /* make sure things are off before attaching iommu (bootloader could
         * have left things on, in which case we'll start getting faults if
         * we don't disable):
         */
        mdp4_enable(mdp4_kms);
        mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 0);
        mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 0);
        mdp4_write(mdp4_kms, REG_MDP4_DSI_ENABLE, 0);
        mdp4_disable(mdp4_kms);
        mdelay(16);

        if (config->iommu) {
                mmu = msm_iommu_new(dev, config->iommu);
                if (IS_ERR(mmu)) {
                        ret = PTR_ERR(mmu);
                        goto fail;
                }
                ret = mmu->funcs->attach(mmu, iommu_ports,
                                ARRAY_SIZE(iommu_ports));
                if (ret)
                        goto fail;
        } else {
                dev_info(dev->dev, "no iommu, fallback to phys "
                                "contig buffers for scanout\n");
                mmu = NULL;
        }

        mdp4_kms->id = msm_register_mmu(dev, mmu);
        if (mdp4_kms->id < 0) {
                ret = mdp4_kms->id;
                dev_err(dev->dev, "failed to register mdp4 iommu: %d\n", ret);
                goto fail;
        }

        ret = modeset_init(mdp4_kms);
        if (ret) {
                dev_err(dev->dev, "modeset_init failed: %d\n", ret);
                goto fail;
        }

        return kms;

fail:
        if (kms)
                mdp4_destroy(kms);
        return ERR_PTR(ret);
}

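/* platform config: until DT support is wired up, the max core clock and
 * iommu domain come from the legacy board code:
 */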
static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev)
{
        static struct mdp4_platform_config config = {};
#ifdef CONFIG_OF
        /* TODO */
#else
        if (cpu_is_apq8064())
                config.max_clk = 266667000;
        else
                config.max_clk = 200000000;

        config.iommu = msm_get_iommu_domain(DISPLAY_READ_DOMAIN);
#endif
        return &config;
}