]>
Commit | Line | Data |
---|---|---|
caab277b | 1 | // SPDX-License-Identifier: GPL-2.0-only |
c8afe684 | 2 | /* |
25fdd593 | 3 | * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. |
c8afe684 RC |
4 | * Copyright (C) 2013 Red Hat |
5 | * Author: Rob Clark <robdclark@gmail.com> | |
c8afe684 RC |
6 | */ |
7 | ||
feea39a8 | 8 | #include <linux/dma-mapping.h> |
25fdd593 | 9 | #include <linux/kthread.h> |
feea39a8 | 10 | #include <linux/uaccess.h> |
25fdd593 | 11 | #include <uapi/linux/sched/types.h> |
feea39a8 SR |
12 | |
13 | #include <drm/drm_drv.h> | |
14 | #include <drm/drm_file.h> | |
15 | #include <drm/drm_ioctl.h> | |
16 | #include <drm/drm_irq.h> | |
17 | #include <drm/drm_prime.h> | |
97ac0e47 | 18 | #include <drm/drm_of.h> |
feea39a8 | 19 | #include <drm/drm_vblank.h> |
97ac0e47 | 20 | |
c8afe684 | 21 | #include "msm_drv.h" |
edcd60ce | 22 | #include "msm_debugfs.h" |
fde5de6c | 23 | #include "msm_fence.h" |
f05c83e7 | 24 | #include "msm_gem.h" |
7198e6b0 | 25 | #include "msm_gpu.h" |
dd2da6e3 | 26 | #include "msm_kms.h" |
c2052a4e | 27 | #include "adreno/adreno_gpu.h" |
c8afe684 | 28 | |
a8d854c1 RC |
29 | /* |
30 | * MSM driver version: | |
31 | * - 1.0.0 - initial interface | |
32 | * - 1.1.0 - adds madvise, and support for submits with > 4 cmd buffers | |
7a3bcc0a | 33 | * - 1.2.0 - adds explicit fence support for submit ioctl |
f7de1545 JC |
34 | * - 1.3.0 - adds GMEM_BASE + NR_RINGS params, SUBMITQUEUE_NEW + |
35 | * SUBMITQUEUE_CLOSE ioctls, and MSM_INFO_IOVA flag for | |
36 | * MSM_GEM_INFO ioctl. | |
1fed8df3 RC |
37 | * - 1.4.0 - softpin, MSM_RELOC_BO_DUMP, and GEM_INFO support to set/get |
38 | * GEM object's debug name | |
b0fb6604 | 39 | * - 1.5.0 - Add SUBMITQUERY_QUERY ioctl |
a8d854c1 RC |
40 | */ |
41 | #define MSM_VERSION_MAJOR 1 | |
b0fb6604 | 42 | #define MSM_VERSION_MINOR 5 |
a8d854c1 RC |
43 | #define MSM_VERSION_PATCHLEVEL 0 |
44 | ||
c8afe684 RC |
45 | static const struct drm_mode_config_funcs mode_config_funcs = { |
46 | .fb_create = msm_framebuffer_create, | |
4ccbc6e5 | 47 | .output_poll_changed = drm_fb_helper_output_poll_changed, |
1f920175 | 48 | .atomic_check = drm_atomic_helper_check, |
d14659f5 SP |
49 | .atomic_commit = drm_atomic_helper_commit, |
50 | }; | |
51 | ||
52 | static const struct drm_mode_config_helper_funcs mode_config_helper_funcs = { | |
53 | .atomic_commit_tail = msm_atomic_commit_tail, | |
c8afe684 RC |
54 | }; |
55 | ||
c8afe684 RC |
56 | #ifdef CONFIG_DRM_MSM_REGISTER_LOGGING |
57 | static bool reglog = false; | |
58 | MODULE_PARM_DESC(reglog, "Enable register read/write logging"); | |
59 | module_param(reglog, bool, 0600); | |
60 | #else | |
61 | #define reglog 0 | |
62 | #endif | |
63 | ||
a9ee34b7 | 64 | #ifdef CONFIG_DRM_FBDEV_EMULATION |
e90dfec7 RC |
65 | static bool fbdev = true; |
66 | MODULE_PARM_DESC(fbdev, "Enable fbdev compat layer"); | |
67 | module_param(fbdev, bool, 0600); | |
68 | #endif | |
69 | ||
3a10ba8c | 70 | static char *vram = "16m"; |
4313c744 | 71 | MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU)"); |
871d812a RC |
72 | module_param(vram, charp, 0); |
73 | ||
06d9f56f RC |
74 | bool dumpstate = false; |
75 | MODULE_PARM_DESC(dumpstate, "Dump KMS state on errors"); | |
76 | module_param(dumpstate, bool, 0600); | |
77 | ||
ba4dd718 RC |
78 | static bool modeset = true; |
79 | MODULE_PARM_DESC(modeset, "Use kernel modesetting [KMS] (1=on (default), 0=disable)"); | |
80 | module_param(modeset, bool, 0600); | |
81 | ||
060530f1 RC |
82 | /* |
83 | * Util/helpers: | |
84 | */ | |
85 | ||
8e54eea5 JC |
86 | struct clk *msm_clk_bulk_get_clock(struct clk_bulk_data *bulk, int count, |
87 | const char *name) | |
88 | { | |
89 | int i; | |
90 | char n[32]; | |
91 | ||
92 | snprintf(n, sizeof(n), "%s_clk", name); | |
93 | ||
94 | for (i = 0; bulk && i < count; i++) { | |
95 | if (!strcmp(bulk[i].id, name) || !strcmp(bulk[i].id, n)) | |
96 | return bulk[i].clk; | |
97 | } | |
98 | ||
99 | ||
100 | return NULL; | |
101 | } | |
102 | ||
720c3bb8 RC |
103 | struct clk *msm_clk_get(struct platform_device *pdev, const char *name) |
104 | { | |
105 | struct clk *clk; | |
106 | char name2[32]; | |
107 | ||
108 | clk = devm_clk_get(&pdev->dev, name); | |
109 | if (!IS_ERR(clk) || PTR_ERR(clk) == -EPROBE_DEFER) | |
110 | return clk; | |
111 | ||
112 | snprintf(name2, sizeof(name2), "%s_clk", name); | |
113 | ||
114 | clk = devm_clk_get(&pdev->dev, name2); | |
115 | if (!IS_ERR(clk)) | |
116 | dev_warn(&pdev->dev, "Using legacy clk name binding. Use " | |
117 | "\"%s\" instead of \"%s\"\n", name, name2); | |
118 | ||
119 | return clk; | |
120 | } | |
121 | ||
c8afe684 RC |
122 | void __iomem *msm_ioremap(struct platform_device *pdev, const char *name, |
123 | const char *dbgname) | |
124 | { | |
125 | struct resource *res; | |
126 | unsigned long size; | |
127 | void __iomem *ptr; | |
128 | ||
129 | if (name) | |
130 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name); | |
131 | else | |
132 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | |
133 | ||
134 | if (!res) { | |
6a41da17 | 135 | DRM_DEV_ERROR(&pdev->dev, "failed to get memory resource: %s\n", name); |
c8afe684 RC |
136 | return ERR_PTR(-EINVAL); |
137 | } | |
138 | ||
139 | size = resource_size(res); | |
140 | ||
4bdc0d67 | 141 | ptr = devm_ioremap(&pdev->dev, res->start, size); |
c8afe684 | 142 | if (!ptr) { |
6a41da17 | 143 | DRM_DEV_ERROR(&pdev->dev, "failed to ioremap: %s\n", name); |
c8afe684 RC |
144 | return ERR_PTR(-ENOMEM); |
145 | } | |
146 | ||
147 | if (reglog) | |
fc99f97a | 148 | printk(KERN_DEBUG "IO:region %s %p %08lx\n", dbgname, ptr, size); |
c8afe684 RC |
149 | |
150 | return ptr; | |
151 | } | |
152 | ||
153 | void msm_writel(u32 data, void __iomem *addr) | |
154 | { | |
155 | if (reglog) | |
fc99f97a | 156 | printk(KERN_DEBUG "IO:W %p %08x\n", addr, data); |
c8afe684 RC |
157 | writel(data, addr); |
158 | } | |
159 | ||
160 | u32 msm_readl(const void __iomem *addr) | |
161 | { | |
162 | u32 val = readl(addr); | |
163 | if (reglog) | |
8dfe162a | 164 | pr_err("IO:R %p %08x\n", addr, val); |
c8afe684 RC |
165 | return val; |
166 | } | |
167 | ||
48d1d28e JS |
168 | struct msm_vblank_work { |
169 | struct work_struct work; | |
78b1d470 HL |
170 | int crtc_id; |
171 | bool enable; | |
48d1d28e | 172 | struct msm_drm_private *priv; |
78b1d470 HL |
173 | }; |
174 | ||
5aeb6656 | 175 | static void vblank_ctrl_worker(struct work_struct *work) |
78b1d470 | 176 | { |
48d1d28e JS |
177 | struct msm_vblank_work *vbl_work = container_of(work, |
178 | struct msm_vblank_work, work); | |
179 | struct msm_drm_private *priv = vbl_work->priv; | |
78b1d470 | 180 | struct msm_kms *kms = priv->kms; |
78b1d470 | 181 | |
48d1d28e JS |
182 | if (vbl_work->enable) |
183 | kms->funcs->enable_vblank(kms, priv->crtcs[vbl_work->crtc_id]); | |
184 | else | |
185 | kms->funcs->disable_vblank(kms, priv->crtcs[vbl_work->crtc_id]); | |
78b1d470 | 186 | |
48d1d28e | 187 | kfree(vbl_work); |
78b1d470 HL |
188 | } |
189 | ||
190 | static int vblank_ctrl_queue_work(struct msm_drm_private *priv, | |
191 | int crtc_id, bool enable) | |
192 | { | |
48d1d28e | 193 | struct msm_vblank_work *vbl_work; |
78b1d470 | 194 | |
48d1d28e JS |
195 | vbl_work = kzalloc(sizeof(*vbl_work), GFP_ATOMIC); |
196 | if (!vbl_work) | |
78b1d470 HL |
197 | return -ENOMEM; |
198 | ||
48d1d28e | 199 | INIT_WORK(&vbl_work->work, vblank_ctrl_worker); |
78b1d470 | 200 | |
48d1d28e JS |
201 | vbl_work->crtc_id = crtc_id; |
202 | vbl_work->enable = enable; | |
203 | vbl_work->priv = priv; | |
78b1d470 | 204 | |
48d1d28e | 205 | queue_work(priv->wq, &vbl_work->work); |
78b1d470 HL |
206 | |
207 | return 0; | |
208 | } | |
209 | ||
2b669875 | 210 | static int msm_drm_uninit(struct device *dev) |
c8afe684 | 211 | { |
2b669875 AT |
212 | struct platform_device *pdev = to_platform_device(dev); |
213 | struct drm_device *ddev = platform_get_drvdata(pdev); | |
214 | struct msm_drm_private *priv = ddev->dev_private; | |
c8afe684 | 215 | struct msm_kms *kms = priv->kms; |
bc3220be | 216 | struct msm_mdss *mdss = priv->mdss; |
25fdd593 | 217 | int i; |
78b1d470 | 218 | |
2aa31767 SP |
219 | /* |
220 | * Shutdown the hw if we're far enough along where things might be on. | |
221 | * If we run this too early, we'll end up panicking in any variety of | |
222 | * places. Since we don't register the drm device until late in | |
223 | * msm_drm_init, drm_dev->registered is used as an indicator that the | |
224 | * shutdown will be successful. | |
225 | */ | |
226 | if (ddev->registered) { | |
227 | drm_dev_unregister(ddev); | |
228 | drm_atomic_helper_shutdown(ddev); | |
229 | } | |
230 | ||
78b1d470 HL |
231 | /* We must cancel and cleanup any pending vblank enable/disable |
232 | * work before drm_irq_uninstall() to avoid work re-enabling an | |
233 | * irq after uninstall has disabled it. | |
234 | */ | |
c8afe684 | 235 | |
48d1d28e | 236 | flush_workqueue(priv->wq); |
25fdd593 | 237 | |
d9db30ce | 238 | /* clean up event worker threads */ |
25fdd593 | 239 | for (i = 0; i < priv->num_crtcs; i++) { |
25fdd593 | 240 | if (priv->event_thread[i].thread) { |
3c125682 | 241 | kthread_destroy_worker(&priv->event_thread[i].worker); |
25fdd593 JS |
242 | priv->event_thread[i].thread = NULL; |
243 | } | |
244 | } | |
245 | ||
68209390 RC |
246 | msm_gem_shrinker_cleanup(ddev); |
247 | ||
2b669875 AT |
248 | drm_kms_helper_poll_fini(ddev); |
249 | ||
85eac470 NT |
250 | msm_perf_debugfs_cleanup(priv); |
251 | msm_rd_debugfs_cleanup(priv); | |
252 | ||
1aaa57f5 AT |
253 | #ifdef CONFIG_DRM_FBDEV_EMULATION |
254 | if (fbdev && priv->fbdev) | |
2b669875 | 255 | msm_fbdev_free(ddev); |
1aaa57f5 | 256 | #endif |
2aa31767 | 257 | |
2b669875 | 258 | drm_mode_config_cleanup(ddev); |
c8afe684 | 259 | |
2b669875 AT |
260 | pm_runtime_get_sync(dev); |
261 | drm_irq_uninstall(ddev); | |
262 | pm_runtime_put_sync(dev); | |
c8afe684 | 263 | |
16976085 | 264 | if (kms && kms->funcs) |
c8afe684 | 265 | kms->funcs->destroy(kms); |
c8afe684 | 266 | |
871d812a | 267 | if (priv->vram.paddr) { |
00085f1e | 268 | unsigned long attrs = DMA_ATTR_NO_KERNEL_MAPPING; |
871d812a | 269 | drm_mm_takedown(&priv->vram.mm); |
2b669875 | 270 | dma_free_attrs(dev, priv->vram.size, NULL, |
00085f1e | 271 | priv->vram.paddr, attrs); |
871d812a RC |
272 | } |
273 | ||
2b669875 | 274 | component_unbind_all(dev, ddev); |
060530f1 | 275 | |
bc3220be RY |
276 | if (mdss && mdss->funcs) |
277 | mdss->funcs->destroy(ddev); | |
0a6030d2 | 278 | |
2b669875 | 279 | ddev->dev_private = NULL; |
4d8dc2df | 280 | drm_dev_put(ddev); |
c8afe684 | 281 | |
2aa31767 | 282 | destroy_workqueue(priv->wq); |
c8afe684 RC |
283 | kfree(priv); |
284 | ||
285 | return 0; | |
286 | } | |
287 | ||
aaded2e3 JS |
288 | #define KMS_MDP4 4 |
289 | #define KMS_MDP5 5 | |
25fdd593 | 290 | #define KMS_DPU 3 |
aaded2e3 | 291 | |
06c0dd96 RC |
292 | static int get_mdp_ver(struct platform_device *pdev) |
293 | { | |
06c0dd96 | 294 | struct device *dev = &pdev->dev; |
e9fbdaf2 AT |
295 | |
296 | return (int) (unsigned long) of_device_get_match_data(dev); | |
06c0dd96 RC |
297 | } |
298 | ||
072f1f91 RC |
299 | #include <linux/of_address.h> |
300 | ||
c2052a4e JM |
301 | bool msm_use_mmu(struct drm_device *dev) |
302 | { | |
303 | struct msm_drm_private *priv = dev->dev_private; | |
304 | ||
305 | /* a2xx comes with its own MMU */ | |
306 | return priv->is_a2xx || iommu_present(&platform_bus_type); | |
307 | } | |
308 | ||
5bf9c0b6 | 309 | static int msm_init_vram(struct drm_device *dev) |
c8afe684 | 310 | { |
5bf9c0b6 | 311 | struct msm_drm_private *priv = dev->dev_private; |
e9fbdaf2 | 312 | struct device_node *node; |
072f1f91 RC |
313 | unsigned long size = 0; |
314 | int ret = 0; | |
315 | ||
072f1f91 RC |
316 | /* In the device-tree world, we could have a 'memory-region' |
317 | * phandle, which gives us a link to our "vram". Allocating | |
318 | * is all nicely abstracted behind the dma api, but we need | |
319 | * to know the entire size to allocate it all in one go. There | |
320 | * are two cases: | |
321 | * 1) device with no IOMMU, in which case we need exclusive | |
322 | * access to a VRAM carveout big enough for all gpu | |
323 | * buffers | |
324 | * 2) device with IOMMU, but where the bootloader puts up | |
325 | * a splash screen. In this case, the VRAM carveout | |
326 | * need only be large enough for fbdev fb. But we need | |
327 | * exclusive access to the buffer to avoid the kernel | |
328 | * using those pages for other purposes (which appears | |
329 | * as corruption on screen before we have a chance to | |
330 | * load and do initial modeset) | |
331 | */ | |
072f1f91 RC |
332 | |
333 | node = of_parse_phandle(dev->dev->of_node, "memory-region", 0); | |
334 | if (node) { | |
335 | struct resource r; | |
336 | ret = of_address_to_resource(node, 0, &r); | |
2ca41c17 | 337 | of_node_put(node); |
072f1f91 RC |
338 | if (ret) |
339 | return ret; | |
340 | size = r.end - r.start; | |
fc99f97a | 341 | DRM_INFO("using VRAM carveout: %lx@%pa\n", size, &r.start); |
c8afe684 | 342 | |
e9fbdaf2 AT |
343 | /* if we have no IOMMU, then we need to use carveout allocator. |
344 | * Grab the entire CMA chunk carved out in early startup in | |
345 | * mach-msm: | |
346 | */ | |
c2052a4e | 347 | } else if (!msm_use_mmu(dev)) { |
072f1f91 RC |
348 | DRM_INFO("using %s VRAM carveout\n", vram); |
349 | size = memparse(vram, NULL); | |
350 | } | |
351 | ||
352 | if (size) { | |
00085f1e | 353 | unsigned long attrs = 0; |
871d812a RC |
354 | void *p; |
355 | ||
871d812a RC |
356 | priv->vram.size = size; |
357 | ||
358 | drm_mm_init(&priv->vram.mm, 0, (size >> PAGE_SHIFT) - 1); | |
0e08270a | 359 | spin_lock_init(&priv->vram.lock); |
871d812a | 360 | |
00085f1e KK |
361 | attrs |= DMA_ATTR_NO_KERNEL_MAPPING; |
362 | attrs |= DMA_ATTR_WRITE_COMBINE; | |
871d812a RC |
363 | |
364 | /* note that for no-kernel-mapping, the vaddr returned | |
365 | * is bogus, but non-null if allocation succeeded: | |
366 | */ | |
367 | p = dma_alloc_attrs(dev->dev, size, | |
00085f1e | 368 | &priv->vram.paddr, GFP_KERNEL, attrs); |
871d812a | 369 | if (!p) { |
6a41da17 | 370 | DRM_DEV_ERROR(dev->dev, "failed to allocate VRAM\n"); |
871d812a | 371 | priv->vram.paddr = 0; |
5bf9c0b6 | 372 | return -ENOMEM; |
871d812a RC |
373 | } |
374 | ||
6a41da17 | 375 | DRM_DEV_INFO(dev->dev, "VRAM: %08x->%08x\n", |
871d812a RC |
376 | (uint32_t)priv->vram.paddr, |
377 | (uint32_t)(priv->vram.paddr + size)); | |
378 | } | |
379 | ||
072f1f91 | 380 | return ret; |
5bf9c0b6 RC |
381 | } |
382 | ||
2b669875 | 383 | static int msm_drm_init(struct device *dev, struct drm_driver *drv) |
5bf9c0b6 | 384 | { |
2b669875 AT |
385 | struct platform_device *pdev = to_platform_device(dev); |
386 | struct drm_device *ddev; | |
5bf9c0b6 RC |
387 | struct msm_drm_private *priv; |
388 | struct msm_kms *kms; | |
bc3220be | 389 | struct msm_mdss *mdss; |
25fdd593 JS |
390 | int ret, i; |
391 | struct sched_param param; | |
5bf9c0b6 | 392 | |
2b669875 | 393 | ddev = drm_dev_alloc(drv, dev); |
0f288605 | 394 | if (IS_ERR(ddev)) { |
6a41da17 | 395 | DRM_DEV_ERROR(dev, "failed to allocate drm_device\n"); |
0f288605 | 396 | return PTR_ERR(ddev); |
2b669875 AT |
397 | } |
398 | ||
399 | platform_set_drvdata(pdev, ddev); | |
2b669875 | 400 | |
5bf9c0b6 RC |
401 | priv = kzalloc(sizeof(*priv), GFP_KERNEL); |
402 | if (!priv) { | |
77050c3f | 403 | ret = -ENOMEM; |
4d8dc2df | 404 | goto err_put_drm_dev; |
5bf9c0b6 RC |
405 | } |
406 | ||
2b669875 | 407 | ddev->dev_private = priv; |
68209390 | 408 | priv->dev = ddev; |
5bf9c0b6 | 409 | |
25fdd593 JS |
410 | switch (get_mdp_ver(pdev)) { |
411 | case KMS_MDP5: | |
412 | ret = mdp5_mdss_init(ddev); | |
413 | break; | |
414 | case KMS_DPU: | |
415 | ret = dpu_mdss_init(ddev); | |
416 | break; | |
417 | default: | |
418 | ret = 0; | |
419 | break; | |
420 | } | |
77050c3f JS |
421 | if (ret) |
422 | goto err_free_priv; | |
0a6030d2 | 423 | |
bc3220be RY |
424 | mdss = priv->mdss; |
425 | ||
5bf9c0b6 | 426 | priv->wq = alloc_ordered_workqueue("msm", 0); |
5bf9c0b6 | 427 | |
48e7f183 KK |
428 | INIT_WORK(&priv->free_work, msm_gem_free_work); |
429 | init_llist_head(&priv->free_list); | |
430 | ||
5bf9c0b6 | 431 | INIT_LIST_HEAD(&priv->inactive_list); |
5bf9c0b6 | 432 | |
2b669875 | 433 | drm_mode_config_init(ddev); |
060530f1 RC |
434 | |
435 | /* Bind all our sub-components: */ | |
2b669875 | 436 | ret = component_bind_all(dev, ddev); |
77050c3f JS |
437 | if (ret) |
438 | goto err_destroy_mdss; | |
060530f1 | 439 | |
2b669875 | 440 | ret = msm_init_vram(ddev); |
13f15565 | 441 | if (ret) |
77050c3f | 442 | goto err_msm_uninit; |
13f15565 | 443 | |
db735fc4 SP |
444 | if (!dev->dma_parms) { |
445 | dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms), | |
446 | GFP_KERNEL); | |
447 | if (!dev->dma_parms) | |
448 | return -ENOMEM; | |
449 | } | |
450 | dma_set_max_seg_size(dev, DMA_BIT_MASK(32)); | |
451 | ||
68209390 RC |
452 | msm_gem_shrinker_init(ddev); |
453 | ||
06c0dd96 | 454 | switch (get_mdp_ver(pdev)) { |
aaded2e3 | 455 | case KMS_MDP4: |
2b669875 | 456 | kms = mdp4_kms_init(ddev); |
0a6030d2 | 457 | priv->kms = kms; |
06c0dd96 | 458 | break; |
aaded2e3 | 459 | case KMS_MDP5: |
392ae6e0 | 460 | kms = mdp5_kms_init(ddev); |
06c0dd96 | 461 | break; |
25fdd593 JS |
462 | case KMS_DPU: |
463 | kms = dpu_kms_init(ddev); | |
464 | priv->kms = kms; | |
465 | break; | |
06c0dd96 | 466 | default: |
e6f6d63e JM |
467 | /* valid only for the dummy headless case, where of_node=NULL */ |
468 | WARN_ON(dev->of_node); | |
469 | kms = NULL; | |
06c0dd96 RC |
470 | break; |
471 | } | |
472 | ||
c8afe684 | 473 | if (IS_ERR(kms)) { |
6a41da17 | 474 | DRM_DEV_ERROR(dev, "failed to load kms\n"); |
e4826a94 | 475 | ret = PTR_ERR(kms); |
b2ccfdf1 | 476 | priv->kms = NULL; |
77050c3f | 477 | goto err_msm_uninit; |
c8afe684 RC |
478 | } |
479 | ||
bb676df1 JS |
480 | /* Enable normalization of plane zpos */ |
481 | ddev->mode_config.normalize_zpos = true; | |
482 | ||
c8afe684 | 483 | if (kms) { |
2d99ced7 | 484 | kms->dev = ddev; |
c8afe684 RC |
485 | ret = kms->funcs->hw_init(kms); |
486 | if (ret) { | |
6a41da17 | 487 | DRM_DEV_ERROR(dev, "kms hw init failed: %d\n", ret); |
77050c3f | 488 | goto err_msm_uninit; |
c8afe684 RC |
489 | } |
490 | } | |
491 | ||
2b669875 | 492 | ddev->mode_config.funcs = &mode_config_funcs; |
d14659f5 | 493 | ddev->mode_config.helper_private = &mode_config_helper_funcs; |
c8afe684 | 494 | |
25fdd593 JS |
495 | /** |
496 | * this priority was found during empiric testing to have appropriate | |
497 | * realtime scheduling to process display updates and interact with | |
498 | * other real time and normal priority task | |
499 | */ | |
500 | param.sched_priority = 16; | |
501 | for (i = 0; i < priv->num_crtcs; i++) { | |
25fdd593 JS |
502 | /* initialize event thread */ |
503 | priv->event_thread[i].crtc_id = priv->crtcs[i]->base.id; | |
504 | kthread_init_worker(&priv->event_thread[i].worker); | |
505 | priv->event_thread[i].dev = ddev; | |
506 | priv->event_thread[i].thread = | |
507 | kthread_run(kthread_worker_fn, | |
508 | &priv->event_thread[i].worker, | |
509 | "crtc_event:%d", priv->event_thread[i].crtc_id); | |
7f9743ab | 510 | if (IS_ERR(priv->event_thread[i].thread)) { |
4971f090 | 511 | DRM_DEV_ERROR(dev, "failed to create crtc_event kthread\n"); |
7f9743ab JS |
512 | priv->event_thread[i].thread = NULL; |
513 | goto err_msm_uninit; | |
514 | } | |
515 | ||
25fdd593 | 516 | ret = sched_setscheduler(priv->event_thread[i].thread, |
7f9743ab | 517 | SCHED_FIFO, ¶m); |
25fdd593 | 518 | if (ret) |
7f9743ab JS |
519 | dev_warn(dev, "event_thread set priority failed:%d\n", |
520 | ret); | |
25fdd593 JS |
521 | } |
522 | ||
2b669875 | 523 | ret = drm_vblank_init(ddev, priv->num_crtcs); |
c8afe684 | 524 | if (ret < 0) { |
6a41da17 | 525 | DRM_DEV_ERROR(dev, "failed to initialize vblank\n"); |
77050c3f | 526 | goto err_msm_uninit; |
c8afe684 RC |
527 | } |
528 | ||
a2b3a557 AT |
529 | if (kms) { |
530 | pm_runtime_get_sync(dev); | |
531 | ret = drm_irq_install(ddev, kms->irq); | |
532 | pm_runtime_put_sync(dev); | |
533 | if (ret < 0) { | |
6a41da17 | 534 | DRM_DEV_ERROR(dev, "failed to install IRQ handler\n"); |
77050c3f | 535 | goto err_msm_uninit; |
a2b3a557 | 536 | } |
c8afe684 RC |
537 | } |
538 | ||
2b669875 AT |
539 | ret = drm_dev_register(ddev, 0); |
540 | if (ret) | |
77050c3f | 541 | goto err_msm_uninit; |
2b669875 | 542 | |
2b669875 | 543 | drm_mode_config_reset(ddev); |
cf3a7e4c | 544 | |
a9ee34b7 | 545 | #ifdef CONFIG_DRM_FBDEV_EMULATION |
e6f6d63e | 546 | if (kms && fbdev) |
2b669875 | 547 | priv->fbdev = msm_fbdev_init(ddev); |
c8afe684 RC |
548 | #endif |
549 | ||
2b669875 | 550 | ret = msm_debugfs_late_init(ddev); |
a7d3c950 | 551 | if (ret) |
77050c3f | 552 | goto err_msm_uninit; |
a7d3c950 | 553 | |
2b669875 | 554 | drm_kms_helper_poll_init(ddev); |
c8afe684 RC |
555 | |
556 | return 0; | |
557 | ||
77050c3f | 558 | err_msm_uninit: |
2b669875 | 559 | msm_drm_uninit(dev); |
c8afe684 | 560 | return ret; |
77050c3f JS |
561 | err_destroy_mdss: |
562 | if (mdss && mdss->funcs) | |
563 | mdss->funcs->destroy(ddev); | |
564 | err_free_priv: | |
565 | kfree(priv); | |
4d8dc2df TZ |
566 | err_put_drm_dev: |
567 | drm_dev_put(ddev); | |
77050c3f | 568 | return ret; |
c8afe684 RC |
569 | } |
570 | ||
2b669875 AT |
571 | /* |
572 | * DRM operations: | |
573 | */ | |
574 | ||
7198e6b0 RC |
575 | static void load_gpu(struct drm_device *dev) |
576 | { | |
a1ad3523 | 577 | static DEFINE_MUTEX(init_lock); |
7198e6b0 | 578 | struct msm_drm_private *priv = dev->dev_private; |
7198e6b0 | 579 | |
a1ad3523 RC |
580 | mutex_lock(&init_lock); |
581 | ||
e2550b7a RC |
582 | if (!priv->gpu) |
583 | priv->gpu = adreno_load_gpu(dev); | |
7198e6b0 | 584 | |
a1ad3523 | 585 | mutex_unlock(&init_lock); |
7198e6b0 RC |
586 | } |
587 | ||
f97decac | 588 | static int context_init(struct drm_device *dev, struct drm_file *file) |
7198e6b0 | 589 | { |
295b22ae | 590 | struct msm_drm_private *priv = dev->dev_private; |
7198e6b0 RC |
591 | struct msm_file_private *ctx; |
592 | ||
7198e6b0 RC |
593 | ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); |
594 | if (!ctx) | |
595 | return -ENOMEM; | |
596 | ||
f97decac | 597 | msm_submitqueue_init(dev, ctx); |
f7de1545 | 598 | |
7af5cdb1 | 599 | ctx->aspace = priv->gpu ? priv->gpu->aspace : NULL; |
7198e6b0 RC |
600 | file->driver_priv = ctx; |
601 | ||
602 | return 0; | |
603 | } | |
604 | ||
f7de1545 JC |
605 | static int msm_open(struct drm_device *dev, struct drm_file *file) |
606 | { | |
607 | /* For now, load gpu on open.. to avoid the requirement of having | |
608 | * firmware in the initrd. | |
609 | */ | |
610 | load_gpu(dev); | |
611 | ||
f97decac | 612 | return context_init(dev, file); |
f7de1545 JC |
613 | } |
614 | ||
615 | static void context_close(struct msm_file_private *ctx) | |
616 | { | |
617 | msm_submitqueue_close(ctx); | |
618 | kfree(ctx); | |
619 | } | |
620 | ||
94df145c | 621 | static void msm_postclose(struct drm_device *dev, struct drm_file *file) |
c8afe684 RC |
622 | { |
623 | struct msm_drm_private *priv = dev->dev_private; | |
7198e6b0 | 624 | struct msm_file_private *ctx = file->driver_priv; |
7198e6b0 | 625 | |
7198e6b0 RC |
626 | mutex_lock(&dev->struct_mutex); |
627 | if (ctx == priv->lastctx) | |
628 | priv->lastctx = NULL; | |
629 | mutex_unlock(&dev->struct_mutex); | |
630 | ||
f7de1545 | 631 | context_close(ctx); |
c8afe684 RC |
632 | } |
633 | ||
e9f0d76f | 634 | static irqreturn_t msm_irq(int irq, void *arg) |
c8afe684 RC |
635 | { |
636 | struct drm_device *dev = arg; | |
637 | struct msm_drm_private *priv = dev->dev_private; | |
638 | struct msm_kms *kms = priv->kms; | |
639 | BUG_ON(!kms); | |
640 | return kms->funcs->irq(kms); | |
641 | } | |
642 | ||
643 | static void msm_irq_preinstall(struct drm_device *dev) | |
644 | { | |
645 | struct msm_drm_private *priv = dev->dev_private; | |
646 | struct msm_kms *kms = priv->kms; | |
647 | BUG_ON(!kms); | |
648 | kms->funcs->irq_preinstall(kms); | |
649 | } | |
650 | ||
651 | static int msm_irq_postinstall(struct drm_device *dev) | |
652 | { | |
653 | struct msm_drm_private *priv = dev->dev_private; | |
654 | struct msm_kms *kms = priv->kms; | |
655 | BUG_ON(!kms); | |
ab07e0c1 JC |
656 | |
657 | if (kms->funcs->irq_postinstall) | |
658 | return kms->funcs->irq_postinstall(kms); | |
659 | ||
660 | return 0; | |
c8afe684 RC |
661 | } |
662 | ||
663 | static void msm_irq_uninstall(struct drm_device *dev) | |
664 | { | |
665 | struct msm_drm_private *priv = dev->dev_private; | |
666 | struct msm_kms *kms = priv->kms; | |
667 | BUG_ON(!kms); | |
668 | kms->funcs->irq_uninstall(kms); | |
669 | } | |
670 | ||
88e72717 | 671 | static int msm_enable_vblank(struct drm_device *dev, unsigned int pipe) |
c8afe684 RC |
672 | { |
673 | struct msm_drm_private *priv = dev->dev_private; | |
674 | struct msm_kms *kms = priv->kms; | |
675 | if (!kms) | |
676 | return -ENXIO; | |
88e72717 TR |
677 | DBG("dev=%p, crtc=%u", dev, pipe); |
678 | return vblank_ctrl_queue_work(priv, pipe, true); | |
c8afe684 RC |
679 | } |
680 | ||
88e72717 | 681 | static void msm_disable_vblank(struct drm_device *dev, unsigned int pipe) |
c8afe684 RC |
682 | { |
683 | struct msm_drm_private *priv = dev->dev_private; | |
684 | struct msm_kms *kms = priv->kms; | |
685 | if (!kms) | |
686 | return; | |
88e72717 TR |
687 | DBG("dev=%p, crtc=%u", dev, pipe); |
688 | vblank_ctrl_queue_work(priv, pipe, false); | |
c8afe684 RC |
689 | } |
690 | ||
7198e6b0 RC |
691 | /* |
692 | * DRM ioctls: | |
693 | */ | |
694 | ||
695 | static int msm_ioctl_get_param(struct drm_device *dev, void *data, | |
696 | struct drm_file *file) | |
697 | { | |
698 | struct msm_drm_private *priv = dev->dev_private; | |
699 | struct drm_msm_param *args = data; | |
700 | struct msm_gpu *gpu; | |
701 | ||
702 | /* for now, we just have 3d pipe.. eventually this would need to | |
703 | * be more clever to dispatch to appropriate gpu module: | |
704 | */ | |
705 | if (args->pipe != MSM_PIPE_3D0) | |
706 | return -EINVAL; | |
707 | ||
708 | gpu = priv->gpu; | |
709 | ||
710 | if (!gpu) | |
711 | return -ENXIO; | |
712 | ||
713 | return gpu->funcs->get_param(gpu, args->param, &args->value); | |
714 | } | |
715 | ||
716 | static int msm_ioctl_gem_new(struct drm_device *dev, void *data, | |
717 | struct drm_file *file) | |
718 | { | |
719 | struct drm_msm_gem_new *args = data; | |
93ddb0d3 RC |
720 | |
721 | if (args->flags & ~MSM_BO_FLAGS) { | |
722 | DRM_ERROR("invalid flags: %08x\n", args->flags); | |
723 | return -EINVAL; | |
724 | } | |
725 | ||
7198e6b0 | 726 | return msm_gem_new_handle(dev, file, args->size, |
0815d774 | 727 | args->flags, &args->handle, NULL); |
7198e6b0 RC |
728 | } |
729 | ||
56c2da83 RC |
730 | static inline ktime_t to_ktime(struct drm_msm_timespec timeout) |
731 | { | |
732 | return ktime_set(timeout.tv_sec, timeout.tv_nsec); | |
733 | } | |
7198e6b0 RC |
734 | |
735 | static int msm_ioctl_gem_cpu_prep(struct drm_device *dev, void *data, | |
736 | struct drm_file *file) | |
737 | { | |
738 | struct drm_msm_gem_cpu_prep *args = data; | |
739 | struct drm_gem_object *obj; | |
56c2da83 | 740 | ktime_t timeout = to_ktime(args->timeout); |
7198e6b0 RC |
741 | int ret; |
742 | ||
93ddb0d3 RC |
743 | if (args->op & ~MSM_PREP_FLAGS) { |
744 | DRM_ERROR("invalid op: %08x\n", args->op); | |
745 | return -EINVAL; | |
746 | } | |
747 | ||
a8ad0bd8 | 748 | obj = drm_gem_object_lookup(file, args->handle); |
7198e6b0 RC |
749 | if (!obj) |
750 | return -ENOENT; | |
751 | ||
56c2da83 | 752 | ret = msm_gem_cpu_prep(obj, args->op, &timeout); |
7198e6b0 | 753 | |
dc9a9b32 | 754 | drm_gem_object_put_unlocked(obj); |
7198e6b0 RC |
755 | |
756 | return ret; | |
757 | } | |
758 | ||
759 | static int msm_ioctl_gem_cpu_fini(struct drm_device *dev, void *data, | |
760 | struct drm_file *file) | |
761 | { | |
762 | struct drm_msm_gem_cpu_fini *args = data; | |
763 | struct drm_gem_object *obj; | |
764 | int ret; | |
765 | ||
a8ad0bd8 | 766 | obj = drm_gem_object_lookup(file, args->handle); |
7198e6b0 RC |
767 | if (!obj) |
768 | return -ENOENT; | |
769 | ||
770 | ret = msm_gem_cpu_fini(obj); | |
771 | ||
dc9a9b32 | 772 | drm_gem_object_put_unlocked(obj); |
7198e6b0 RC |
773 | |
774 | return ret; | |
775 | } | |
776 | ||
49fd08ba JC |
777 | static int msm_ioctl_gem_info_iova(struct drm_device *dev, |
778 | struct drm_gem_object *obj, uint64_t *iova) | |
779 | { | |
780 | struct msm_drm_private *priv = dev->dev_private; | |
781 | ||
782 | if (!priv->gpu) | |
783 | return -EINVAL; | |
784 | ||
9fe041f6 JC |
785 | /* |
786 | * Don't pin the memory here - just get an address so that userspace can | |
787 | * be productive | |
788 | */ | |
8bdcd949 | 789 | return msm_gem_get_iova(obj, priv->gpu->aspace, iova); |
49fd08ba JC |
790 | } |
791 | ||
7198e6b0 RC |
792 | static int msm_ioctl_gem_info(struct drm_device *dev, void *data, |
793 | struct drm_file *file) | |
794 | { | |
795 | struct drm_msm_gem_info *args = data; | |
796 | struct drm_gem_object *obj; | |
f05c83e7 RC |
797 | struct msm_gem_object *msm_obj; |
798 | int i, ret = 0; | |
7198e6b0 | 799 | |
789d2e5a | 800 | if (args->pad) |
7198e6b0 RC |
801 | return -EINVAL; |
802 | ||
789d2e5a RC |
803 | switch (args->info) { |
804 | case MSM_INFO_GET_OFFSET: | |
805 | case MSM_INFO_GET_IOVA: | |
806 | /* value returned as immediate, not pointer, so len==0: */ | |
807 | if (args->len) | |
808 | return -EINVAL; | |
809 | break; | |
f05c83e7 RC |
810 | case MSM_INFO_SET_NAME: |
811 | case MSM_INFO_GET_NAME: | |
812 | break; | |
789d2e5a | 813 | default: |
7198e6b0 | 814 | return -EINVAL; |
789d2e5a | 815 | } |
7198e6b0 | 816 | |
a8ad0bd8 | 817 | obj = drm_gem_object_lookup(file, args->handle); |
7198e6b0 RC |
818 | if (!obj) |
819 | return -ENOENT; | |
820 | ||
f05c83e7 | 821 | msm_obj = to_msm_bo(obj); |
49fd08ba | 822 | |
789d2e5a RC |
823 | switch (args->info) { |
824 | case MSM_INFO_GET_OFFSET: | |
825 | args->value = msm_gem_mmap_offset(obj); | |
826 | break; | |
827 | case MSM_INFO_GET_IOVA: | |
828 | ret = msm_ioctl_gem_info_iova(dev, obj, &args->value); | |
829 | break; | |
f05c83e7 RC |
830 | case MSM_INFO_SET_NAME: |
831 | /* length check should leave room for terminating null: */ | |
832 | if (args->len >= sizeof(msm_obj->name)) { | |
833 | ret = -EINVAL; | |
834 | break; | |
835 | } | |
7cce8e4e | 836 | if (copy_from_user(msm_obj->name, u64_to_user_ptr(args->value), |
860433ed JC |
837 | args->len)) { |
838 | msm_obj->name[0] = '\0'; | |
7cce8e4e | 839 | ret = -EFAULT; |
860433ed JC |
840 | break; |
841 | } | |
f05c83e7 RC |
842 | msm_obj->name[args->len] = '\0'; |
843 | for (i = 0; i < args->len; i++) { | |
844 | if (!isprint(msm_obj->name[i])) { | |
845 | msm_obj->name[i] = '\0'; | |
846 | break; | |
847 | } | |
848 | } | |
849 | break; | |
850 | case MSM_INFO_GET_NAME: | |
851 | if (args->value && (args->len < strlen(msm_obj->name))) { | |
852 | ret = -EINVAL; | |
853 | break; | |
854 | } | |
855 | args->len = strlen(msm_obj->name); | |
856 | if (args->value) { | |
7cce8e4e DC |
857 | if (copy_to_user(u64_to_user_ptr(args->value), |
858 | msm_obj->name, args->len)) | |
859 | ret = -EFAULT; | |
f05c83e7 RC |
860 | } |
861 | break; | |
49fd08ba | 862 | } |
7198e6b0 | 863 | |
dc9a9b32 | 864 | drm_gem_object_put_unlocked(obj); |
7198e6b0 RC |
865 | |
866 | return ret; | |
867 | } | |
868 | ||
/*
 * MSM_WAIT_FENCE ioctl: block (up to args->timeout) until the given fence
 * seqno has signaled on the submitqueue identified by args->queueid.
 *
 * Returns 0 on success (or immediately if no GPU is present), -EINVAL on a
 * non-zero pad field, -ENOENT for an unknown queue id, or the result of
 * msm_wait_fence() (e.g. -ETIMEDOUT).
 */
static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_msm_wait_fence *args = data;
	ktime_t timeout = to_ktime(args->timeout);
	struct msm_gpu_submitqueue *queue;
	struct msm_gpu *gpu = priv->gpu;
	int ret;

	/* Reject garbage in the reserved field so it can be reused later. */
	if (args->pad) {
		DRM_ERROR("invalid pad: %08x\n", args->pad);
		return -EINVAL;
	}

	/* No GPU: nothing could ever have been submitted, so nothing to wait on. */
	if (!gpu)
		return 0;

	/* Takes a reference on the queue; must be balanced by _put() below. */
	queue = msm_submitqueue_get(file->driver_priv, args->queueid);
	if (!queue)
		return -ENOENT;

	/* Each queue priority maps to its own ringbuffer fence context. */
	ret = msm_wait_fence(gpu->rb[queue->prio]->fctx, args->fence, &timeout,
		true);

	msm_submitqueue_put(queue);
	return ret;
}
897 | ||
4cd33c48 RC |
/*
 * MSM_GEM_MADVISE ioctl: tell the kernel whether a GEM object's backing
 * pages are needed (WILLNEED) or may be reclaimed under memory pressure
 * (DONTNEED).  args->retained reports whether the backing store still
 * exists after the call.
 */
static int msm_ioctl_gem_madvise(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_madvise *args = data;
	struct drm_gem_object *obj;
	int ret;

	/* Only the two defined advice values are accepted. */
	switch (args->madv) {
	case MSM_MADV_DONTNEED:
	case MSM_MADV_WILLNEED:
		break;
	default:
		return -EINVAL;
	}

	/* struct_mutex serializes madvise state against the shrinker. */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj) {
		ret = -ENOENT;
		goto unlock;
	}

	/* msm_gem_madvise() returns >= 0 (retained flag) or a negative errno. */
	ret = msm_gem_madvise(obj, args->madv);
	if (ret >= 0) {
		args->retained = ret;
		ret = 0;
	}

	/* Drop the lookup reference; locked put since struct_mutex is held. */
	drm_gem_object_put(obj);

unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
935 | ||
f7de1545 JC |
936 | |
/*
 * MSM_SUBMITQUEUE_NEW ioctl: create a per-file submitqueue with the
 * requested priority and flags; the new queue id is returned through
 * args->id.  Unknown flag bits are rejected with -EINVAL.
 */
static int msm_ioctl_submitqueue_new(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_submitqueue *args = data;

	if (args->flags & ~MSM_SUBMITQUEUE_FLAGS)
		return -EINVAL;

	return msm_submitqueue_create(dev, file->driver_priv, args->prio,
		args->flags, &args->id);
}
948 | ||
b0fb6604 JC |
/*
 * MSM_SUBMITQUEUE_QUERY ioctl: thin wrapper that forwards the raw args
 * struct to msm_submitqueue_query() for per-queue parameter readback.
 */
static int msm_ioctl_submitqueue_query(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	return msm_submitqueue_query(dev, file->driver_priv, data);
}
f7de1545 JC |
954 | |
955 | static int msm_ioctl_submitqueue_close(struct drm_device *dev, void *data, | |
956 | struct drm_file *file) | |
957 | { | |
958 | u32 id = *(u32 *) data; | |
959 | ||
960 | return msm_submitqueue_remove(file->driver_priv, id); | |
961 | } | |
962 | ||
7198e6b0 | 963 | static const struct drm_ioctl_desc msm_ioctls[] = { |
34127c7a EV |
964 | DRM_IOCTL_DEF_DRV(MSM_GET_PARAM, msm_ioctl_get_param, DRM_RENDER_ALLOW), |
965 | DRM_IOCTL_DEF_DRV(MSM_GEM_NEW, msm_ioctl_gem_new, DRM_RENDER_ALLOW), | |
966 | DRM_IOCTL_DEF_DRV(MSM_GEM_INFO, msm_ioctl_gem_info, DRM_RENDER_ALLOW), | |
967 | DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_PREP, msm_ioctl_gem_cpu_prep, DRM_RENDER_ALLOW), | |
968 | DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_RENDER_ALLOW), | |
969 | DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT, msm_ioctl_gem_submit, DRM_RENDER_ALLOW), | |
970 | DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE, msm_ioctl_wait_fence, DRM_RENDER_ALLOW), | |
971 | DRM_IOCTL_DEF_DRV(MSM_GEM_MADVISE, msm_ioctl_gem_madvise, DRM_RENDER_ALLOW), | |
972 | DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_NEW, msm_ioctl_submitqueue_new, DRM_RENDER_ALLOW), | |
973 | DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_CLOSE, msm_ioctl_submitqueue_close, DRM_RENDER_ALLOW), | |
974 | DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_QUERY, msm_ioctl_submitqueue_query, DRM_RENDER_ALLOW), | |
7198e6b0 RC |
975 | }; |
976 | ||
c8afe684 RC |
/* vm_operations for mmap'd GEM objects: driver fault handler plus the
 * stock DRM open/close refcounting hooks.
 */
static const struct vm_operations_struct vm_ops = {
	.fault = msm_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};
982 | ||
/* /dev/dri file_operations: stock DRM handlers everywhere except mmap,
 * which routes through msm_gem_mmap for GEM-object mappings.
 */
static const struct file_operations fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.compat_ioctl = drm_compat_ioctl,
	.poll = drm_poll,
	.read = drm_read,
	.llseek = no_llseek,
	.mmap = msm_gem_mmap,
};
994 | ||
/*
 * Top-level DRM driver description: GEM + render-node + atomic modeset,
 * with PRIME import/export wired to the msm_gem_prime_* helpers and the
 * UAPI version numbers defined at the top of this file.
 */
static struct drm_driver msm_driver = {
	.driver_features    = DRIVER_GEM |
				DRIVER_RENDER |
				DRIVER_ATOMIC |
				DRIVER_MODESET,
	.open               = msm_open,
	.postclose          = msm_postclose,
	.lastclose          = drm_fb_helper_lastclose,
	/* IRQ plumbing is delegated to the kms backend via these hooks. */
	.irq_handler        = msm_irq,
	.irq_preinstall     = msm_irq_preinstall,
	.irq_postinstall    = msm_irq_postinstall,
	.irq_uninstall      = msm_irq_uninstall,
	.enable_vblank      = msm_enable_vblank,
	.disable_vblank     = msm_disable_vblank,
	.gem_free_object_unlocked = msm_gem_free_object,
	.gem_vm_ops         = &vm_ops,
	.dumb_create        = msm_gem_dumb_create,
	.dumb_map_offset    = msm_gem_dumb_map_offset,
	/* PRIME (dma-buf) export/import support: */
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_pin      = msm_gem_prime_pin,
	.gem_prime_unpin    = msm_gem_prime_unpin,
	.gem_prime_get_sg_table = msm_gem_prime_get_sg_table,
	.gem_prime_import_sg_table = msm_gem_prime_import_sg_table,
	.gem_prime_vmap     = msm_gem_prime_vmap,
	.gem_prime_vunmap   = msm_gem_prime_vunmap,
	.gem_prime_mmap     = msm_gem_prime_mmap,
#ifdef CONFIG_DEBUG_FS
	.debugfs_init       = msm_debugfs_init,
#endif
	.ioctls             = msm_ioctls,
	.num_ioctls         = ARRAY_SIZE(msm_ioctls),
	.fops               = &fops,
	.name               = "msm",
	.desc               = "MSM Snapdragon DRM",
	.date               = "20130625",
	.major              = MSM_VERSION_MAJOR,
	.minor              = MSM_VERSION_MINOR,
	.patchlevel         = MSM_VERSION_PATCHLEVEL,
};
1035 | ||
1036 | #ifdef CONFIG_PM_SLEEP | |
1037 | static int msm_pm_suspend(struct device *dev) | |
1038 | { | |
1039 | struct drm_device *ddev = dev_get_drvdata(dev); | |
ec446d09 | 1040 | struct msm_drm_private *priv = ddev->dev_private; |
c8afe684 | 1041 | |
3750e78c BW |
1042 | if (WARN_ON(priv->pm_state)) |
1043 | drm_atomic_state_put(priv->pm_state); | |
c8afe684 | 1044 | |
ec446d09 DM |
1045 | priv->pm_state = drm_atomic_helper_suspend(ddev); |
1046 | if (IS_ERR(priv->pm_state)) { | |
3750e78c BW |
1047 | int ret = PTR_ERR(priv->pm_state); |
1048 | DRM_ERROR("Failed to suspend dpu, %d\n", ret); | |
1049 | return ret; | |
ec446d09 DM |
1050 | } |
1051 | ||
c8afe684 RC |
1052 | return 0; |
1053 | } | |
1054 | ||
/*
 * System-sleep resume: restore the atomic display state saved by
 * msm_pm_suspend().  pm_state is cleared only on success so a failed
 * resume can be retried with the same saved state.
 */
static int msm_pm_resume(struct device *dev)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct msm_drm_private *priv = ddev->dev_private;
	int ret;

	/* Resume without a prior successful suspend is a driver bug. */
	if (WARN_ON(!priv->pm_state))
		return -ENOENT;

	ret = drm_atomic_helper_resume(ddev, priv->pm_state);
	if (!ret)
		priv->pm_state = NULL;

	return ret;
}
1070 | #endif | |
1071 | ||
774e39ee AT |
1072 | #ifdef CONFIG_PM |
/*
 * Runtime-PM suspend: power down the MDSS block if the platform has one
 * with power hooks; otherwise there is nothing to do.
 */
static int msm_runtime_suspend(struct device *dev)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct msm_drm_private *priv = ddev->dev_private;
	struct msm_mdss *mdss = priv->mdss;

	DBG("");

	/* mdss is NULL on MDP4-style platforms without an MDSS wrapper. */
	if (mdss && mdss->funcs)
		return mdss->funcs->disable(mdss);

	return 0;
}
1086 | ||
/*
 * Runtime-PM resume: mirror of msm_runtime_suspend() — re-enable the
 * MDSS block when present.
 */
static int msm_runtime_resume(struct device *dev)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct msm_drm_private *priv = ddev->dev_private;
	struct msm_mdss *mdss = priv->mdss;

	DBG("");

	if (mdss && mdss->funcs)
		return mdss->funcs->enable(mdss);

	return 0;
}
1100 | #endif | |
1101 | ||
c8afe684 RC |
/* Combined PM table: system sleep + runtime PM callbacks. */
static const struct dev_pm_ops msm_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(msm_pm_suspend, msm_pm_resume)
	SET_RUNTIME_PM_OPS(msm_runtime_suspend, msm_runtime_resume, NULL)
};
1106 | ||
060530f1 RC |
1107 | /* |
1108 | * Componentized driver support: | |
1109 | */ | |
1110 | ||
e9fbdaf2 AT |
1111 | /* |
1112 | * NOTE: duplication of the same code as exynos or imx (or probably any other). | |
1113 | * so probably some room for some helpers | |
060530f1 RC |
1114 | */ |
1115 | static int compare_of(struct device *dev, void *data) | |
1116 | { | |
1117 | return dev->of_node == data; | |
1118 | } | |
41e69778 | 1119 | |
812070eb AT |
/*
 * Identify what components need to be added by parsing what remote-endpoints
 * our MDP output ports are connected to. In the case of LVDS on MDP4, there
 * is no external component that we need to add since LVDS is within MDP4
 * itself.
 */
static int add_components_mdp(struct device *mdp_dev,
			      struct component_match **matchptr)
{
	struct device_node *np = mdp_dev->of_node;
	struct device_node *ep_node;
	struct device *master_dev;

	/*
	 * on MDP4 based platforms, the MDP platform device is the component
	 * master that adds other display interface components to itself.
	 *
	 * on MDP5 based platforms, the MDSS platform device is the component
	 * master that adds MDP5 and other display interface components to
	 * itself.
	 */
	if (of_device_is_compatible(np, "qcom,mdp4"))
		master_dev = mdp_dev;
	else
		master_dev = mdp_dev->parent;

	for_each_endpoint_of_node(np, ep_node) {
		struct device_node *intf;
		struct of_endpoint ep;
		int ret;

		ret = of_graph_parse_endpoint(ep_node, &ep);
		if (ret) {
			DRM_DEV_ERROR(mdp_dev, "unable to parse port endpoint\n");
			/* drop the iterator's reference before bailing out */
			of_node_put(ep_node);
			return ret;
		}

		/*
		 * The LCDC/LVDS port on MDP4 is a special case where the
		 * remote-endpoint isn't a component that we need to add
		 */
		if (of_device_is_compatible(np, "qcom,mdp4") &&
		    ep.port == 0)
			continue;

		/*
		 * It's okay if some of the ports don't have a remote endpoint
		 * specified. It just means that the port isn't connected to
		 * any external interface.
		 */
		intf = of_graph_get_remote_port_parent(ep_node);
		if (!intf)
			continue;

		/* only match components whose DT node is enabled */
		if (of_device_is_available(intf))
			drm_of_component_match_add(master_dev, matchptr,
						   compare_of, intf);

		of_node_put(intf);
	}

	return 0;
}
1184 | ||
54011e26 AT |
1185 | static int compare_name_mdp(struct device *dev, void *data) |
1186 | { | |
1187 | return (strstr(dev_name(dev), "mdp") != NULL); | |
1188 | } | |
1189 | ||
7d526fcf AT |
/*
 * Build the component match list for the display stack.  For MDSS-style
 * bindings (MDP5/DPU) the children are populated first and the MDP/DPU
 * child is located; for MDP4 the probed device itself is the MDP.
 * Returns 0 on success or a negative errno (children are depopulated on
 * failure).
 */
static int add_display_components(struct device *dev,
				  struct component_match **matchptr)
{
	struct device *mdp_dev;
	int ret;

	/*
	 * MDP5/DPU based devices don't have a flat hierarchy. There is a top
	 * level parent: MDSS, and children: MDP5/DPU, DSI, HDMI, eDP etc.
	 * Populate the children devices, find the MDP5/DPU node, and then add
	 * the interfaces to our components list.
	 */
	if (of_device_is_compatible(dev->of_node, "qcom,mdss") ||
	    of_device_is_compatible(dev->of_node, "qcom,sdm845-mdss") ||
	    of_device_is_compatible(dev->of_node, "qcom,sc7180-mdss")) {
		ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
		if (ret) {
			DRM_DEV_ERROR(dev, "failed to populate children devices\n");
			return ret;
		}

		mdp_dev = device_find_child(dev, NULL, compare_name_mdp);
		if (!mdp_dev) {
			DRM_DEV_ERROR(dev, "failed to find MDSS MDP node\n");
			of_platform_depopulate(dev);
			return -ENODEV;
		}

		/* balance the reference taken by device_find_child() */
		put_device(mdp_dev);

		/* add the MDP component itself */
		drm_of_component_match_add(dev, matchptr, compare_of,
					   mdp_dev->of_node);
	} else {
		/* MDP4 */
		mdp_dev = dev;
	}

	ret = add_components_mdp(mdp_dev, matchptr);
	if (ret)
		of_platform_depopulate(dev);

	return ret;
}
1234 | ||
dc3ea265 AT |
/*
 * We don't know what's the best binding to link the gpu with the drm device.
 * For now, we just hunt for all the possible gpus that we support, and add them
 * as components.
 */
static const struct of_device_id msm_gpu_match[] = {
	{ .compatible = "qcom,adreno" },
	{ .compatible = "qcom,adreno-3xx" },
	{ .compatible = "amd,imageon" },
	{ .compatible = "qcom,kgsl-3d0" },
	{ },
};
1247 | ||
7d526fcf AT |
/*
 * Add the GPU (if any) to the component match list.  A missing or
 * disabled GPU node is not an error — the display stack can run
 * without one.
 */
static int add_gpu_components(struct device *dev,
			      struct component_match **matchptr)
{
	struct device_node *np;

	np = of_find_matching_node(NULL, msm_gpu_match);
	if (!np)
		return 0;

	/* skip GPU nodes with status = "disabled" */
	if (of_device_is_available(np))
		drm_of_component_match_add(dev, matchptr, compare_of, np);

	/* drop the reference taken by of_find_matching_node() */
	of_node_put(np);

	return 0;
}
1264 | ||
84448288 RK |
/* Component-master bind: all components are present, bring up the DRM device. */
static int msm_drm_bind(struct device *dev)
{
	return msm_drm_init(dev, &msm_driver);
}
1269 | ||
/* Component-master unbind: tear the DRM device back down. */
static void msm_drm_unbind(struct device *dev)
{
	msm_drm_uninit(dev);
}
1274 | ||
/* Component framework master callbacks. */
static const struct component_master_ops msm_drm_ops = {
	.bind = msm_drm_bind,
	.unbind = msm_drm_unbind,
};
1279 | ||
1280 | /* | |
1281 | * Platform driver: | |
1282 | */ | |
060530f1 | 1283 | |
/*
 * Platform probe: collect display and GPU component matches, set the DMA
 * mask, and register as component master.  Actual device bring-up happens
 * later in msm_drm_bind() once all components have probed.
 */
static int msm_pdev_probe(struct platform_device *pdev)
{
	struct component_match *match = NULL;
	int ret;

	/* get_mdp_ver() == 0 means a GPU-only (headless) configuration. */
	if (get_mdp_ver(pdev)) {
		ret = add_display_components(&pdev->dev, &match);
		if (ret)
			return ret;
	}

	ret = add_gpu_components(&pdev->dev, &match);
	if (ret)
		goto fail;

	/* on all devices that I am aware of, iommu's which can map
	 * any address the cpu can see are used:
	 */
	ret = dma_set_mask_and_coherent(&pdev->dev, ~0);
	if (ret)
		goto fail;

	ret = component_master_add_with_match(&pdev->dev, &msm_drm_ops, match);
	if (ret)
		goto fail;

	return 0;

fail:
	/* undo the of_platform_populate() done by add_display_components() */
	of_platform_depopulate(&pdev->dev);
	return ret;
}
1316 | ||
/* Platform remove: unregister the component master and remove the child
 * devices created at probe time.
 */
static int msm_pdev_remove(struct platform_device *pdev)
{
	component_master_del(&pdev->dev, &msm_drm_ops);
	of_platform_depopulate(&pdev->dev);

	return 0;
}
1324 | ||
06c0dd96 | 1325 | static const struct of_device_id dt_match[] = { |
aaded2e3 JS |
1326 | { .compatible = "qcom,mdp4", .data = (void *)KMS_MDP4 }, |
1327 | { .compatible = "qcom,mdss", .data = (void *)KMS_MDP5 }, | |
25fdd593 | 1328 | { .compatible = "qcom,sdm845-mdss", .data = (void *)KMS_DPU }, |
7bdc0c4b | 1329 | { .compatible = "qcom,sc7180-mdss", .data = (void *)KMS_DPU }, |
06c0dd96 RC |
1330 | {} |
1331 | }; | |
1332 | MODULE_DEVICE_TABLE(of, dt_match); | |
1333 | ||
c8afe684 RC |
/* Top-level platform driver for the msm display/GPU wrapper device. */
static struct platform_driver msm_platform_driver = {
	.probe      = msm_pdev_probe,
	.remove     = msm_pdev_remove,
	.driver     = {
		.name   = "msm",
		.of_match_table = dt_match,
		.pm     = &msm_pm_ops,
	},
};
1343 | ||
/*
 * Module init: register the sub-drivers (kms backends, DSI/eDP/HDMI,
 * adreno) before the platform driver so they exist by the time the
 * component master binds.  Refuses to load with modeset disabled.
 */
static int __init msm_drm_register(void)
{
	if (!modeset)
		return -EINVAL;

	DBG("init");
	msm_mdp_register();
	msm_dpu_register();
	msm_dsi_register();
	msm_edp_register();
	msm_hdmi_register();
	adreno_register();
	return platform_driver_register(&msm_platform_driver);
}
1358 | ||
/* Module exit: unregister the platform driver first, then the sub-drivers. */
static void __exit msm_drm_unregister(void)
{
	DBG("fini");
	platform_driver_unregister(&msm_platform_driver);
	msm_hdmi_unregister();
	adreno_unregister();
	msm_edp_unregister();
	msm_dsi_unregister();
	msm_mdp_unregister();
	msm_dpu_unregister();
}
1370 | ||
1371 | module_init(msm_drm_register); | |
1372 | module_exit(msm_drm_unregister); | |
1373 | ||
1374 | MODULE_AUTHOR("Rob Clark <robdclark@gmail.com"); | |
1375 | MODULE_DESCRIPTION("MSM DRM Driver"); | |
1376 | MODULE_LICENSE("GPL"); |