/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <drm/drm_of.h>

#include "msm_drv.h"
#include "msm_debugfs.h"
#include "msm_fence.h"
#include "msm_gpu.h"
#include "msm_kms.h"
/*
 * MSM driver version:
 * - 1.0.0 - initial interface
 * - 1.1.0 - adds madvise, and support for submits with > 4 cmd buffers
 * - 1.2.0 - adds explicit fence support for submit ioctl
 */
#define MSM_VERSION_MAJOR	1
#define MSM_VERSION_MINOR	2
#define MSM_VERSION_PATCHLEVEL	0
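
/*
 * These values are reported to userspace through the standard DRM version
 * ioctl (drmGetVersion() in libdrm); bumping MSM_VERSION_MINOR is how
 * userspace discovers optional features such as the madvise and explicit
 * fence additions listed above.
 */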
static void msm_fb_output_poll_changed(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;

	drm_fb_helper_hotplug_event(priv->fbdev);
}
static const struct drm_mode_config_funcs mode_config_funcs = {
	.fb_create = msm_framebuffer_create,
	.output_poll_changed = msm_fb_output_poll_changed,
	.atomic_check = msm_atomic_check,
	.atomic_commit = msm_atomic_commit,
	.atomic_state_alloc = msm_atomic_state_alloc,
	.atomic_state_clear = msm_atomic_state_clear,
	.atomic_state_free = msm_atomic_state_free,
};
int msm_register_address_space(struct drm_device *dev,
		struct msm_gem_address_space *aspace)
{
	struct msm_drm_private *priv = dev->dev_private;
	int idx = priv->num_aspaces++;

	if (WARN_ON(idx >= ARRAY_SIZE(priv->aspace)))
		return -EINVAL;

	priv->aspace[idx] = aspace;

	return idx;
}
#ifdef CONFIG_DRM_MSM_REGISTER_LOGGING
static bool reglog = false;
MODULE_PARM_DESC(reglog, "Enable register read/write logging");
module_param(reglog, bool, 0600);
#else
#define reglog 0
#endif
#ifdef CONFIG_DRM_FBDEV_EMULATION
static bool fbdev = true;
MODULE_PARM_DESC(fbdev, "Enable fbdev compat layer");
module_param(fbdev, bool, 0600);
#endif
static char *vram = "16m";
MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU)");
module_param(vram, charp, 0);
bool dumpstate = false;
MODULE_PARM_DESC(dumpstate, "Dump KMS state on errors");
module_param(dumpstate, bool, 0600);
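
/*
 * Being ordinary module parameters, the knobs above can be set on the kernel
 * command line (for example msm.vram=64m when the driver is built in), as
 * module options at load time, and -- for those registered with mode 0600 --
 * inspected or changed at runtime via /sys/module/msm/parameters/.
 */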
struct clk *msm_clk_get(struct platform_device *pdev, const char *name)
{
	struct clk *clk;
	char name2[32];

	clk = devm_clk_get(&pdev->dev, name);
	if (!IS_ERR(clk) || PTR_ERR(clk) == -EPROBE_DEFER)
		return clk;

	snprintf(name2, sizeof(name2), "%s_clk", name);

	clk = devm_clk_get(&pdev->dev, name2);
	if (!IS_ERR(clk))
		dev_warn(&pdev->dev, "Using legacy clk name binding.  Use "
				"\"%s\" instead of \"%s\"\n", name, name2);

	return clk;
}
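
/*
 * Usage note for msm_clk_get() above: callers pass the canonical clock name
 * (e.g. "core"); if the DT still uses the legacy "<name>_clk" form, the
 * second devm_clk_get() lookup finds it and the warning asks for the binding
 * to be updated.
 */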
void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
		const char *dbgname)
{
	struct resource *res;
	unsigned long size;
	void __iomem *ptr;

	if (name)
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
	else
		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	if (!res) {
		dev_err(&pdev->dev, "failed to get memory resource: %s\n", name);
		return ERR_PTR(-EINVAL);
	}

	size = resource_size(res);

	ptr = devm_ioremap_nocache(&pdev->dev, res->start, size);
	if (!ptr) {
		dev_err(&pdev->dev, "failed to ioremap: %s\n", name);
		return ERR_PTR(-ENOMEM);
	}

	if (reglog)
		printk(KERN_DEBUG "IO:region %s %p %08lx\n", dbgname, ptr, size);

	return ptr;
}
void msm_writel(u32 data, void __iomem *addr)
{
	if (reglog)
		printk(KERN_DEBUG "IO:W %p %08x\n", addr, data);
	writel(data, addr);
}
u32 msm_readl(const void __iomem *addr)
{
	u32 val = readl(addr);

	if (reglog)
		printk(KERN_ERR "IO:R %p %08x\n", addr, val);
	return val;
}
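
/*
 * Vblank enable/disable requests are queued on a list and serviced from a
 * workqueue (see vblank_ctrl_worker() below), so that the KMS backend's
 * enable_vblank()/disable_vblank() hooks run in process context rather than
 * in the atomic context the DRM core invokes the driver callbacks from.
 */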
struct vblank_event {
	struct list_head node;
	int crtc_id;
	bool enable;
};
static void vblank_ctrl_worker(struct work_struct *work)
{
	struct msm_vblank_ctrl *vbl_ctrl = container_of(work,
						struct msm_vblank_ctrl, work);
	struct msm_drm_private *priv = container_of(vbl_ctrl,
					struct msm_drm_private, vblank_ctrl);
	struct msm_kms *kms = priv->kms;
	struct vblank_event *vbl_ev, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&vbl_ctrl->lock, flags);
	list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
		list_del(&vbl_ev->node);
		spin_unlock_irqrestore(&vbl_ctrl->lock, flags);

		if (vbl_ev->enable)
			kms->funcs->enable_vblank(kms,
					priv->crtcs[vbl_ev->crtc_id]);
		else
			kms->funcs->disable_vblank(kms,
					priv->crtcs[vbl_ev->crtc_id]);

		kfree(vbl_ev);

		spin_lock_irqsave(&vbl_ctrl->lock, flags);
	}

	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
}
static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
					int crtc_id, bool enable)
{
	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
	struct vblank_event *vbl_ev;
	unsigned long flags;

	vbl_ev = kzalloc(sizeof(*vbl_ev), GFP_ATOMIC);
	if (!vbl_ev)
		return -ENOMEM;

	vbl_ev->crtc_id = crtc_id;
	vbl_ev->enable = enable;

	spin_lock_irqsave(&vbl_ctrl->lock, flags);
	list_add_tail(&vbl_ev->node, &vbl_ctrl->event_list);
	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);

	queue_work(priv->wq, &vbl_ctrl->work);

	return 0;
}
static int msm_drm_uninit(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct drm_device *ddev = platform_get_drvdata(pdev);
	struct msm_drm_private *priv = ddev->dev_private;
	struct msm_kms *kms = priv->kms;
	struct msm_gpu *gpu = priv->gpu;
	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
	struct vblank_event *vbl_ev, *tmp;

	/* We must cancel and cleanup any pending vblank enable/disable
	 * work before drm_irq_uninstall() to avoid work re-enabling an
	 * irq after uninstall has disabled it.
	 */
	cancel_work_sync(&vbl_ctrl->work);
	list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
		list_del(&vbl_ev->node);
		kfree(vbl_ev);
	}

	msm_gem_shrinker_cleanup(ddev);

	drm_kms_helper_poll_fini(ddev);

	drm_dev_unregister(ddev);

#ifdef CONFIG_DRM_FBDEV_EMULATION
	if (fbdev && priv->fbdev)
		msm_fbdev_free(ddev);
#endif
	drm_mode_config_cleanup(ddev);

	pm_runtime_get_sync(dev);
	drm_irq_uninstall(ddev);
	pm_runtime_put_sync(dev);

	flush_workqueue(priv->wq);
	destroy_workqueue(priv->wq);

	flush_workqueue(priv->atomic_wq);
	destroy_workqueue(priv->atomic_wq);

	if (kms && kms->funcs)
		kms->funcs->destroy(kms);

	if (gpu) {
		mutex_lock(&ddev->struct_mutex);
		gpu->funcs->pm_suspend(gpu);
		mutex_unlock(&ddev->struct_mutex);
		gpu->funcs->destroy(gpu);
	}

	if (priv->vram.paddr) {
		unsigned long attrs = DMA_ATTR_NO_KERNEL_MAPPING;

		drm_mm_takedown(&priv->vram.mm);
		dma_free_attrs(dev, priv->vram.size, NULL,
			       priv->vram.paddr, attrs);
	}

	component_unbind_all(dev, ddev);

	msm_mdss_destroy(ddev);

	ddev->dev_private = NULL;
	drm_dev_unref(ddev);

	kfree(priv);

	return 0;
}
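
/*
 * get_mdp_ver() returns the .data field of the dt_match[] table at the
 * bottom of this file: 4 for MDP4 devices, 5 for MDP5/MDSS devices.
 */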
static int get_mdp_ver(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;

	return (int) (unsigned long) of_device_get_match_data(dev);
}
#include <linux/of_address.h>
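
/*
 * Illustrative (made-up addresses) device-tree shape for the "memory-region"
 * case handled in msm_init_vram() below: a reserved-memory carveout that the
 * display controller node points at by phandle:
 *
 *	reserved-memory {
 *		splash_region: splash@83000000 {
 *			reg = <0x83000000 0x1000000>;
 *			no-map;
 *		};
 *	};
 *
 *	&mdp {
 *		memory-region = <&splash_region>;
 *	};
 */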
static int msm_init_vram(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct device_node *node;
	unsigned long size = 0;
	int ret = 0;

	/* In the device-tree world, we could have a 'memory-region'
	 * phandle, which gives us a link to our "vram".  Allocating
	 * is all nicely abstracted behind the dma api, but we need
	 * to know the entire size to allocate it all in one go. There
	 * are two cases:
	 *  1) device with no IOMMU, in which case we need exclusive
	 *     access to a VRAM carveout big enough for all gpu
	 *     buffers
	 *  2) device with IOMMU, but where the bootloader puts up
	 *     a splash screen.  In this case, the VRAM carveout
	 *     need only be large enough for fbdev fb.  But we need
	 *     exclusive access to the buffer to avoid the kernel
	 *     using those pages for other purposes (which appears
	 *     as corruption on screen before we have a chance to
	 *     load and do initial modeset)
	 */
	node = of_parse_phandle(dev->dev->of_node, "memory-region", 0);
	if (node) {
		struct resource r;

		ret = of_address_to_resource(node, 0, &r);
		of_node_put(node);
		if (ret)
			return ret;
		size = r.end - r.start;
		DRM_INFO("using VRAM carveout: %lx@%pa\n", size, &r.start);

		/* if we have no IOMMU, then we need to use carveout allocator.
		 * Grab the entire CMA chunk carved out in early startup in
		 * mach-msm:
		 */
	} else if (!iommu_present(&platform_bus_type)) {
		DRM_INFO("using %s VRAM carveout\n", vram);
		size = memparse(vram, NULL);
	}

	if (size) {
		unsigned long attrs = 0;
		void *p;

		priv->vram.size = size;

		drm_mm_init(&priv->vram.mm, 0, (size >> PAGE_SHIFT) - 1);

		attrs |= DMA_ATTR_NO_KERNEL_MAPPING;
		attrs |= DMA_ATTR_WRITE_COMBINE;

		/* note that for no-kernel-mapping, the vaddr returned
		 * is bogus, but non-null if allocation succeeded:
		 */
		p = dma_alloc_attrs(dev->dev, size,
				&priv->vram.paddr, GFP_KERNEL, attrs);
		if (!p) {
			dev_err(dev->dev, "failed to allocate VRAM\n");
			priv->vram.paddr = 0;
			return -ENOMEM;
		}

		dev_info(dev->dev, "VRAM: %08x->%08x\n",
				(uint32_t)priv->vram.paddr,
				(uint32_t)(priv->vram.paddr + size));
	}

	return ret;
}
static int msm_drm_init(struct device *dev, struct drm_driver *drv)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct drm_device *ddev;
	struct msm_drm_private *priv;
	struct msm_kms *kms;
	int ret;

	ddev = drm_dev_alloc(drv, dev);
	if (IS_ERR(ddev)) {
		dev_err(dev, "failed to allocate drm_device\n");
		return PTR_ERR(ddev);
	}

	platform_set_drvdata(pdev, ddev);
	ddev->platformdev = pdev;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		drm_dev_unref(ddev);
		return -ENOMEM;
	}

	ddev->dev_private = priv;

	ret = msm_mdss_init(ddev);
	if (ret) {
		kfree(priv);
		drm_dev_unref(ddev);
		return ret;
	}

	priv->wq = alloc_ordered_workqueue("msm", 0);
	priv->atomic_wq = alloc_ordered_workqueue("msm:atomic", 0);
	init_waitqueue_head(&priv->pending_crtcs_event);

	INIT_LIST_HEAD(&priv->inactive_list);
	INIT_LIST_HEAD(&priv->vblank_ctrl.event_list);
	INIT_WORK(&priv->vblank_ctrl.work, vblank_ctrl_worker);
	spin_lock_init(&priv->vblank_ctrl.lock);

	drm_mode_config_init(ddev);

	/* Bind all our sub-components: */
	ret = component_bind_all(dev, ddev);
	if (ret) {
		msm_mdss_destroy(ddev);
		kfree(priv);
		drm_dev_unref(ddev);
		return ret;
	}

	ret = msm_init_vram(ddev);
	if (ret)
		goto fail;

	msm_gem_shrinker_init(ddev);

	switch (get_mdp_ver(pdev)) {
	case 4:
		kms = mdp4_kms_init(ddev);
		break;
	case 5:
		kms = mdp5_kms_init(ddev);
		break;
	default:
		kms = ERR_PTR(-ENODEV);
		break;
	}

	if (IS_ERR(kms)) {
		/*
		 * NOTE: once we have GPU support, having no kms should not
		 * be considered fatal.. ideally we would still support gpu
		 * and (for example) use dmabuf/prime to share buffers with
		 * imx drm driver on iMX5
		 */
		dev_err(dev, "failed to load kms\n");
		ret = PTR_ERR(kms);
		goto fail;
	}

	if (kms) {
		ret = kms->funcs->hw_init(kms);
		if (ret) {
			dev_err(dev, "kms hw init failed: %d\n", ret);
			goto fail;
		}
	}

	ddev->mode_config.funcs = &mode_config_funcs;

	ret = drm_vblank_init(ddev, priv->num_crtcs);
	if (ret < 0) {
		dev_err(dev, "failed to initialize vblank\n");
		goto fail;
	}

	if (kms) {
		pm_runtime_get_sync(dev);
		ret = drm_irq_install(ddev, kms->irq);
		pm_runtime_put_sync(dev);
		if (ret < 0) {
			dev_err(dev, "failed to install IRQ handler\n");
			goto fail;
		}
	}

	ret = drm_dev_register(ddev, 0);
	if (ret)
		goto fail;

	drm_mode_config_reset(ddev);

#ifdef CONFIG_DRM_FBDEV_EMULATION
	if (fbdev)
		priv->fbdev = msm_fbdev_init(ddev);
#endif

	ret = msm_debugfs_late_init(ddev);
	if (ret)
		goto fail;

	drm_kms_helper_poll_init(ddev);

	return 0;

fail:
	msm_drm_uninit(dev);
	return ret;
}
static void load_gpu(struct drm_device *dev)
{
	static DEFINE_MUTEX(init_lock);
	struct msm_drm_private *priv = dev->dev_private;

	mutex_lock(&init_lock);

	if (!priv->gpu)
		priv->gpu = adreno_load_gpu(dev);

	mutex_unlock(&init_lock);
}
static int msm_open(struct drm_device *dev, struct drm_file *file)
{
	struct msm_file_private *ctx;

	/* For now, load gpu on open.. to avoid the requirement of having
	 * firmware in the initrd.
	 */
	load_gpu(dev);

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	file->driver_priv = ctx;

	return 0;
}
static void msm_preclose(struct drm_device *dev, struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_file_private *ctx = file->driver_priv;

	mutex_lock(&dev->struct_mutex);
	if (ctx == priv->lastctx)
		priv->lastctx = NULL;
	mutex_unlock(&dev->struct_mutex);

	kfree(ctx);
}
static void msm_lastclose(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;

	if (priv->fbdev)
		drm_fb_helper_restore_fbdev_mode_unlocked(priv->fbdev);
}
static irqreturn_t msm_irq(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	return kms->funcs->irq(kms);
}
static void msm_irq_preinstall(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	kms->funcs->irq_preinstall(kms);
}
static int msm_irq_postinstall(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	return kms->funcs->irq_postinstall(kms);
}
static void msm_irq_uninstall(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	kms->funcs->irq_uninstall(kms);
}
static int msm_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	if (!kms)
		return -ENXIO;

	DBG("dev=%p, crtc=%u", dev, pipe);
	return vblank_ctrl_queue_work(priv, pipe, true);
}
static void msm_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	if (!kms)
		return;

	DBG("dev=%p, crtc=%u", dev, pipe);
	vblank_ctrl_queue_work(priv, pipe, false);
}
/*
 * DRM ioctls:
 */

static int msm_ioctl_get_param(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_msm_param *args = data;
	struct msm_gpu *gpu;

	/* for now, we just have 3d pipe.. eventually this would need to
	 * be more clever to dispatch to appropriate gpu module:
	 */
	if (args->pipe != MSM_PIPE_3D0)
		return -EINVAL;

	gpu = priv->gpu;

	if (!gpu)
		return -ENXIO;

	return gpu->funcs->get_param(gpu, args->param, &args->value);
}
static int msm_ioctl_gem_new(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_new *args = data;

	if (args->flags & ~MSM_BO_FLAGS) {
		DRM_ERROR("invalid flags: %08x\n", args->flags);
		return -EINVAL;
	}

	return msm_gem_new_handle(dev, file, args->size,
			args->flags, &args->handle);
}
static inline ktime_t to_ktime(struct drm_msm_timespec timeout)
{
	return ktime_set(timeout.tv_sec, timeout.tv_nsec);
}
static int msm_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_cpu_prep *args = data;
	struct drm_gem_object *obj;
	ktime_t timeout = to_ktime(args->timeout);
	int ret;

	if (args->op & ~MSM_PREP_FLAGS) {
		DRM_ERROR("invalid op: %08x\n", args->op);
		return -EINVAL;
	}

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = msm_gem_cpu_prep(obj, args->op, &timeout);

	drm_gem_object_unreference_unlocked(obj);

	return ret;
}
static int msm_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_cpu_fini *args = data;
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = msm_gem_cpu_fini(obj);

	drm_gem_object_unreference_unlocked(obj);

	return ret;
}
static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_info *args = data;
	struct drm_gem_object *obj;
	int ret = 0;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	args->offset = msm_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

	return ret;
}
static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_msm_wait_fence *args = data;
	ktime_t timeout = to_ktime(args->timeout);

	if (args->pad) {
		DRM_ERROR("invalid pad: %08x\n", args->pad);
		return -EINVAL;
	}

	if (!priv->gpu)
		return 0;

	return msm_wait_fence(priv->gpu->fctx, args->fence, &timeout, true);
}
static int msm_ioctl_gem_madvise(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_madvise *args = data;
	struct drm_gem_object *obj;
	int ret;

	switch (args->madv) {
	case MSM_MADV_DONTNEED:
	case MSM_MADV_WILLNEED:
		break;
	default:
		return -EINVAL;
	}

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj) {
		ret = -ENOENT;
		goto unlock;
	}

	ret = msm_gem_madvise(obj, args->madv);
	if (ret >= 0) {
		args->retained = ret;
		ret = 0;
	}

	drm_gem_object_unreference(obj);

unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
static const struct drm_ioctl_desc msm_ioctls[] = {
	DRM_IOCTL_DEF_DRV(MSM_GET_PARAM,    msm_ioctl_get_param,    DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_NEW,      msm_ioctl_gem_new,      DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_INFO,     msm_ioctl_gem_info,     DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_PREP, msm_ioctl_gem_cpu_prep, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT,   msm_ioctl_gem_submit,   DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE,   msm_ioctl_wait_fence,   DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_MADVISE,  msm_ioctl_gem_madvise,  DRM_AUTH|DRM_RENDER_ALLOW),
};
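
/*
 * Illustrative userspace usage (not part of this file): the table above is
 * reached through the generic DRM ioctl path, e.g. allocating a buffer with
 * the uapi struct from include/uapi/drm/msm_drm.h via libdrm:
 *
 *	struct drm_msm_gem_new req = { .size = len, .flags = MSM_BO_WC };
 *	int ret = drmCommandWriteRead(fd, DRM_MSM_GEM_NEW, &req, sizeof(req));
 *	// on success, req.handle names the new GEM object
 */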
static const struct vm_operations_struct vm_ops = {
	.fault = msm_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};
static const struct file_operations fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.compat_ioctl = drm_compat_ioctl,
	.mmap = msm_gem_mmap,
};
static struct drm_driver msm_driver = {
	.driver_features    = DRIVER_HAVE_IRQ |
				DRIVER_GEM |
				DRIVER_PRIME |
				DRIVER_RENDER |
				DRIVER_ATOMIC |
				DRIVER_MODESET,
	.open               = msm_open,
	.preclose           = msm_preclose,
	.lastclose          = msm_lastclose,
	.irq_handler        = msm_irq,
	.irq_preinstall     = msm_irq_preinstall,
	.irq_postinstall    = msm_irq_postinstall,
	.irq_uninstall      = msm_irq_uninstall,
	.enable_vblank      = msm_enable_vblank,
	.disable_vblank     = msm_disable_vblank,
	.gem_free_object    = msm_gem_free_object,
	.gem_vm_ops         = &vm_ops,
	.dumb_create        = msm_gem_dumb_create,
	.dumb_map_offset    = msm_gem_dumb_map_offset,
	.dumb_destroy       = drm_gem_dumb_destroy,
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export   = drm_gem_prime_export,
	.gem_prime_import   = drm_gem_prime_import,
	.gem_prime_pin      = msm_gem_prime_pin,
	.gem_prime_unpin    = msm_gem_prime_unpin,
	.gem_prime_get_sg_table = msm_gem_prime_get_sg_table,
	.gem_prime_import_sg_table = msm_gem_prime_import_sg_table,
	.gem_prime_vmap     = msm_gem_prime_vmap,
	.gem_prime_vunmap   = msm_gem_prime_vunmap,
	.gem_prime_mmap     = msm_gem_prime_mmap,
#ifdef CONFIG_DEBUG_FS
	.debugfs_init       = msm_debugfs_init,
	.debugfs_cleanup    = msm_debugfs_cleanup,
#endif
	.ioctls             = msm_ioctls,
	.num_ioctls         = DRM_MSM_NUM_IOCTLS,
	.fops               = &fops,
	.name               = "msm",
	.desc               = "MSM Snapdragon DRM",
	.major              = MSM_VERSION_MAJOR,
	.minor              = MSM_VERSION_MINOR,
	.patchlevel         = MSM_VERSION_PATCHLEVEL,
};
#ifdef CONFIG_PM_SLEEP
static int msm_pm_suspend(struct device *dev)
{
	struct drm_device *ddev = dev_get_drvdata(dev);

	drm_kms_helper_poll_disable(ddev);

	return 0;
}

static int msm_pm_resume(struct device *dev)
{
	struct drm_device *ddev = dev_get_drvdata(dev);

	drm_kms_helper_poll_enable(ddev);

	return 0;
}
#endif

static const struct dev_pm_ops msm_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(msm_pm_suspend, msm_pm_resume)
};
/*
 * Componentized driver support:
 */

/*
 * NOTE: duplication of the same code as exynos or imx (or probably any other),
 * so probably some room for some helpers
 */
static int compare_of(struct device *dev, void *data)
{
	return dev->of_node == data;
}
/*
 * Identify what components need to be added by parsing what remote-endpoints
 * our MDP output ports are connected to. In the case of LVDS on MDP4, there
 * is no external component that we need to add since LVDS is within MDP4
 * itself.
 */
static int add_components_mdp(struct device *mdp_dev,
			      struct component_match **matchptr)
{
	struct device_node *np = mdp_dev->of_node;
	struct device_node *ep_node;
	struct device *master_dev;

	/*
	 * on MDP4 based platforms, the MDP platform device is the component
	 * master that adds other display interface components to itself.
	 *
	 * on MDP5 based platforms, the MDSS platform device is the component
	 * master that adds MDP5 and other display interface components to
	 * itself.
	 */
	if (of_device_is_compatible(np, "qcom,mdp4"))
		master_dev = mdp_dev;
	else
		master_dev = mdp_dev->parent;

	for_each_endpoint_of_node(np, ep_node) {
		struct device_node *intf;
		struct of_endpoint ep;
		int ret;

		ret = of_graph_parse_endpoint(ep_node, &ep);
		if (ret) {
			dev_err(mdp_dev, "unable to parse port endpoint\n");
			of_node_put(ep_node);
			return ret;
		}

		/*
		 * The LCDC/LVDS port on MDP4 is a special case where the
		 * remote-endpoint isn't a component that we need to add
		 */
		if (of_device_is_compatible(np, "qcom,mdp4") &&
		    ep.port == 0)
			continue;

		/*
		 * It's okay if some of the ports don't have a remote endpoint
		 * specified. It just means that the port isn't connected to
		 * any external interface.
		 */
		intf = of_graph_get_remote_port_parent(ep_node);
		if (!intf)
			continue;

		drm_of_component_match_add(master_dev, matchptr, compare_of,
					   intf);
		of_node_put(intf);
	}

	return 0;
}
static int compare_name_mdp(struct device *dev, void *data)
{
	return (strstr(dev_name(dev), "mdp") != NULL);
}
static int add_display_components(struct device *dev,
				  struct component_match **matchptr)
{
	struct device *mdp_dev;
	int ret;

	/*
	 * MDP5 based devices don't have a flat hierarchy. There is a top level
	 * parent: MDSS, and children: MDP5, DSI, HDMI, eDP etc. Populate the
	 * children devices, find the MDP5 node, and then add the interfaces
	 * to our components list.
	 */
	if (of_device_is_compatible(dev->of_node, "qcom,mdss")) {
		ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
		if (ret) {
			dev_err(dev, "failed to populate children devices\n");
			return ret;
		}

		mdp_dev = device_find_child(dev, NULL, compare_name_mdp);
		if (!mdp_dev) {
			dev_err(dev, "failed to find MDSS MDP node\n");
			of_platform_depopulate(dev);
			return -ENODEV;
		}

		put_device(mdp_dev);

		/* add the MDP component itself */
		drm_of_component_match_add(dev, matchptr, compare_of,
					   mdp_dev->of_node);
	} else {
		/* MDP4 */
		mdp_dev = dev;
	}

	ret = add_components_mdp(mdp_dev, matchptr);
	if (ret)
		of_platform_depopulate(dev);

	return ret;
}
/*
 * We don't know what's the best binding to link the gpu with the drm device.
 * For now, we just hunt for all the possible gpus that we support, and add
 * them as components.
 */
static const struct of_device_id msm_gpu_match[] = {
	{ .compatible = "qcom,adreno" },
	{ .compatible = "qcom,adreno-3xx" },
	{ .compatible = "qcom,kgsl-3d0" },
	{ },
};
static int add_gpu_components(struct device *dev,
			      struct component_match **matchptr)
{
	struct device_node *np;

	np = of_find_matching_node(NULL, msm_gpu_match);
	if (!np)
		return 0;

	drm_of_component_match_add(dev, matchptr, compare_of, np);

	of_node_put(np);

	return 0;
}
static int msm_drm_bind(struct device *dev)
{
	return msm_drm_init(dev, &msm_driver);
}
static void msm_drm_unbind(struct device *dev)
{
	msm_drm_uninit(dev);
}
static const struct component_master_ops msm_drm_ops = {
	.bind = msm_drm_bind,
	.unbind = msm_drm_unbind,
};
/*
 * Platform driver:
 */

static int msm_pdev_probe(struct platform_device *pdev)
{
	struct component_match *match = NULL;
	int ret;

	ret = add_display_components(&pdev->dev, &match);
	if (ret)
		return ret;

	ret = add_gpu_components(&pdev->dev, &match);
	if (ret)
		return ret;

	/* on all devices that I am aware of, iommu's which can map
	 * any address the cpu can see are used:
	 */
	ret = dma_set_mask_and_coherent(&pdev->dev, ~0);
	if (ret)
		return ret;

	return component_master_add_with_match(&pdev->dev, &msm_drm_ops, match);
}
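
/*
 * With the component framework, msm_drm_bind() (and thus msm_drm_init())
 * only runs once every device added to 'match' above has probed and
 * registered its component; until then, probe of the master completes
 * without creating the DRM device.
 */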
static int msm_pdev_remove(struct platform_device *pdev)
{
	component_master_del(&pdev->dev, &msm_drm_ops);
	of_platform_depopulate(&pdev->dev);

	return 0;
}
static const struct of_device_id dt_match[] = {
	{ .compatible = "qcom,mdp4", .data = (void *)4 },	/* MDP4 */
	{ .compatible = "qcom,mdss", .data = (void *)5 },	/* MDP5 MDSS */
	{}
};
MODULE_DEVICE_TABLE(of, dt_match);
static struct platform_driver msm_platform_driver = {
	.probe      = msm_pdev_probe,
	.remove     = msm_pdev_remove,
	.driver     = {
		.name           = "msm",
		.of_match_table = dt_match,
		.pm             = &msm_pm_ops,
	},
};
static int __init msm_drm_register(void)
{
	msm_mdp_register();
	msm_dsi_register();
	msm_edp_register();
	msm_hdmi_register();
	adreno_register();
	return platform_driver_register(&msm_platform_driver);
}
static void __exit msm_drm_unregister(void)
{
	platform_driver_unregister(&msm_platform_driver);
	msm_hdmi_unregister();
	adreno_unregister();
	msm_edp_unregister();
	msm_dsi_unregister();
	msm_mdp_unregister();
}
module_init(msm_drm_register);
module_exit(msm_drm_unregister);

MODULE_AUTHOR("Rob Clark <robdclark@gmail.com>");
MODULE_DESCRIPTION("MSM DRM Driver");
MODULE_LICENSE("GPL");