/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
21 static void msm_fb_output_poll_changed(struct drm_device
*dev
)
23 struct msm_drm_private
*priv
= dev
->dev_private
;
25 drm_fb_helper_hotplug_event(priv
->fbdev
);
28 static const struct drm_mode_config_funcs mode_config_funcs
= {
29 .fb_create
= msm_framebuffer_create
,
30 .output_poll_changed
= msm_fb_output_poll_changed
,
/*
 * IOMMU fault handler: just log the faulting iova/flags for debugging.
 * Returning 0 tells the IOMMU core the fault was handled.
 */
static int msm_fault_handler(struct iommu_domain *iommu, struct device *dev,
		unsigned long iova, int flags, void *arg)
{
	DBG("*** fault: iova=%08lx, flags=%d", iova, flags);
	return 0;
}
40 int msm_register_iommu(struct drm_device
*dev
, struct iommu_domain
*iommu
)
42 struct msm_drm_private
*priv
= dev
->dev_private
;
43 int idx
= priv
->num_iommus
++;
45 if (WARN_ON(idx
>= ARRAY_SIZE(priv
->iommus
)))
48 priv
->iommus
[idx
] = iommu
;
50 iommu_set_fault_handler(iommu
, msm_fault_handler
, dev
);
52 /* need to iommu_attach_device() somewhere?? on resume?? */
57 int msm_iommu_attach(struct drm_device
*dev
, struct iommu_domain
*iommu
,
58 const char **names
, int cnt
)
62 for (i
= 0; i
< cnt
; i
++) {
63 /* TODO maybe some day msm iommu won't require this hack: */
64 struct device
*msm_iommu_get_ctx(const char *ctx_name
);
65 struct device
*ctx
= msm_iommu_get_ctx(names
[i
]);
68 ret
= iommu_attach_device(iommu
, ctx
);
70 dev_warn(dev
->dev
, "could not attach iommu to %s", names
[i
]);
/*
 * Optional register-access logging, controlled at runtime by the "reglog"
 * module parameter.  When the config option is off, reglog compiles to a
 * constant 0 so the logging branches are eliminated.
 */
#ifdef CONFIG_DRM_MSM_REGISTER_LOGGING
static bool reglog = false;
MODULE_PARM_DESC(reglog, "Enable register read/write logging");
module_param(reglog, bool, 0600);
#else
#define reglog 0
#endif
85 void __iomem
*msm_ioremap(struct platform_device
*pdev
, const char *name
,
93 res
= platform_get_resource_byname(pdev
, IORESOURCE_MEM
, name
);
95 res
= platform_get_resource(pdev
, IORESOURCE_MEM
, 0);
98 dev_err(&pdev
->dev
, "failed to get memory resource: %s\n", name
);
99 return ERR_PTR(-EINVAL
);
102 size
= resource_size(res
);
104 ptr
= devm_ioremap_nocache(&pdev
->dev
, res
->start
, size
);
106 dev_err(&pdev
->dev
, "failed to ioremap: %s\n", name
);
107 return ERR_PTR(-ENOMEM
);
111 printk(KERN_DEBUG
"IO:region %s %08x %08lx\n", dbgname
, (u32
)ptr
, size
);
116 void msm_writel(u32 data
, void __iomem
*addr
)
119 printk(KERN_DEBUG
"IO:W %08x %08x\n", (u32
)addr
, data
);
123 u32
msm_readl(const void __iomem
*addr
)
125 u32 val
= readl(addr
);
127 printk(KERN_ERR
"IO:R %08x %08x\n", (u32
)addr
, val
);
135 static int msm_unload(struct drm_device
*dev
)
137 struct msm_drm_private
*priv
= dev
->dev_private
;
138 struct msm_kms
*kms
= priv
->kms
;
139 struct msm_gpu
*gpu
= priv
->gpu
;
141 drm_kms_helper_poll_fini(dev
);
142 drm_mode_config_cleanup(dev
);
143 drm_vblank_cleanup(dev
);
145 pm_runtime_get_sync(dev
->dev
);
146 drm_irq_uninstall(dev
);
147 pm_runtime_put_sync(dev
->dev
);
149 flush_workqueue(priv
->wq
);
150 destroy_workqueue(priv
->wq
);
153 pm_runtime_disable(dev
->dev
);
154 kms
->funcs
->destroy(kms
);
158 mutex_lock(&dev
->struct_mutex
);
159 gpu
->funcs
->pm_suspend(gpu
);
160 gpu
->funcs
->destroy(gpu
);
161 mutex_unlock(&dev
->struct_mutex
);
164 dev
->dev_private
= NULL
;
171 static int msm_load(struct drm_device
*dev
, unsigned long flags
)
173 struct platform_device
*pdev
= dev
->platformdev
;
174 struct msm_drm_private
*priv
;
178 priv
= kzalloc(sizeof(*priv
), GFP_KERNEL
);
180 dev_err(dev
->dev
, "failed to allocate private data\n");
184 dev
->dev_private
= priv
;
186 priv
->wq
= alloc_ordered_workqueue("msm", 0);
187 init_waitqueue_head(&priv
->fence_event
);
189 INIT_LIST_HEAD(&priv
->inactive_list
);
190 INIT_LIST_HEAD(&priv
->fence_cbs
);
192 drm_mode_config_init(dev
);
194 kms
= mdp4_kms_init(dev
);
197 * NOTE: once we have GPU support, having no kms should not
198 * be considered fatal.. ideally we would still support gpu
199 * and (for example) use dmabuf/prime to share buffers with
200 * imx drm driver on iMX5
202 dev_err(dev
->dev
, "failed to load kms\n");
210 pm_runtime_enable(dev
->dev
);
211 ret
= kms
->funcs
->hw_init(kms
);
213 dev_err(dev
->dev
, "kms hw init failed: %d\n", ret
);
218 dev
->mode_config
.min_width
= 0;
219 dev
->mode_config
.min_height
= 0;
220 dev
->mode_config
.max_width
= 2048;
221 dev
->mode_config
.max_height
= 2048;
222 dev
->mode_config
.funcs
= &mode_config_funcs
;
224 ret
= drm_vblank_init(dev
, 1);
226 dev_err(dev
->dev
, "failed to initialize vblank\n");
230 pm_runtime_get_sync(dev
->dev
);
231 ret
= drm_irq_install(dev
);
232 pm_runtime_put_sync(dev
->dev
);
234 dev_err(dev
->dev
, "failed to install IRQ handler\n");
238 platform_set_drvdata(pdev
, dev
);
240 #ifdef CONFIG_DRM_MSM_FBDEV
241 priv
->fbdev
= msm_fbdev_init(dev
);
244 drm_kms_helper_poll_init(dev
);
253 static void load_gpu(struct drm_device
*dev
)
255 struct msm_drm_private
*priv
= dev
->dev_private
;
261 mutex_lock(&dev
->struct_mutex
);
262 gpu
= a3xx_gpu_init(dev
);
264 dev_warn(dev
->dev
, "failed to load a3xx gpu\n");
268 mutex_unlock(&dev
->struct_mutex
);
272 gpu
->funcs
->pm_resume(gpu
);
273 ret
= gpu
->funcs
->hw_init(gpu
);
275 dev_err(dev
->dev
, "gpu hw init failed: %d\n", ret
);
276 gpu
->funcs
->destroy(gpu
);
284 static int msm_open(struct drm_device
*dev
, struct drm_file
*file
)
286 struct msm_file_private
*ctx
;
288 /* For now, load gpu on open.. to avoid the requirement of having
289 * firmware in the initrd.
293 ctx
= kzalloc(sizeof(*ctx
), GFP_KERNEL
);
297 file
->driver_priv
= ctx
;
302 static void msm_preclose(struct drm_device
*dev
, struct drm_file
*file
)
304 struct msm_drm_private
*priv
= dev
->dev_private
;
305 struct msm_file_private
*ctx
= file
->driver_priv
;
306 struct msm_kms
*kms
= priv
->kms
;
309 kms
->funcs
->preclose(kms
, file
);
311 mutex_lock(&dev
->struct_mutex
);
312 if (ctx
== priv
->lastctx
)
313 priv
->lastctx
= NULL
;
314 mutex_unlock(&dev
->struct_mutex
);
319 static void msm_lastclose(struct drm_device
*dev
)
321 struct msm_drm_private
*priv
= dev
->dev_private
;
323 drm_modeset_lock_all(dev
);
324 drm_fb_helper_restore_fbdev_mode(priv
->fbdev
);
325 drm_modeset_unlock_all(dev
);
329 static irqreturn_t
msm_irq(DRM_IRQ_ARGS
)
331 struct drm_device
*dev
= arg
;
332 struct msm_drm_private
*priv
= dev
->dev_private
;
333 struct msm_kms
*kms
= priv
->kms
;
335 return kms
->funcs
->irq(kms
);
338 static void msm_irq_preinstall(struct drm_device
*dev
)
340 struct msm_drm_private
*priv
= dev
->dev_private
;
341 struct msm_kms
*kms
= priv
->kms
;
343 kms
->funcs
->irq_preinstall(kms
);
346 static int msm_irq_postinstall(struct drm_device
*dev
)
348 struct msm_drm_private
*priv
= dev
->dev_private
;
349 struct msm_kms
*kms
= priv
->kms
;
351 return kms
->funcs
->irq_postinstall(kms
);
354 static void msm_irq_uninstall(struct drm_device
*dev
)
356 struct msm_drm_private
*priv
= dev
->dev_private
;
357 struct msm_kms
*kms
= priv
->kms
;
359 kms
->funcs
->irq_uninstall(kms
);
362 static int msm_enable_vblank(struct drm_device
*dev
, int crtc_id
)
364 struct msm_drm_private
*priv
= dev
->dev_private
;
365 struct msm_kms
*kms
= priv
->kms
;
368 DBG("dev=%p, crtc=%d", dev
, crtc_id
);
369 return kms
->funcs
->enable_vblank(kms
, priv
->crtcs
[crtc_id
]);
372 static void msm_disable_vblank(struct drm_device
*dev
, int crtc_id
)
374 struct msm_drm_private
*priv
= dev
->dev_private
;
375 struct msm_kms
*kms
= priv
->kms
;
378 DBG("dev=%p, crtc=%d", dev
, crtc_id
);
379 kms
->funcs
->disable_vblank(kms
, priv
->crtcs
[crtc_id
]);
386 #ifdef CONFIG_DEBUG_FS
387 static int msm_gpu_show(struct drm_device
*dev
, struct seq_file
*m
)
389 struct msm_drm_private
*priv
= dev
->dev_private
;
390 struct msm_gpu
*gpu
= priv
->gpu
;
393 seq_printf(m
, "%s Status:\n", gpu
->name
);
394 gpu
->funcs
->show(gpu
, m
);
400 static int msm_gem_show(struct drm_device
*dev
, struct seq_file
*m
)
402 struct msm_drm_private
*priv
= dev
->dev_private
;
403 struct msm_gpu
*gpu
= priv
->gpu
;
406 seq_printf(m
, "Active Objects (%s):\n", gpu
->name
);
407 msm_gem_describe_objects(&gpu
->active_list
, m
);
410 seq_printf(m
, "Inactive Objects:\n");
411 msm_gem_describe_objects(&priv
->inactive_list
, m
);
416 static int msm_mm_show(struct drm_device
*dev
, struct seq_file
*m
)
418 return drm_mm_dump_table(m
, dev
->mm_private
);
421 static int msm_fb_show(struct drm_device
*dev
, struct seq_file
*m
)
423 struct msm_drm_private
*priv
= dev
->dev_private
;
424 struct drm_framebuffer
*fb
, *fbdev_fb
= NULL
;
427 seq_printf(m
, "fbcon ");
428 fbdev_fb
= priv
->fbdev
->fb
;
429 msm_framebuffer_describe(fbdev_fb
, m
);
432 mutex_lock(&dev
->mode_config
.fb_lock
);
433 list_for_each_entry(fb
, &dev
->mode_config
.fb_list
, head
) {
437 seq_printf(m
, "user ");
438 msm_framebuffer_describe(fb
, m
);
440 mutex_unlock(&dev
->mode_config
.fb_lock
);
445 static int show_locked(struct seq_file
*m
, void *arg
)
447 struct drm_info_node
*node
= (struct drm_info_node
*) m
->private;
448 struct drm_device
*dev
= node
->minor
->dev
;
449 int (*show
)(struct drm_device
*dev
, struct seq_file
*m
) =
450 node
->info_ent
->data
;
453 ret
= mutex_lock_interruptible(&dev
->struct_mutex
);
459 mutex_unlock(&dev
->struct_mutex
);
464 static struct drm_info_list msm_debugfs_list
[] = {
465 {"gpu", show_locked
, 0, msm_gpu_show
},
466 {"gem", show_locked
, 0, msm_gem_show
},
467 { "mm", show_locked
, 0, msm_mm_show
},
468 { "fb", show_locked
, 0, msm_fb_show
},
471 static int msm_debugfs_init(struct drm_minor
*minor
)
473 struct drm_device
*dev
= minor
->dev
;
476 ret
= drm_debugfs_create_files(msm_debugfs_list
,
477 ARRAY_SIZE(msm_debugfs_list
),
478 minor
->debugfs_root
, minor
);
481 dev_err(dev
->dev
, "could not install msm_debugfs_list\n");
488 static void msm_debugfs_cleanup(struct drm_minor
*minor
)
490 drm_debugfs_remove_files(msm_debugfs_list
,
491 ARRAY_SIZE(msm_debugfs_list
), minor
);
499 int msm_wait_fence_interruptable(struct drm_device
*dev
, uint32_t fence
,
500 struct timespec
*timeout
)
502 struct msm_drm_private
*priv
= dev
->dev_private
;
508 if (fence
> priv
->gpu
->submitted_fence
) {
509 DRM_ERROR("waiting on invalid fence: %u (of %u)\n",
510 fence
, priv
->gpu
->submitted_fence
);
516 ret
= fence_completed(dev
, fence
) ? 0 : -EBUSY
;
518 unsigned long timeout_jiffies
= timespec_to_jiffies(timeout
);
519 unsigned long start_jiffies
= jiffies
;
520 unsigned long remaining_jiffies
;
522 if (time_after(start_jiffies
, timeout_jiffies
))
523 remaining_jiffies
= 0;
525 remaining_jiffies
= timeout_jiffies
- start_jiffies
;
527 ret
= wait_event_interruptible_timeout(priv
->fence_event
,
528 fence_completed(dev
, fence
),
532 DBG("timeout waiting for fence: %u (completed: %u)",
533 fence
, priv
->completed_fence
);
535 } else if (ret
!= -ERESTARTSYS
) {
543 /* called from workqueue */
544 void msm_update_fence(struct drm_device
*dev
, uint32_t fence
)
546 struct msm_drm_private
*priv
= dev
->dev_private
;
548 mutex_lock(&dev
->struct_mutex
);
549 priv
->completed_fence
= max(fence
, priv
->completed_fence
);
551 while (!list_empty(&priv
->fence_cbs
)) {
552 struct msm_fence_cb
*cb
;
554 cb
= list_first_entry(&priv
->fence_cbs
,
555 struct msm_fence_cb
, work
.entry
);
557 if (cb
->fence
> priv
->completed_fence
)
560 list_del_init(&cb
->work
.entry
);
561 queue_work(priv
->wq
, &cb
->work
);
564 mutex_unlock(&dev
->struct_mutex
);
566 wake_up_all(&priv
->fence_event
);
569 void __msm_fence_worker(struct work_struct
*work
)
571 struct msm_fence_cb
*cb
= container_of(work
, struct msm_fence_cb
, work
);
579 static int msm_ioctl_get_param(struct drm_device
*dev
, void *data
,
580 struct drm_file
*file
)
582 struct msm_drm_private
*priv
= dev
->dev_private
;
583 struct drm_msm_param
*args
= data
;
586 /* for now, we just have 3d pipe.. eventually this would need to
587 * be more clever to dispatch to appropriate gpu module:
589 if (args
->pipe
!= MSM_PIPE_3D0
)
597 return gpu
->funcs
->get_param(gpu
, args
->param
, &args
->value
);
600 static int msm_ioctl_gem_new(struct drm_device
*dev
, void *data
,
601 struct drm_file
*file
)
603 struct drm_msm_gem_new
*args
= data
;
604 return msm_gem_new_handle(dev
, file
, args
->size
,
605 args
->flags
, &args
->handle
);
/* Convert the ioctl ABI timespec fields into a kernel struct timespec. */
#define TS(t) ((struct timespec){ .tv_sec = (t).tv_sec, .tv_nsec = (t).tv_nsec })
610 static int msm_ioctl_gem_cpu_prep(struct drm_device
*dev
, void *data
,
611 struct drm_file
*file
)
613 struct drm_msm_gem_cpu_prep
*args
= data
;
614 struct drm_gem_object
*obj
;
617 obj
= drm_gem_object_lookup(dev
, file
, args
->handle
);
621 ret
= msm_gem_cpu_prep(obj
, args
->op
, &TS(args
->timeout
));
623 drm_gem_object_unreference_unlocked(obj
);
628 static int msm_ioctl_gem_cpu_fini(struct drm_device
*dev
, void *data
,
629 struct drm_file
*file
)
631 struct drm_msm_gem_cpu_fini
*args
= data
;
632 struct drm_gem_object
*obj
;
635 obj
= drm_gem_object_lookup(dev
, file
, args
->handle
);
639 ret
= msm_gem_cpu_fini(obj
);
641 drm_gem_object_unreference_unlocked(obj
);
646 static int msm_ioctl_gem_info(struct drm_device
*dev
, void *data
,
647 struct drm_file
*file
)
649 struct drm_msm_gem_info
*args
= data
;
650 struct drm_gem_object
*obj
;
656 obj
= drm_gem_object_lookup(dev
, file
, args
->handle
);
660 args
->offset
= msm_gem_mmap_offset(obj
);
662 drm_gem_object_unreference_unlocked(obj
);
667 static int msm_ioctl_wait_fence(struct drm_device
*dev
, void *data
,
668 struct drm_file
*file
)
670 struct drm_msm_wait_fence
*args
= data
;
671 return msm_wait_fence_interruptable(dev
, args
->fence
, &TS(args
->timeout
));
674 static const struct drm_ioctl_desc msm_ioctls
[] = {
675 DRM_IOCTL_DEF_DRV(MSM_GET_PARAM
, msm_ioctl_get_param
, DRM_UNLOCKED
|DRM_AUTH
|DRM_RENDER_ALLOW
),
676 DRM_IOCTL_DEF_DRV(MSM_GEM_NEW
, msm_ioctl_gem_new
, DRM_UNLOCKED
|DRM_AUTH
|DRM_RENDER_ALLOW
),
677 DRM_IOCTL_DEF_DRV(MSM_GEM_INFO
, msm_ioctl_gem_info
, DRM_UNLOCKED
|DRM_AUTH
|DRM_RENDER_ALLOW
),
678 DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_PREP
, msm_ioctl_gem_cpu_prep
, DRM_UNLOCKED
|DRM_AUTH
|DRM_RENDER_ALLOW
),
679 DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI
, msm_ioctl_gem_cpu_fini
, DRM_UNLOCKED
|DRM_AUTH
|DRM_RENDER_ALLOW
),
680 DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT
, msm_ioctl_gem_submit
, DRM_UNLOCKED
|DRM_AUTH
|DRM_RENDER_ALLOW
),
681 DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE
, msm_ioctl_wait_fence
, DRM_UNLOCKED
|DRM_AUTH
|DRM_RENDER_ALLOW
),
684 static const struct vm_operations_struct vm_ops
= {
685 .fault
= msm_gem_fault
,
686 .open
= drm_gem_vm_open
,
687 .close
= drm_gem_vm_close
,
690 static const struct file_operations fops
= {
691 .owner
= THIS_MODULE
,
693 .release
= drm_release
,
694 .unlocked_ioctl
= drm_ioctl
,
696 .compat_ioctl
= drm_compat_ioctl
,
701 .mmap
= msm_gem_mmap
,
704 static struct drm_driver msm_driver
= {
705 .driver_features
= DRIVER_HAVE_IRQ
|
711 .unload
= msm_unload
,
713 .preclose
= msm_preclose
,
714 .lastclose
= msm_lastclose
,
715 .irq_handler
= msm_irq
,
716 .irq_preinstall
= msm_irq_preinstall
,
717 .irq_postinstall
= msm_irq_postinstall
,
718 .irq_uninstall
= msm_irq_uninstall
,
719 .get_vblank_counter
= drm_vblank_count
,
720 .enable_vblank
= msm_enable_vblank
,
721 .disable_vblank
= msm_disable_vblank
,
722 .gem_free_object
= msm_gem_free_object
,
723 .gem_vm_ops
= &vm_ops
,
724 .dumb_create
= msm_gem_dumb_create
,
725 .dumb_map_offset
= msm_gem_dumb_map_offset
,
726 .dumb_destroy
= drm_gem_dumb_destroy
,
727 .prime_handle_to_fd
= drm_gem_prime_handle_to_fd
,
728 .prime_fd_to_handle
= drm_gem_prime_fd_to_handle
,
729 .gem_prime_export
= drm_gem_prime_export
,
730 .gem_prime_import
= drm_gem_prime_import
,
731 .gem_prime_pin
= msm_gem_prime_pin
,
732 .gem_prime_unpin
= msm_gem_prime_unpin
,
733 .gem_prime_get_sg_table
= msm_gem_prime_get_sg_table
,
734 .gem_prime_import_sg_table
= msm_gem_prime_import_sg_table
,
735 .gem_prime_vmap
= msm_gem_prime_vmap
,
736 .gem_prime_vunmap
= msm_gem_prime_vunmap
,
737 #ifdef CONFIG_DEBUG_FS
738 .debugfs_init
= msm_debugfs_init
,
739 .debugfs_cleanup
= msm_debugfs_cleanup
,
741 .ioctls
= msm_ioctls
,
742 .num_ioctls
= DRM_MSM_NUM_IOCTLS
,
745 .desc
= "MSM Snapdragon DRM",
751 #ifdef CONFIG_PM_SLEEP
752 static int msm_pm_suspend(struct device
*dev
)
754 struct drm_device
*ddev
= dev_get_drvdata(dev
);
756 drm_kms_helper_poll_disable(ddev
);
/* System resume: re-enable output polling. */
static int msm_pm_resume(struct device *dev)
{
	struct drm_device *ddev = dev_get_drvdata(dev);

	drm_kms_helper_poll_enable(ddev);

	return 0;
}
771 static const struct dev_pm_ops msm_pm_ops
= {
772 SET_SYSTEM_SLEEP_PM_OPS(msm_pm_suspend
, msm_pm_resume
)
779 static int msm_pdev_probe(struct platform_device
*pdev
)
781 return drm_platform_init(&msm_driver
, pdev
);
784 static int msm_pdev_remove(struct platform_device
*pdev
)
786 drm_platform_exit(&msm_driver
, pdev
);
791 static const struct platform_device_id msm_id
[] = {
796 static struct platform_driver msm_platform_driver
= {
797 .probe
= msm_pdev_probe
,
798 .remove
= msm_pdev_remove
,
800 .owner
= THIS_MODULE
,
807 static int __init
msm_drm_register(void)
812 return platform_driver_register(&msm_platform_driver
);
815 static void __exit
msm_drm_unregister(void)
818 platform_driver_unregister(&msm_platform_driver
);
823 module_init(msm_drm_register
);
824 module_exit(msm_drm_unregister
);
826 MODULE_AUTHOR("Rob Clark <robdclark@gmail.com");
827 MODULE_DESCRIPTION("MSM DRM Driver");
828 MODULE_LICENSE("GPL");