// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Avionic Design GmbH
 * Copyright (C) 2012-2016 NVIDIA CORPORATION. All rights reserved.
 */

#include <linux/bitops.h>
#include <linux/host1x.h>
#include <linux/idr.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/platform_device.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_drv.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_prime.h>
#include <drm/drm_vblank.h>

#include "drm.h"
#include "gem.h"

#define DRIVER_NAME "tegra"
#define DRIVER_DESC "NVIDIA Tegra graphics"
#define DRIVER_DATE "20120330"
#define DRIVER_MAJOR 0
#define DRIVER_MINOR 0
#define DRIVER_PATCHLEVEL 0

#define CARVEOUT_SZ SZ_64M
#define CDMA_GATHER_FETCHES_MAX_NB 16383
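
/*
 * CARVEOUT_SZ is the amount of IOVA space reserved at the top of the GEM
 * aperture for kernel-internal allocations made through tegra_drm_alloc()
 * (see the carveout setup in host1x_drm_probe() below).
 * CDMA_GATHER_FETCHES_MAX_NB is the hardware limit on the number of fetches
 * in a single CDMA GATHER and is used to validate the words count of
 * user-supplied command buffers.
 */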

struct tegra_drm_file {
	struct idr contexts;
	struct mutex lock;
};

static int tegra_atomic_check(struct drm_device *drm,
			      struct drm_atomic_state *state)
{
	int err;

	err = drm_atomic_helper_check(drm, state);
	if (err < 0)
		return err;

	return tegra_display_hub_atomic_check(drm, state);
}

static const struct drm_mode_config_funcs tegra_drm_mode_config_funcs = {
	.fb_create = tegra_fb_create,
#ifdef CONFIG_DRM_FBDEV_EMULATION
	.output_poll_changed = drm_fb_helper_output_poll_changed,
#endif
	.atomic_check = tegra_atomic_check,
	.atomic_commit = drm_atomic_helper_commit,
};
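
/*
 * When a display hub is present (Tegra186 and later), its state must be
 * committed between disabling the old configuration and programming the
 * planes, so a custom commit_tail implementation is used instead of the
 * default atomic helper.
 */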
static void tegra_atomic_commit_tail(struct drm_atomic_state *old_state)
{
	struct drm_device *drm = old_state->dev;
	struct tegra_drm *tegra = drm->dev_private;

	if (tegra->hub) {
		bool fence_cookie = dma_fence_begin_signalling();

		drm_atomic_helper_commit_modeset_disables(drm, old_state);
		tegra_display_hub_atomic_commit(drm, old_state);
		drm_atomic_helper_commit_planes(drm, old_state, 0);
		drm_atomic_helper_commit_modeset_enables(drm, old_state);
		drm_atomic_helper_commit_hw_done(old_state);
		dma_fence_end_signalling(fence_cookie);
		drm_atomic_helper_wait_for_vblanks(drm, old_state);
		drm_atomic_helper_cleanup_planes(drm, old_state);
	} else {
		drm_atomic_helper_commit_tail_rpm(old_state);
	}
}

static const struct drm_mode_config_helper_funcs
tegra_drm_mode_config_helpers = {
	.atomic_commit_tail = tegra_atomic_commit_tail,
};

static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp)
{
	struct tegra_drm_file *fpriv;

	fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
	if (!fpriv)
		return -ENOMEM;

	idr_init_base(&fpriv->contexts, 1);
	mutex_init(&fpriv->lock);
	filp->driver_priv = fpriv;

	return 0;
}

static void tegra_drm_context_free(struct tegra_drm_context *context)
{
	context->client->ops->close_channel(context);
	kfree(context);
}

static struct host1x_bo *
host1x_bo_lookup(struct drm_file *file, u32 handle)
{
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	gem = drm_gem_object_lookup(file, handle);
	if (!gem)
		return NULL;

	bo = to_tegra_bo(gem);
	return &bo->base;
}
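
/*
 * Copy a single relocation descriptor from userspace and resolve the GEM
 * handles it references into host1x buffer objects. References taken on the
 * command buffer and target BOs are owned by the caller.
 */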
static int host1x_reloc_copy_from_user(struct host1x_reloc *dest,
				       struct drm_tegra_reloc __user *src,
				       struct drm_device *drm,
				       struct drm_file *file)
{
	u32 cmdbuf, target;
	int err;

	err = get_user(cmdbuf, &src->cmdbuf.handle);
	if (err < 0)
		return err;

	err = get_user(dest->cmdbuf.offset, &src->cmdbuf.offset);
	if (err < 0)
		return err;

	err = get_user(target, &src->target.handle);
	if (err < 0)
		return err;

	err = get_user(dest->target.offset, &src->target.offset);
	if (err < 0)
		return err;

	err = get_user(dest->shift, &src->shift);
	if (err < 0)
		return err;

	dest->flags = HOST1X_RELOC_READ | HOST1X_RELOC_WRITE;

	dest->cmdbuf.bo = host1x_bo_lookup(file, cmdbuf);
	if (!dest->cmdbuf.bo)
		return -ENOENT;

	dest->target.bo = host1x_bo_lookup(file, target);
	if (!dest->target.bo)
		return -ENOENT;

	return 0;
}
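
/*
 * Legacy job submission path for the staging UAPI: validate the user-supplied
 * command buffers, relocations and syncpoint increment descriptor, pin the
 * referenced buffers and hand the job off to host1x. GEM references taken
 * during validation are dropped again once the job has been submitted.
 */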
int tegra_drm_submit(struct tegra_drm_context *context,
		     struct drm_tegra_submit *args, struct drm_device *drm,
		     struct drm_file *file)
{
	struct host1x_client *client = &context->client->base;
	unsigned int num_cmdbufs = args->num_cmdbufs;
	unsigned int num_relocs = args->num_relocs;
	struct drm_tegra_cmdbuf __user *user_cmdbufs;
	struct drm_tegra_reloc __user *user_relocs;
	struct drm_tegra_syncpt __user *user_syncpt;
	struct drm_tegra_syncpt syncpt;
	struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
	struct drm_gem_object **refs;
	struct host1x_syncpt *sp = NULL;
	struct host1x_job *job;
	unsigned int num_refs;
	int err;

	user_cmdbufs = u64_to_user_ptr(args->cmdbufs);
	user_relocs = u64_to_user_ptr(args->relocs);
	user_syncpt = u64_to_user_ptr(args->syncpts);

	/* We don't yet support more than one syncpt_incr struct per submit */
	if (args->num_syncpts != 1)
		return -EINVAL;

	/* We don't yet support waitchks */
	if (args->num_waitchks != 0)
		return -EINVAL;

	job = host1x_job_alloc(context->channel, args->num_cmdbufs,
			       args->num_relocs);
	if (!job)
		return -ENOMEM;

	job->num_relocs = args->num_relocs;
	job->client = client;
	job->class = client->class;
	job->serialize = true;

	/*
	 * Track referenced BOs so that they can be unreferenced after the
	 * submission is complete.
	 */
	num_refs = num_cmdbufs + num_relocs * 2;

	refs = kmalloc_array(num_refs, sizeof(*refs), GFP_KERNEL);
	if (!refs) {
		err = -ENOMEM;
		goto put;
	}

	/* reuse as an iterator later */
	num_refs = 0;

	while (num_cmdbufs) {
		struct drm_tegra_cmdbuf cmdbuf;
		struct host1x_bo *bo;
		struct tegra_bo *obj;
		u64 offset;

		if (copy_from_user(&cmdbuf, user_cmdbufs, sizeof(cmdbuf))) {
			err = -EFAULT;
			goto fail;
		}

		/*
		 * The maximum number of CDMA gather fetches is 16383, a higher
		 * value means the words count is malformed.
		 */
		if (cmdbuf.words > CDMA_GATHER_FETCHES_MAX_NB) {
			err = -EINVAL;
			goto fail;
		}

		bo = host1x_bo_lookup(file, cmdbuf.handle);
		if (!bo) {
			err = -ENOENT;
			goto fail;
		}

		offset = (u64)cmdbuf.offset + (u64)cmdbuf.words * sizeof(u32);
		obj = host1x_to_tegra_bo(bo);
		refs[num_refs++] = &obj->gem;

		/*
		 * The gather buffer base address must be 4-byte aligned; an
		 * unaligned offset is malformed and causes command stream
		 * corruption when the buffer address is relocated.
		 */
		if (offset & 3 || offset > obj->gem.size) {
			err = -EINVAL;
			goto fail;
		}

		host1x_job_add_gather(job, bo, cmdbuf.words, cmdbuf.offset);
		num_cmdbufs--;
		user_cmdbufs++;
	}

	/* copy and resolve relocations from submit */
	while (num_relocs--) {
		struct host1x_reloc *reloc;
		struct tegra_bo *obj;

		err = host1x_reloc_copy_from_user(&job->relocs[num_relocs],
						  &user_relocs[num_relocs], drm,
						  file);
		if (err < 0)
			goto fail;

		reloc = &job->relocs[num_relocs];
		obj = host1x_to_tegra_bo(reloc->cmdbuf.bo);
		refs[num_refs++] = &obj->gem;

		/*
		 * An unaligned cmdbuf offset will cause an unaligned write
		 * during relocation patching, corrupting the command stream.
		 */
		if (reloc->cmdbuf.offset & 3 ||
		    reloc->cmdbuf.offset >= obj->gem.size) {
			err = -EINVAL;
			goto fail;
		}

		obj = host1x_to_tegra_bo(reloc->target.bo);
		refs[num_refs++] = &obj->gem;

		if (reloc->target.offset >= obj->gem.size) {
			err = -EINVAL;
			goto fail;
		}
	}

	if (copy_from_user(&syncpt, user_syncpt, sizeof(syncpt))) {
		err = -EFAULT;
		goto fail;
	}

	/* Syncpoint ref will be dropped on job release. */
	sp = host1x_syncpt_get_by_id(host1x, syncpt.id);
	if (!sp) {
		err = -ENOENT;
		goto fail;
	}

	job->is_addr_reg = context->client->ops->is_addr_reg;
	job->is_valid_class = context->client->ops->is_valid_class;
	job->syncpt_incrs = syncpt.incrs;
	job->syncpt = sp;
	job->timeout = 10000;

	if (args->timeout && args->timeout < 10000)
		job->timeout = args->timeout;

	err = host1x_job_pin(job, context->client->base.dev);
	if (err)
		goto fail;

	err = host1x_job_submit(job);
	if (err) {
		host1x_job_unpin(job);
		goto fail;
	}

	args->fence = job->syncpt_end;

fail:
	while (num_refs--)
		drm_gem_object_put(refs[num_refs]);

	kfree(refs);

put:
	host1x_job_put(job);
	return err;
}

#ifdef CONFIG_DRM_TEGRA_STAGING
static int tegra_gem_create(struct drm_device *drm, void *data,
			    struct drm_file *file)
{
	struct drm_tegra_gem_create *args = data;
	struct tegra_bo *bo;

	bo = tegra_bo_create_with_handle(file, drm, args->size, args->flags,
					 &args->handle);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	return 0;
}

static int tegra_gem_mmap(struct drm_device *drm, void *data,
			  struct drm_file *file)
{
	struct drm_tegra_gem_mmap *args = data;
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	gem = drm_gem_object_lookup(file, args->handle);
	if (!gem)
		return -EINVAL;

	bo = to_tegra_bo(gem);

	args->offset = drm_vma_node_offset_addr(&bo->gem.vma_node);

	drm_gem_object_put(gem);

	return 0;
}

static int tegra_syncpt_read(struct drm_device *drm, void *data,
			     struct drm_file *file)
{
	struct host1x *host = dev_get_drvdata(drm->dev->parent);
	struct drm_tegra_syncpt_read *args = data;
	struct host1x_syncpt *sp;

	sp = host1x_syncpt_get_by_id_noref(host, args->id);
	if (!sp)
		return -EINVAL;

	args->value = host1x_syncpt_read_min(sp);
	return 0;
}

static int tegra_syncpt_incr(struct drm_device *drm, void *data,
			     struct drm_file *file)
{
	struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
	struct drm_tegra_syncpt_incr *args = data;
	struct host1x_syncpt *sp;

	sp = host1x_syncpt_get_by_id_noref(host1x, args->id);
	if (!sp)
		return -EINVAL;

	return host1x_syncpt_incr(sp);
}

static int tegra_syncpt_wait(struct drm_device *drm, void *data,
			     struct drm_file *file)
{
	struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
	struct drm_tegra_syncpt_wait *args = data;
	struct host1x_syncpt *sp;

	sp = host1x_syncpt_get_by_id_noref(host1x, args->id);
	if (!sp)
		return -EINVAL;

	return host1x_syncpt_wait(sp, args->thresh,
				  msecs_to_jiffies(args->timeout),
				  &args->value);
}

static int tegra_client_open(struct tegra_drm_file *fpriv,
			     struct tegra_drm_client *client,
			     struct tegra_drm_context *context)
{
	int err;

	err = client->ops->open_channel(client, context);
	if (err < 0)
		return err;

	err = idr_alloc(&fpriv->contexts, context, 1, 0, GFP_KERNEL);
	if (err < 0) {
		client->ops->close_channel(context);
		return err;
	}

	context->client = client;
	context->id = err;

	return 0;
}

static int tegra_open_channel(struct drm_device *drm, void *data,
			      struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct tegra_drm *tegra = drm->dev_private;
	struct drm_tegra_open_channel *args = data;
	struct tegra_drm_context *context;
	struct tegra_drm_client *client;
	int err = -ENODEV;

	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return -ENOMEM;

	mutex_lock(&fpriv->lock);

	list_for_each_entry(client, &tegra->clients, list)
		if (client->base.class == args->client) {
			err = tegra_client_open(fpriv, client, context);
			if (err < 0)
				break;

			args->context = context->id;
			break;
		}

	if (err < 0)
		kfree(context);

	mutex_unlock(&fpriv->lock);
	return err;
}

static int tegra_close_channel(struct drm_device *drm, void *data,
			       struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_close_channel *args = data;
	struct tegra_drm_context *context;
	int err = 0;

	mutex_lock(&fpriv->lock);

	context = idr_find(&fpriv->contexts, args->context);
	if (!context) {
		err = -EINVAL;
		goto unlock;
	}

	idr_remove(&fpriv->contexts, context->id);
	tegra_drm_context_free(context);

unlock:
	mutex_unlock(&fpriv->lock);
	return err;
}

static int tegra_get_syncpt(struct drm_device *drm, void *data,
			    struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_get_syncpt *args = data;
	struct tegra_drm_context *context;
	struct host1x_syncpt *syncpt;
	int err = 0;

	mutex_lock(&fpriv->lock);

	context = idr_find(&fpriv->contexts, args->context);
	if (!context) {
		err = -ENODEV;
		goto unlock;
	}

	if (args->index >= context->client->base.num_syncpts) {
		err = -EINVAL;
		goto unlock;
	}

	syncpt = context->client->base.syncpts[args->index];
	args->id = host1x_syncpt_id(syncpt);

unlock:
	mutex_unlock(&fpriv->lock);
	return err;
}

static int tegra_submit(struct drm_device *drm, void *data,
			struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_submit *args = data;
	struct tegra_drm_context *context;
	int err;

	mutex_lock(&fpriv->lock);

	context = idr_find(&fpriv->contexts, args->context);
	if (!context) {
		err = -ENODEV;
		goto unlock;
	}

	err = context->client->ops->submit(context, args, drm, file);

unlock:
	mutex_unlock(&fpriv->lock);
	return err;
}

static int tegra_get_syncpt_base(struct drm_device *drm, void *data,
				 struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_get_syncpt_base *args = data;
	struct tegra_drm_context *context;
	struct host1x_syncpt_base *base;
	struct host1x_syncpt *syncpt;
	int err = 0;

	mutex_lock(&fpriv->lock);

	context = idr_find(&fpriv->contexts, args->context);
	if (!context) {
		err = -ENODEV;
		goto unlock;
	}

	if (args->syncpt >= context->client->base.num_syncpts) {
		err = -EINVAL;
		goto unlock;
	}

	syncpt = context->client->base.syncpts[args->syncpt];

	base = host1x_syncpt_get_base(syncpt);
	if (!base) {
		err = -ENXIO;
		goto unlock;
	}

	args->id = host1x_syncpt_base_id(base);

unlock:
	mutex_unlock(&fpriv->lock);
	return err;
}

static int tegra_gem_set_tiling(struct drm_device *drm, void *data,
				struct drm_file *file)
{
	struct drm_tegra_gem_set_tiling *args = data;
	enum tegra_bo_tiling_mode mode;
	struct drm_gem_object *gem;
	unsigned long value = 0;
	struct tegra_bo *bo;

	switch (args->mode) {
	case DRM_TEGRA_GEM_TILING_MODE_PITCH:
		mode = TEGRA_BO_TILING_MODE_PITCH;

		if (args->value != 0)
			return -EINVAL;

		break;

	case DRM_TEGRA_GEM_TILING_MODE_TILED:
		mode = TEGRA_BO_TILING_MODE_TILED;

		if (args->value != 0)
			return -EINVAL;

		break;

	case DRM_TEGRA_GEM_TILING_MODE_BLOCK:
		mode = TEGRA_BO_TILING_MODE_BLOCK;
		value = args->value;
		break;

	default:
		return -EINVAL;
	}

	gem = drm_gem_object_lookup(file, args->handle);
	if (!gem)
		return -ENOENT;

	bo = to_tegra_bo(gem);

	bo->tiling.mode = mode;
	bo->tiling.value = value;

	drm_gem_object_put(gem);

	return 0;
}

static int tegra_gem_get_tiling(struct drm_device *drm, void *data,
				struct drm_file *file)
{
	struct drm_tegra_gem_get_tiling *args = data;
	struct drm_gem_object *gem;
	struct tegra_bo *bo;
	int err = 0;

	gem = drm_gem_object_lookup(file, args->handle);
	if (!gem)
		return -ENOENT;

	bo = to_tegra_bo(gem);

	switch (bo->tiling.mode) {
	case TEGRA_BO_TILING_MODE_PITCH:
		args->mode = DRM_TEGRA_GEM_TILING_MODE_PITCH;
		args->value = 0;
		break;

	case TEGRA_BO_TILING_MODE_TILED:
		args->mode = DRM_TEGRA_GEM_TILING_MODE_TILED;
		args->value = 0;
		break;

	case TEGRA_BO_TILING_MODE_BLOCK:
		args->mode = DRM_TEGRA_GEM_TILING_MODE_BLOCK;
		args->value = bo->tiling.value;
		break;

	default:
		err = -EINVAL;
		break;
	}

	drm_gem_object_put(gem);

	return err;
}

static int tegra_gem_set_flags(struct drm_device *drm, void *data,
			       struct drm_file *file)
{
	struct drm_tegra_gem_set_flags *args = data;
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	if (args->flags & ~DRM_TEGRA_GEM_FLAGS)
		return -EINVAL;

	gem = drm_gem_object_lookup(file, args->handle);
	if (!gem)
		return -ENOENT;

	bo = to_tegra_bo(gem);
	bo->flags = 0;

	if (args->flags & DRM_TEGRA_GEM_BOTTOM_UP)
		bo->flags |= TEGRA_BO_BOTTOM_UP;

	drm_gem_object_put(gem);

	return 0;
}

static int tegra_gem_get_flags(struct drm_device *drm, void *data,
			       struct drm_file *file)
{
	struct drm_tegra_gem_get_flags *args = data;
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	gem = drm_gem_object_lookup(file, args->handle);
	if (!gem)
		return -ENOENT;

	bo = to_tegra_bo(gem);
	args->flags = 0;

	if (bo->flags & TEGRA_BO_BOTTOM_UP)
		args->flags |= DRM_TEGRA_GEM_BOTTOM_UP;

	drm_gem_object_put(gem);

	return 0;
}
#endif

static const struct drm_ioctl_desc tegra_drm_ioctls[] = {
#ifdef CONFIG_DRM_TEGRA_STAGING
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_CREATE, tegra_gem_create,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_MMAP, tegra_gem_mmap,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_READ, tegra_syncpt_read,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_INCR, tegra_syncpt_incr,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_WAIT, tegra_syncpt_wait,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_OPEN_CHANNEL, tegra_open_channel,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_CLOSE_CHANNEL, tegra_close_channel,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT, tegra_get_syncpt,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_SUBMIT, tegra_submit,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT_BASE, tegra_get_syncpt_base,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_TILING, tegra_gem_set_tiling,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_TILING, tegra_gem_get_tiling,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_FLAGS, tegra_gem_set_flags,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_FLAGS, tegra_gem_get_flags,
			  DRM_RENDER_ALLOW),
#endif
};
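
/*
 * Rough sketch of how userspace is expected to drive the staging UAPI above
 * (illustrative only; structure fields match what the handlers in this file
 * consume and the ioctl names follow include/uapi/drm/tegra_drm.h):
 *
 *	struct drm_tegra_open_channel open = { .client = <host1x class> };
 *	ioctl(fd, DRM_IOCTL_TEGRA_OPEN_CHANNEL, &open);
 *
 *	struct drm_tegra_gem_create create = { .size = 4096 };
 *	ioctl(fd, DRM_IOCTL_TEGRA_GEM_CREATE, &create);
 *
 *	// ... write commands into the buffer, then submit ...
 *
 *	struct drm_tegra_submit submit = {
 *		.context = open.context,
 *		.num_syncpts = 1,
 *		.num_cmdbufs = 1,
 *		.cmdbufs = (uintptr_t)&cmdbuf,
 *		.syncpts = (uintptr_t)&syncpt,
 *	};
 *	ioctl(fd, DRM_IOCTL_TEGRA_SUBMIT, &submit);
 */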

static const struct file_operations tegra_drm_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.mmap = tegra_drm_mmap,
	.poll = drm_poll,
	.read = drm_read,
	.compat_ioctl = drm_compat_ioctl,
	.llseek = noop_llseek,
};

static int tegra_drm_context_cleanup(int id, void *p, void *data)
{
	struct tegra_drm_context *context = p;

	tegra_drm_context_free(context);

	return 0;
}

static void tegra_drm_postclose(struct drm_device *drm, struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;

	mutex_lock(&fpriv->lock);
	idr_for_each(&fpriv->contexts, tegra_drm_context_cleanup, NULL);
	mutex_unlock(&fpriv->lock);

	idr_destroy(&fpriv->contexts);
	mutex_destroy(&fpriv->lock);
	kfree(fpriv);
}

#ifdef CONFIG_DEBUG_FS
static int tegra_debugfs_framebuffers(struct seq_file *s, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)s->private;
	struct drm_device *drm = node->minor->dev;
	struct drm_framebuffer *fb;

	mutex_lock(&drm->mode_config.fb_lock);

	list_for_each_entry(fb, &drm->mode_config.fb_list, head) {
		seq_printf(s, "%3d: user size: %d x %d, depth %d, %d bpp, refcount %d\n",
			   fb->base.id, fb->width, fb->height,
			   fb->format->depth,
			   fb->format->cpp[0] * 8,
			   drm_framebuffer_read_refcount(fb));
	}

	mutex_unlock(&drm->mode_config.fb_lock);

	return 0;
}

static int tegra_debugfs_iova(struct seq_file *s, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)s->private;
	struct drm_device *drm = node->minor->dev;
	struct tegra_drm *tegra = drm->dev_private;
	struct drm_printer p = drm_seq_file_printer(s);

	if (tegra->domain) {
		mutex_lock(&tegra->mm_lock);
		drm_mm_print(&tegra->mm, &p);
		mutex_unlock(&tegra->mm_lock);
	}

	return 0;
}

static struct drm_info_list tegra_debugfs_list[] = {
	{ "framebuffers", tegra_debugfs_framebuffers, 0 },
	{ "iova", tegra_debugfs_iova, 0 },
};

static void tegra_debugfs_init(struct drm_minor *minor)
{
	drm_debugfs_create_files(tegra_debugfs_list,
				 ARRAY_SIZE(tegra_debugfs_list),
				 minor->debugfs_root, minor);
}
#endif

static const struct drm_driver tegra_drm_driver = {
	.driver_features = DRIVER_MODESET | DRIVER_GEM |
			   DRIVER_ATOMIC | DRIVER_RENDER,
	.open = tegra_drm_open,
	.postclose = tegra_drm_postclose,
	.lastclose = drm_fb_helper_lastclose,

#if defined(CONFIG_DEBUG_FS)
	.debugfs_init = tegra_debugfs_init,
#endif

	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_import = tegra_gem_prime_import,

	.dumb_create = tegra_bo_dumb_create,

	.ioctls = tegra_drm_ioctls,
	.num_ioctls = ARRAY_SIZE(tegra_drm_ioctls),
	.fops = &tegra_drm_fops,

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};

int tegra_drm_register_client(struct tegra_drm *tegra,
			      struct tegra_drm_client *client)
{
	mutex_lock(&tegra->clients_lock);
	list_add_tail(&client->list, &tegra->clients);
	mutex_unlock(&tegra->clients_lock);

	return 0;
}

int tegra_drm_unregister_client(struct tegra_drm *tegra,
				struct tegra_drm_client *client)
{
	mutex_lock(&tegra->clients_lock);
	list_del_init(&client->list);
	mutex_unlock(&tegra->clients_lock);

	return 0;
}
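
/*
 * Attach a host1x client to the shared IOMMU domain used for explicit IOMMU
 * management, unless the client already has a (DMA API managed) domain of its
 * own, in which case it is left untouched.
 */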
int host1x_client_iommu_attach(struct host1x_client *client)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(client->dev);
	struct drm_device *drm = dev_get_drvdata(client->host);
	struct tegra_drm *tegra = drm->dev_private;
	struct iommu_group *group = NULL;
	int err;

	/*
	 * If the host1x client is already attached to an IOMMU domain that is
	 * not the shared IOMMU domain, don't try to attach it to a different
	 * domain. This allows using the IOMMU-backed DMA API.
	 */
	if (domain && domain != tegra->domain)
		return 0;

	if (tegra->domain) {
		group = iommu_group_get(client->dev);
		if (!group)
			return -ENODEV;

		if (domain != tegra->domain) {
			err = iommu_attach_group(tegra->domain, group);
			if (err < 0) {
				iommu_group_put(group);
				return err;
			}
		}

		tegra->use_explicit_iommu = true;
	}

	client->group = group;

	return 0;
}

void host1x_client_iommu_detach(struct host1x_client *client)
{
	struct drm_device *drm = dev_get_drvdata(client->host);
	struct tegra_drm *tegra = drm->dev_private;
	struct iommu_domain *domain;

	if (client->group) {
		/*
		 * Devices that are part of the same group may no longer be
		 * attached to a domain at this point because their group may
		 * have been detached by an earlier client.
		 */
		domain = iommu_get_domain_for_dev(client->dev);
		if (domain)
			iommu_detach_group(tegra->domain, client->group);

		iommu_group_put(client->group);
		client->group = NULL;
	}
}
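
/*
 * Allocate kernel-internal memory and, if an explicit IOMMU domain is in use,
 * map it through the carveout IOVA allocator set up in host1x_drm_probe().
 * Without an IOMMU the pages are handed to devices at their physical address.
 */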
void *tegra_drm_alloc(struct tegra_drm *tegra, size_t size, dma_addr_t *dma)
{
	struct iova *alloc;
	void *virt;
	gfp_t gfp;
	int err;

	if (tegra->domain)
		size = iova_align(&tegra->carveout.domain, size);
	else
		size = PAGE_ALIGN(size);

	gfp = GFP_KERNEL | __GFP_ZERO;
	if (!tegra->domain) {
		/*
		 * Many units only support 32-bit addresses, even on 64-bit
		 * SoCs. If there is no IOMMU to translate into a 32-bit IO
		 * virtual address space, force allocations to be in the
		 * lower 32-bit range.
		 */
		gfp |= GFP_DMA;
	}

	virt = (void *)__get_free_pages(gfp, get_order(size));
	if (!virt)
		return ERR_PTR(-ENOMEM);

	if (!tegra->domain) {
		/*
		 * If IOMMU is disabled, devices address physical memory
		 * directly.
		 */
		*dma = virt_to_phys(virt);
		return virt;
	}

	alloc = alloc_iova(&tegra->carveout.domain,
			   size >> tegra->carveout.shift,
			   tegra->carveout.limit, true);
	if (!alloc) {
		err = -EBUSY;
		goto free_pages;
	}

	*dma = iova_dma_addr(&tegra->carveout.domain, alloc);
	err = iommu_map(tegra->domain, *dma, virt_to_phys(virt),
			size, IOMMU_READ | IOMMU_WRITE);
	if (err < 0)
		goto free_iova;

	return virt;

free_iova:
	__free_iova(&tegra->carveout.domain, alloc);
free_pages:
	free_pages((unsigned long)virt, get_order(size));

	return ERR_PTR(err);
}

void tegra_drm_free(struct tegra_drm *tegra, size_t size, void *virt,
		    dma_addr_t dma)
{
	if (tegra->domain)
		size = iova_align(&tegra->carveout.domain, size);
	else
		size = PAGE_ALIGN(size);

	if (tegra->domain) {
		iommu_unmap(tegra->domain, dma, size);
		free_iova(&tegra->carveout.domain,
			  iova_pfn(&tegra->carveout.domain, dma));
	}

	free_pages((unsigned long)virt, get_order(size));
}

static bool host1x_drm_wants_iommu(struct host1x_device *dev)
{
	struct host1x *host1x = dev_get_drvdata(dev->dev.parent);
	struct iommu_domain *domain;

	/*
	 * If the Tegra DRM clients are backed by an IOMMU, push buffers are
	 * likely to be allocated beyond the 32-bit boundary if sufficient
	 * system memory is available. This is problematic on earlier Tegra
	 * generations where host1x supports a maximum of 32 address bits in
	 * the GATHER opcode. In this case, unless host1x is behind an IOMMU
	 * as well, it won't be able to process buffers allocated beyond the
	 * 32-bit boundary.
	 *
	 * The DMA API will use bounce buffers in this case, so that could
	 * perhaps still be made to work, even if less efficient, but there
	 * is another catch: in order to perform cache maintenance on pages
	 * allocated for discontiguous buffers we need to map and unmap the
	 * SG table representing these buffers. This is fine for something
	 * small like a push buffer, but it exhausts the bounce buffer pool
	 * (typically on the order of a few MiB) for framebuffers (many MiB
	 * for any modern resolution).
	 *
	 * Work around this by making sure that Tegra DRM clients only use
	 * an IOMMU if the parent host1x also uses an IOMMU.
	 *
	 * Note that there's still a small gap here that we don't cover: if
	 * the DMA API is backed by an IOMMU there's no way to control which
	 * device is attached to an IOMMU and which isn't, except via wiring
	 * up the device tree appropriately. This is considered a problem
	 * of integration, so care must be taken for the DT to be consistent.
	 */
	domain = iommu_get_domain_for_dev(dev->dev.parent);

	/*
	 * Tegra20 and Tegra30 don't support addressing memory beyond the
	 * 32-bit boundary, so the regular GATHER opcodes will always be
	 * sufficient and whether or not the host1x is attached to an IOMMU
	 * doesn't matter.
	 */
	if (!domain && host1x_get_dma_mask(host1x) <= DMA_BIT_MASK(32))
		return true;

	return domain != NULL;
}
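
/*
 * Bring up the DRM device: allocate the tegra_drm instance, optionally set up
 * an explicitly managed IOMMU domain with a GEM aperture and carveout region,
 * initialize KMS, probe all host1x subdevices and finally register the DRM
 * device along with the fbdev fallback.
 */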
static int host1x_drm_probe(struct host1x_device *dev)
{
	struct tegra_drm *tegra;
	struct drm_device *drm;
	int err;

	drm = drm_dev_alloc(&tegra_drm_driver, &dev->dev);
	if (IS_ERR(drm))
		return PTR_ERR(drm);

	tegra = kzalloc(sizeof(*tegra), GFP_KERNEL);
	if (!tegra) {
		err = -ENOMEM;
		goto put;
	}

	if (host1x_drm_wants_iommu(dev) && iommu_present(&platform_bus_type)) {
		tegra->domain = iommu_domain_alloc(&platform_bus_type);
		if (!tegra->domain) {
			err = -ENOMEM;
			goto free;
		}

		err = iova_cache_get();
		if (err < 0)
			goto domain;
	}

	mutex_init(&tegra->clients_lock);
	INIT_LIST_HEAD(&tegra->clients);

	dev_set_drvdata(&dev->dev, drm);
	drm->dev_private = tegra;
	tegra->drm = drm;

	drm_mode_config_init(drm);

	drm->mode_config.min_width = 0;
	drm->mode_config.min_height = 0;
	drm->mode_config.max_width = 0;
	drm->mode_config.max_height = 0;

	drm->mode_config.allow_fb_modifiers = true;

	drm->mode_config.normalize_zpos = true;

	drm->mode_config.funcs = &tegra_drm_mode_config_funcs;
	drm->mode_config.helper_private = &tegra_drm_mode_config_helpers;

	err = tegra_drm_fb_prepare(drm);
	if (err < 0)
		goto config;

	drm_kms_helper_poll_init(drm);

	err = host1x_device_init(dev);
	if (err < 0)
		goto fbdev;

	/*
	 * Now that all display controllers have been initialized, the maximum
	 * supported resolution is known and the bitmask for horizontal and
	 * vertical bitfields can be computed.
	 */
	tegra->hmask = drm->mode_config.max_width - 1;
	tegra->vmask = drm->mode_config.max_height - 1;

	if (tegra->use_explicit_iommu) {
		u64 carveout_start, carveout_end, gem_start, gem_end;
		u64 dma_mask = dma_get_mask(&dev->dev);
		dma_addr_t start, end;
		unsigned long order;

		start = tegra->domain->geometry.aperture_start & dma_mask;
		end = tegra->domain->geometry.aperture_end & dma_mask;

		gem_start = start;
		gem_end = end - CARVEOUT_SZ;
		carveout_start = gem_end + 1;
		carveout_end = end;

		order = __ffs(tegra->domain->pgsize_bitmap);
		init_iova_domain(&tegra->carveout.domain, 1UL << order,
				 carveout_start >> order);

		tegra->carveout.shift = iova_shift(&tegra->carveout.domain);
		tegra->carveout.limit = carveout_end >> tegra->carveout.shift;

		drm_mm_init(&tegra->mm, gem_start, gem_end - gem_start + 1);
		mutex_init(&tegra->mm_lock);

		DRM_DEBUG_DRIVER("IOMMU apertures:\n");
		DRM_DEBUG_DRIVER("  GEM: %#llx-%#llx\n", gem_start, gem_end);
		DRM_DEBUG_DRIVER("  Carveout: %#llx-%#llx\n", carveout_start,
				 carveout_end);
	} else if (tegra->domain) {
		iommu_domain_free(tegra->domain);
		tegra->domain = NULL;
		iova_cache_put();
	}

	if (tegra->hub) {
		err = tegra_display_hub_prepare(tegra->hub);
		if (err < 0)
			goto device;
	}

	/*
	 * We don't use the drm_irq_install() helpers provided by the DRM
	 * core, so we need to set this manually in order to allow
	 * DRM_IOCTL_WAIT_VBLANK to operate correctly.
	 */
	drm->irq_enabled = true;

	/* syncpoints are used for full 32-bit hardware VBLANK counters */
	drm->max_vblank_count = 0xffffffff;

	err = drm_vblank_init(drm, drm->mode_config.num_crtc);
	if (err < 0)
		goto hub;

	drm_mode_config_reset(drm);

	err = drm_fb_helper_remove_conflicting_framebuffers(NULL, "tegradrmfb",
							    false);
	if (err < 0)
		goto hub;

	err = tegra_drm_fb_init(drm);
	if (err < 0)
		goto hub;

	err = drm_dev_register(drm, 0);
	if (err < 0)
		goto fb;

	return 0;

fb:
	tegra_drm_fb_exit(drm);
hub:
	if (tegra->hub)
		tegra_display_hub_cleanup(tegra->hub);
device:
	if (tegra->domain) {
		mutex_destroy(&tegra->mm_lock);
		drm_mm_takedown(&tegra->mm);
		put_iova_domain(&tegra->carveout.domain);
		iova_cache_put();
	}

	host1x_device_exit(dev);
fbdev:
	drm_kms_helper_poll_fini(drm);
	tegra_drm_fb_free(drm);
config:
	drm_mode_config_cleanup(drm);
domain:
	if (tegra->domain)
		iommu_domain_free(tegra->domain);
free:
	kfree(tegra);
put:
	drm_dev_put(drm);
	return err;
}

static int host1x_drm_remove(struct host1x_device *dev)
{
	struct drm_device *drm = dev_get_drvdata(&dev->dev);
	struct tegra_drm *tegra = drm->dev_private;
	int err;

	drm_dev_unregister(drm);

	drm_kms_helper_poll_fini(drm);
	tegra_drm_fb_exit(drm);
	drm_atomic_helper_shutdown(drm);
	drm_mode_config_cleanup(drm);

	if (tegra->hub)
		tegra_display_hub_cleanup(tegra->hub);

	err = host1x_device_exit(dev);
	if (err < 0)
		dev_err(&dev->dev, "host1x device cleanup failed: %d\n", err);

	if (tegra->domain) {
		mutex_destroy(&tegra->mm_lock);
		drm_mm_takedown(&tegra->mm);
		put_iova_domain(&tegra->carveout.domain);
		iova_cache_put();
		iommu_domain_free(tegra->domain);
	}

	kfree(tegra);
	drm_dev_put(drm);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int host1x_drm_suspend(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);

	return drm_mode_config_helper_suspend(drm);
}

static int host1x_drm_resume(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);

	return drm_mode_config_helper_resume(drm);
}
#endif

static SIMPLE_DEV_PM_OPS(host1x_drm_pm_ops, host1x_drm_suspend,
			 host1x_drm_resume);

static const struct of_device_id host1x_drm_subdevs[] = {
	{ .compatible = "nvidia,tegra20-dc", },
	{ .compatible = "nvidia,tegra20-hdmi", },
	{ .compatible = "nvidia,tegra20-gr2d", },
	{ .compatible = "nvidia,tegra20-gr3d", },
	{ .compatible = "nvidia,tegra30-dc", },
	{ .compatible = "nvidia,tegra30-hdmi", },
	{ .compatible = "nvidia,tegra30-gr2d", },
	{ .compatible = "nvidia,tegra30-gr3d", },
	{ .compatible = "nvidia,tegra114-dc", },
	{ .compatible = "nvidia,tegra114-dsi", },
	{ .compatible = "nvidia,tegra114-hdmi", },
	{ .compatible = "nvidia,tegra114-gr2d", },
	{ .compatible = "nvidia,tegra114-gr3d", },
	{ .compatible = "nvidia,tegra124-dc", },
	{ .compatible = "nvidia,tegra124-sor", },
	{ .compatible = "nvidia,tegra124-hdmi", },
	{ .compatible = "nvidia,tegra124-dsi", },
	{ .compatible = "nvidia,tegra124-vic", },
	{ .compatible = "nvidia,tegra132-dsi", },
	{ .compatible = "nvidia,tegra210-dc", },
	{ .compatible = "nvidia,tegra210-dsi", },
	{ .compatible = "nvidia,tegra210-sor", },
	{ .compatible = "nvidia,tegra210-sor1", },
	{ .compatible = "nvidia,tegra210-vic", },
	{ .compatible = "nvidia,tegra186-display", },
	{ .compatible = "nvidia,tegra186-dc", },
	{ .compatible = "nvidia,tegra186-sor", },
	{ .compatible = "nvidia,tegra186-sor1", },
	{ .compatible = "nvidia,tegra186-vic", },
	{ .compatible = "nvidia,tegra194-display", },
	{ .compatible = "nvidia,tegra194-dc", },
	{ .compatible = "nvidia,tegra194-sor", },
	{ .compatible = "nvidia,tegra194-vic", },
	{ /* sentinel */ }
};

static struct host1x_driver host1x_drm_driver = {
	.driver = {
		.name = "drm",
		.pm = &host1x_drm_pm_ops,
	},
	.probe = host1x_drm_probe,
	.remove = host1x_drm_remove,
	.subdevs = host1x_drm_subdevs,
};

static struct platform_driver * const drivers[] = {
	&tegra_display_hub_driver,
	&tegra_dc_driver,
	&tegra_hdmi_driver,
	&tegra_dsi_driver,
	&tegra_dpaux_driver,
	&tegra_sor_driver,
	&tegra_gr2d_driver,
	&tegra_gr3d_driver,
	&tegra_vic_driver,
};

static int __init host1x_drm_init(void)
{
	int err;

	err = host1x_driver_register(&host1x_drm_driver);
	if (err < 0)
		return err;

	err = platform_register_drivers(drivers, ARRAY_SIZE(drivers));
	if (err < 0)
		goto unregister_host1x;

	return 0;

unregister_host1x:
	host1x_driver_unregister(&host1x_drm_driver);
	return err;
}
module_init(host1x_drm_init);

static void __exit host1x_drm_exit(void)
{
	platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
	host1x_driver_unregister(&host1x_drm_driver);
}
module_exit(host1x_drm_exit);

MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
MODULE_DESCRIPTION("NVIDIA Tegra DRM driver");
MODULE_LICENSE("GPL v2");