/*
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 */
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_page_alloc.h>
#include <drm/amdgpu_drm.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/swiotlb.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/debugfs.h>
#include <linux/iommu.h>
#include "amdgpu_object.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
#include "bif/bif_4_1_d.h"

#define DRM_FILE_PAGE_OFFSET	(0x100000000ULL >> PAGE_SHIFT)
static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
			     struct ttm_mem_reg *mem, unsigned num_pages,
			     uint64_t offset, unsigned window,
			     struct amdgpu_ring *ring,
			     uint64_t *addr);

static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev);
static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev);
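
/*
 * Global memory accounting: TTM keeps one drm_global reference for its
 * memory and BO subsystems; the helpers below initialize and release it.
 */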
static int amdgpu_ttm_mem_global_init(struct drm_global_reference *ref)
{
	return ttm_mem_global_init(ref->object);
}

static void amdgpu_ttm_mem_global_release(struct drm_global_reference *ref)
{
	ttm_mem_global_release(ref->object);
}
static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
{
	struct drm_global_reference *global_ref;
	struct amdgpu_ring *ring;
	struct drm_sched_rq *rq;
	int r;

	adev->mman.mem_global_referenced = false;
	global_ref = &adev->mman.mem_global_ref;
	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
	global_ref->size = sizeof(struct ttm_mem_global);
	global_ref->init = &amdgpu_ttm_mem_global_init;
	global_ref->release = &amdgpu_ttm_mem_global_release;
	r = drm_global_item_ref(global_ref);
	if (r) {
		DRM_ERROR("Failed setting up TTM memory accounting "
			  "subsystem.\n");
		goto error_mem;
	}

	adev->mman.bo_global_ref.mem_glob =
		adev->mman.mem_global_ref.object;
	global_ref = &adev->mman.bo_global_ref.ref;
	global_ref->global_type = DRM_GLOBAL_TTM_BO;
	global_ref->size = sizeof(struct ttm_bo_global);
	global_ref->init = &ttm_bo_global_init;
	global_ref->release = &ttm_bo_global_release;
	r = drm_global_item_ref(global_ref);
	if (r) {
		DRM_ERROR("Failed setting up TTM BO subsystem.\n");
		goto error_bo;
	}

	mutex_init(&adev->mman.gtt_window_lock);

	ring = adev->mman.buffer_funcs_ring;
	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
	r = drm_sched_entity_init(&ring->sched, &adev->mman.entity,
				  rq, amdgpu_sched_jobs, NULL);
	if (r) {
		DRM_ERROR("Failed setting up TTM BO move run queue.\n");
		goto error_entity;
	}

	adev->mman.mem_global_referenced = true;

	return 0;

error_entity:
	drm_global_item_unref(&adev->mman.bo_global_ref.ref);
error_bo:
	drm_global_item_unref(&adev->mman.mem_global_ref);
error_mem:
	return r;
}
static void amdgpu_ttm_global_fini(struct amdgpu_device *adev)
{
	if (adev->mman.mem_global_referenced) {
		drm_sched_entity_fini(adev->mman.entity.sched,
				      &adev->mman.entity);
		mutex_destroy(&adev->mman.gtt_window_lock);
		drm_global_item_unref(&adev->mman.bo_global_ref.ref);
		drm_global_item_unref(&adev->mman.mem_global_ref);
		adev->mman.mem_global_referenced = false;
	}
}
static int amdgpu_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	return 0;
}
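
/*
 * amdgpu_init_mem_type - describe each memory domain (SYSTEM, GTT, VRAM,
 * GDS/GWS/OA) to TTM: which manager handles it, its GPU offset and the
 * caching attributes it supports.
 */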
static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
				struct ttm_mem_type_manager *man)
{
	struct amdgpu_device *adev;

	adev = amdgpu_ttm_adev(bdev);

	switch (type) {
	case TTM_PL_SYSTEM:
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_TT:
		man->func = &amdgpu_gtt_mgr_func;
		man->gpu_offset = adev->gmc.gart_start;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
		break;
	case TTM_PL_VRAM:
		/* "On-card" video ram */
		man->func = &amdgpu_vram_mgr_func;
		man->gpu_offset = adev->gmc.vram_start;
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	case AMDGPU_PL_GDS:
	case AMDGPU_PL_GWS:
	case AMDGPU_PL_OA:
		/* On-chip GDS memory*/
		man->func = &ttm_bo_manager_func;
		man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_CMA;
		man->available_caching = TTM_PL_FLAG_UNCACHED;
		man->default_caching = TTM_PL_FLAG_UNCACHED;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}
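
/*
 * amdgpu_evict_flags - pick the placements TTM should try when it has to
 * evict this buffer object (e.g. push VRAM BOs towards GTT first).
 */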
static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
			       struct ttm_placement *placement)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_bo *abo;
	static const struct ttm_place placements = {
		.fpfn = 0,
		.lpfn = 0,
		.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM
	};

	if (bo->type == ttm_bo_type_sg) {
		placement->num_placement = 0;
		placement->num_busy_placement = 0;
		return;
	}

	if (!amdgpu_ttm_bo_is_amdgpu_bo(bo)) {
		placement->placement = &placements;
		placement->busy_placement = &placements;
		placement->num_placement = 1;
		placement->num_busy_placement = 1;
		return;
	}

	abo = ttm_to_amdgpu_bo(bo);
	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		if (!adev->mman.buffer_funcs_enabled) {
			amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
		} else if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
			   !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
			unsigned fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
			struct drm_mm_node *node = bo->mem.mm_node;
			unsigned long pages_left;

			for (pages_left = bo->mem.num_pages;
			     pages_left;
			     pages_left -= node->size, node++) {
				if (node->start < fpfn)
					break;
			}

			if (!pages_left)
				goto gtt;

			/* Try evicting to the CPU inaccessible part of VRAM
			 * first, but only set GTT as busy placement, so this
			 * BO will be evicted to GTT rather than causing other
			 * BOs to be evicted from VRAM
			 */
			amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
							 AMDGPU_GEM_DOMAIN_GTT);
			abo->placements[0].fpfn = fpfn;
			abo->placements[0].lpfn = 0;
			abo->placement.busy_placement = &abo->placements[1];
			abo->placement.num_busy_placement = 1;
		} else {
gtt:
			amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
		}
		break;
	case TTM_PL_TT:
	default:
		amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
	}
	*placement = abo->placement;
}
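
/* amdgpu_verify_access - check whether a file handle may mmap this BO */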
static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);

	/*
	 * Don't verify access for KFD BOs. They don't have a GEM
	 * object associated with them.
	 */
	if (abo->kfd_bo)
		return 0;

	if (amdgpu_ttm_tt_get_usermm(bo->ttm))
		return -EPERM;
	return drm_vma_node_verify_access(&abo->gem_base.vma_node,
					  filp->private_data);
}
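
/* amdgpu_move_null - update the BO's memory description without moving data */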
static void amdgpu_move_null(struct ttm_buffer_object *bo,
			     struct ttm_mem_reg *new_mem)
{
	struct ttm_mem_reg *old_mem = &bo->mem;

	BUG_ON(old_mem->mm_node != NULL);
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;
}
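
/* amdgpu_mm_node_addr - compute the GPU address backing a drm_mm_node */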
static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
				    struct drm_mm_node *mm_node,
				    struct ttm_mem_reg *mem)
{
	uint64_t addr = 0;

	if (mem->mem_type != TTM_PL_TT || amdgpu_gtt_mgr_has_gart_addr(mem)) {
		addr = mm_node->start << PAGE_SHIFT;
		addr += bo->bdev->man[mem->mem_type].gpu_offset;
	}
	return addr;
}
/**
 * amdgpu_find_mm_node - Helper function finds the drm_mm_node
 *  corresponding to @offset. It also modifies the offset to be
 *  within the drm_mm_node returned
 */
static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_mem_reg *mem,
					       unsigned long *offset)
{
	struct drm_mm_node *mm_node = mem->mm_node;

	while (*offset >= (mm_node->size << PAGE_SHIFT)) {
		*offset -= (mm_node->size << PAGE_SHIFT);
		++mm_node;
	}
	return mm_node;
}
/**
 * amdgpu_copy_ttm_mem_to_mem - Helper function for copy
 *
 * The function copies @size bytes from {src->mem + src->offset} to
 * {dst->mem + dst->offset}. src->bo and dst->bo could be same BO for a
 * move and different for a BO to BO copy.
 *
 * @f: Returns the last fence if multiple jobs are submitted.
 */
int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
			       struct amdgpu_copy_mem *src,
			       struct amdgpu_copy_mem *dst,
			       uint64_t size,
			       struct reservation_object *resv,
			       struct dma_fence **f)
{
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
	struct drm_mm_node *src_mm, *dst_mm;
	uint64_t src_node_start, dst_node_start, src_node_size,
		 dst_node_size, src_page_offset, dst_page_offset;
	struct dma_fence *fence = NULL;
	int r = 0;
	const uint64_t GTT_MAX_BYTES = (AMDGPU_GTT_MAX_TRANSFER_SIZE *
					AMDGPU_GPU_PAGE_SIZE);

	if (!adev->mman.buffer_funcs_enabled) {
		DRM_ERROR("Trying to move memory with ring turned off.\n");
		return -EINVAL;
	}

	src_mm = amdgpu_find_mm_node(src->mem, &src->offset);
	src_node_start = amdgpu_mm_node_addr(src->bo, src_mm, src->mem) +
			     src->offset;
	src_node_size = (src_mm->size << PAGE_SHIFT) - src->offset;
	src_page_offset = src_node_start & (PAGE_SIZE - 1);

	dst_mm = amdgpu_find_mm_node(dst->mem, &dst->offset);
	dst_node_start = amdgpu_mm_node_addr(dst->bo, dst_mm, dst->mem) +
			     dst->offset;
	dst_node_size = (dst_mm->size << PAGE_SHIFT) - dst->offset;
	dst_page_offset = dst_node_start & (PAGE_SIZE - 1);

	mutex_lock(&adev->mman.gtt_window_lock);

	while (size) {
		unsigned long cur_size;
		uint64_t from = src_node_start, to = dst_node_start;
		struct dma_fence *next;

		/* Copy size cannot exceed GTT_MAX_BYTES. So if src or dst
		 * begins at an offset, then adjust the size accordingly
		 */
		cur_size = min3(min(src_node_size, dst_node_size), size,
				GTT_MAX_BYTES);
		if (cur_size + src_page_offset > GTT_MAX_BYTES ||
		    cur_size + dst_page_offset > GTT_MAX_BYTES)
			cur_size -= max(src_page_offset, dst_page_offset);

		/* Map only what needs to be accessed. Map src to window 0 and
		 * dst to window 1
		 */
		if (src->mem->mem_type == TTM_PL_TT &&
		    !amdgpu_gtt_mgr_has_gart_addr(src->mem)) {
			r = amdgpu_map_buffer(src->bo, src->mem,
					PFN_UP(cur_size + src_page_offset),
					src_node_start, 0, ring,
					&from);
			if (r)
				goto error;
			/* Adjust the offset because amdgpu_map_buffer returns
			 * start of mapped page
			 */
			from += src_page_offset;
		}

		if (dst->mem->mem_type == TTM_PL_TT &&
		    !amdgpu_gtt_mgr_has_gart_addr(dst->mem)) {
			r = amdgpu_map_buffer(dst->bo, dst->mem,
					PFN_UP(cur_size + dst_page_offset),
					dst_node_start, 1, ring,
					&to);
			if (r)
				goto error;
			to += dst_page_offset;
		}

		r = amdgpu_copy_buffer(ring, from, to, cur_size,
				       resv, &next, false, true);
		if (r)
			goto error;

		dma_fence_put(fence);
		fence = next;

		size -= cur_size;
		if (!size)
			break;

		src_node_size -= cur_size;
		if (!src_node_size) {
			src_node_start = amdgpu_mm_node_addr(src->bo, ++src_mm,
							     src->mem);
			src_node_size = (src_mm->size << PAGE_SHIFT);
		} else {
			src_node_start += cur_size;
			src_page_offset = src_node_start & (PAGE_SIZE - 1);
		}
		dst_node_size -= cur_size;
		if (!dst_node_size) {
			dst_node_start = amdgpu_mm_node_addr(dst->bo, ++dst_mm,
							     dst->mem);
			dst_node_size = (dst_mm->size << PAGE_SHIFT);
		} else {
			dst_node_start += cur_size;
			dst_page_offset = dst_node_start & (PAGE_SIZE - 1);
		}
	}
error:
	mutex_unlock(&adev->mman.gtt_window_lock);
	if (f)
		*f = dma_fence_get(fence);
	dma_fence_put(fence);
	return r;
}
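
/*
 * amdgpu_move_blit - move a BO with the copy engine and fence the result,
 * waiting on the fence if pipelining the move fails.
 */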
static int amdgpu_move_blit(struct ttm_buffer_object *bo,
			    bool evict, bool no_wait_gpu,
			    struct ttm_mem_reg *new_mem,
			    struct ttm_mem_reg *old_mem)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_copy_mem src, dst;
	struct dma_fence *fence = NULL;
	int r;

	src.bo = bo;
	src.mem = old_mem;
	src.offset = 0;
	dst.bo = bo;
	dst.mem = new_mem;
	dst.offset = 0;

	r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst,
				       new_mem->num_pages << PAGE_SHIFT,
				       bo->resv, &fence);
	if (r)
		goto error;

	r = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
	dma_fence_put(fence);
	return r;

error:
	if (fence)
		dma_fence_wait(fence, false);
	dma_fence_put(fence);
	return r;
}
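
/*
 * amdgpu_move_vram_ram / amdgpu_move_ram_vram - two-step moves that bounce
 * the BO through a temporary GTT placement so the blit engine can reach it.
 */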
static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict,
				struct ttm_operation_ctx *ctx,
				struct ttm_mem_reg *new_mem)
{
	struct amdgpu_device *adev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg tmp_mem;
	struct ttm_place placements;
	struct ttm_placement placement;
	int r;

	adev = amdgpu_ttm_adev(bo->bdev);
	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	placement.num_placement = 1;
	placement.placement = &placements;
	placement.num_busy_placement = 1;
	placement.busy_placement = &placements;
	placements.fpfn = 0;
	placements.lpfn = 0;
	placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
	r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
	if (unlikely(r))
		return r;

	r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
	if (unlikely(r))
		goto out_cleanup;

	r = ttm_tt_bind(bo->ttm, &tmp_mem, ctx);
	if (unlikely(r))
		goto out_cleanup;

	r = amdgpu_move_blit(bo, true, ctx->no_wait_gpu, &tmp_mem, old_mem);
	if (unlikely(r))
		goto out_cleanup;

	r = ttm_bo_move_ttm(bo, ctx, new_mem);
out_cleanup:
	ttm_bo_mem_put(bo, &tmp_mem);
	return r;
}
static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict,
				struct ttm_operation_ctx *ctx,
				struct ttm_mem_reg *new_mem)
{
	struct amdgpu_device *adev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg tmp_mem;
	struct ttm_placement placement;
	struct ttm_place placements;
	int r;

	adev = amdgpu_ttm_adev(bo->bdev);
	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	placement.num_placement = 1;
	placement.placement = &placements;
	placement.num_busy_placement = 1;
	placement.busy_placement = &placements;
	placements.fpfn = 0;
	placements.lpfn = 0;
	placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
	r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
	if (unlikely(r))
		return r;

	r = ttm_bo_move_ttm(bo, ctx, &tmp_mem);
	if (unlikely(r))
		goto out_cleanup;

	r = amdgpu_move_blit(bo, true, ctx->no_wait_gpu, new_mem, old_mem);
out_cleanup:
	ttm_bo_mem_put(bo, &tmp_mem);
	return r;
}
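
/* amdgpu_bo_move - TTM move callback; picks the blit, bounce or memcpy path */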
static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
			  struct ttm_operation_ctx *ctx,
			  struct ttm_mem_reg *new_mem)
{
	struct amdgpu_device *adev;
	struct amdgpu_bo *abo;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int r;

	/* Can't move a pinned BO */
	abo = ttm_to_amdgpu_bo(bo);
	if (WARN_ON_ONCE(abo->pin_count > 0))
		return -EINVAL;

	adev = amdgpu_ttm_adev(bo->bdev);

	if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		amdgpu_move_null(bo, new_mem);
		return 0;
	}
	if ((old_mem->mem_type == TTM_PL_TT &&
	     new_mem->mem_type == TTM_PL_SYSTEM) ||
	    (old_mem->mem_type == TTM_PL_SYSTEM &&
	     new_mem->mem_type == TTM_PL_TT)) {
		/* bind is enough */
		amdgpu_move_null(bo, new_mem);
		return 0;
	}

	if (!adev->mman.buffer_funcs_enabled)
		goto memcpy;

	if (old_mem->mem_type == TTM_PL_VRAM &&
	    new_mem->mem_type == TTM_PL_SYSTEM) {
		r = amdgpu_move_vram_ram(bo, evict, ctx, new_mem);
	} else if (old_mem->mem_type == TTM_PL_SYSTEM &&
		   new_mem->mem_type == TTM_PL_VRAM) {
		r = amdgpu_move_ram_vram(bo, evict, ctx, new_mem);
	} else {
		r = amdgpu_move_blit(bo, evict, ctx->no_wait_gpu,
				     new_mem, old_mem);
	}

	if (r) {
memcpy:
		r = ttm_bo_move_memcpy(bo, ctx, new_mem);
		if (r)
			return r;
	}

	if (bo->type == ttm_bo_type_device &&
	    new_mem->mem_type == TTM_PL_VRAM &&
	    old_mem->mem_type != TTM_PL_VRAM) {
		/* amdgpu_bo_fault_reserve_notify will re-set this if the CPU
		 * accesses the BO after it's moved.
		 */
		abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
	}

	/* update statistics */
	atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &adev->num_bytes_moved);
	return 0;
}
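
/*
 * amdgpu_ttm_io_mem_reserve - fill in the bus address/size so the CPU can
 * map this memory; only CPU-visible VRAM can be mapped directly.
 */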
static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
	struct drm_mm_node *mm_node = mem->mm_node;

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* system memory */
		return 0;
	case TTM_PL_TT:
		break;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		/* check if it's visible */
		if ((mem->bus.offset + mem->bus.size) > adev->gmc.visible_vram_size)
			return -EINVAL;
		/* Only physically contiguous buffers apply. In a contiguous
		 * buffer, size of the first mm_node would match the number of
		 * pages in ttm_mem_reg.
		 */
		if (adev->mman.aper_base_kaddr &&
		    (mm_node->size == mem->num_pages))
			mem->bus.addr = (u8 *)adev->mman.aper_base_kaddr +
					mem->bus.offset;

		mem->bus.base = adev->gmc.aper_base;
		mem->bus.is_iomem = true;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
static void amdgpu_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}

static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
					   unsigned long page_offset)
{
	struct drm_mm_node *mm;
	unsigned long offset = (page_offset << PAGE_SHIFT);

	mm = amdgpu_find_mm_node(&bo->mem, &offset);
	return (bo->mem.bus.base >> PAGE_SHIFT) + mm->start +
		(offset >> PAGE_SHIFT);
}
/*
 * TTM backend functions.
 */
struct amdgpu_ttm_gup_task_list {
	struct list_head	list;
	struct task_struct	*task;
};

struct amdgpu_ttm_tt {
	struct ttm_dma_tt	ttm;
	u64			offset;
	uint64_t		userptr;
	struct mm_struct	*usermm;
	uint32_t		userflags;
	spinlock_t		guptasklock;
	struct list_head	guptasks;
	atomic_t		mmu_invalidations;
	uint32_t		last_set_pages;
};
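
/*
 * amdgpu_ttm_tt_get_user_pages - pin the pages backing a userptr BO with
 * get_user_pages(), tracking the calling task for MMU notifier checks.
 */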
int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	unsigned int flags = 0;
	unsigned pinned = 0;
	int r;

	if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY))
		flags |= FOLL_WRITE;

	down_read(&current->mm->mmap_sem);

	if (gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) {
		/* check that we only use anonymous memory
		   to prevent problems with writeback */
		unsigned long end = gtt->userptr + ttm->num_pages * PAGE_SIZE;
		struct vm_area_struct *vma;

		vma = find_vma(gtt->usermm, gtt->userptr);
		if (!vma || vma->vm_file || vma->vm_end < end) {
			up_read(&current->mm->mmap_sem);
			return -EPERM;
		}
	}

	do {
		unsigned num_pages = ttm->num_pages - pinned;
		uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE;
		struct page **p = pages + pinned;
		struct amdgpu_ttm_gup_task_list guptask;

		guptask.task = current;
		spin_lock(&gtt->guptasklock);
		list_add(&guptask.list, &gtt->guptasks);
		spin_unlock(&gtt->guptasklock);

		r = get_user_pages(userptr, num_pages, flags, p, NULL);

		spin_lock(&gtt->guptasklock);
		list_del(&guptask.list);
		spin_unlock(&gtt->guptasklock);

		if (r < 0)
			goto release_pages;

		pinned += r;

	} while (pinned < ttm->num_pages);

	up_read(&current->mm->mmap_sem);
	return 0;

release_pages:
	release_pages(pages, pinned);
	up_read(&current->mm->mmap_sem);
	return r;
}
void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	unsigned i;

	gtt->last_set_pages = atomic_read(&gtt->mmu_invalidations);
	for (i = 0; i < ttm->num_pages; ++i) {
		if (ttm->pages[i])
			put_page(ttm->pages[i]);

		ttm->pages[i] = pages ? pages[i] : NULL;
	}
}
void amdgpu_ttm_tt_mark_user_pages(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	unsigned i;

	for (i = 0; i < ttm->num_pages; ++i) {
		struct page *page = ttm->pages[i];

		if (!page)
			continue;

		if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY))
			set_page_dirty(page);

		mark_page_accessed(page);
	}
}
/* prepare the sg table with the user pages */
static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	unsigned nents;
	int r;

	int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
	enum dma_data_direction direction = write ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

	r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
				      ttm->num_pages << PAGE_SHIFT,
				      GFP_KERNEL);
	if (r)
		goto release_sg;

	r = -ENOMEM;
	nents = dma_map_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
	if (nents != ttm->sg->nents)
		goto release_sg;

	drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
					 gtt->ttm.dma_address, ttm->num_pages);

	return 0;

release_sg:
	kfree(ttm->sg);
	return r;
}
static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
	enum dma_data_direction direction = write ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

	/* double check that we don't free the table twice */
	if (!ttm->sg->sgl)
		return;

	/* free the sg table and pages again */
	dma_unmap_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction);

	amdgpu_ttm_tt_mark_user_pages(ttm);

	sg_free_table(ttm->sg);
}
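
/* amdgpu_ttm_backend_bind - bind the pages of a ttm_tt into the GART */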
static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
				   struct ttm_mem_reg *bo_mem)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
	struct amdgpu_ttm_tt *gtt = (void*)ttm;
	uint64_t flags;
	int r = 0;

	if (gtt->userptr) {
		r = amdgpu_ttm_tt_pin_userptr(ttm);
		if (r) {
			DRM_ERROR("failed to pin userptr\n");
			return r;
		}
	}
	if (!ttm->num_pages) {
		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
		     ttm->num_pages, bo_mem, ttm);
	}

	if (bo_mem->mem_type == AMDGPU_PL_GDS ||
	    bo_mem->mem_type == AMDGPU_PL_GWS ||
	    bo_mem->mem_type == AMDGPU_PL_OA)
		return -EINVAL;

	if (!amdgpu_gtt_mgr_has_gart_addr(bo_mem)) {
		gtt->offset = AMDGPU_BO_INVALID_OFFSET;
		return 0;
	}

	flags = amdgpu_ttm_tt_pte_flags(adev, ttm, bo_mem);
	gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
	r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
			     ttm->pages, gtt->ttm.dma_address, flags);

	if (r)
		DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
			  ttm->num_pages, gtt->offset);
	return r;
}
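
/*
 * amdgpu_ttm_alloc_gart - make sure a BO has a valid GART address, allocating
 * a GTT range and binding the pages if necessary.
 */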
int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_ttm_tt *gtt = (void*)bo->ttm;
	struct ttm_mem_reg tmp;
	struct ttm_placement placement;
	struct ttm_place placements;
	uint64_t flags;
	int r;

	if (bo->mem.mem_type != TTM_PL_TT ||
	    amdgpu_gtt_mgr_has_gart_addr(&bo->mem))
		return 0;

	placement.num_placement = 1;
	placement.placement = &placements;
	placement.num_busy_placement = 1;
	placement.busy_placement = &placements;
	placements.fpfn = 0;
	placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
	placements.flags = (bo->mem.placement & ~TTM_PL_MASK_MEM) |
		TTM_PL_FLAG_TT;

	r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx);
	if (unlikely(r))
		return r;

	flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, &tmp);
	gtt->offset = (u64)tmp.start << PAGE_SHIFT;
	r = amdgpu_gart_bind(adev, gtt->offset, bo->ttm->num_pages,
			     bo->ttm->pages, gtt->ttm.dma_address, flags);
	if (unlikely(r)) {
		ttm_bo_mem_put(bo, &tmp);
		return r;
	}

	ttm_bo_mem_put(bo, &bo->mem);
	bo->mem = tmp;
	bo->offset = (bo->mem.start << PAGE_SHIFT) +
		bo->bdev->man[bo->mem.mem_type].gpu_offset;

	return 0;
}
int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
	struct amdgpu_ttm_tt *gtt = (void *)tbo->ttm;
	uint64_t flags;
	int r;

	if (!gtt)
		return 0;

	flags = amdgpu_ttm_tt_pte_flags(adev, &gtt->ttm.ttm, &tbo->mem);
	r = amdgpu_gart_bind(adev, gtt->offset, gtt->ttm.ttm.num_pages,
			     gtt->ttm.ttm.pages, gtt->ttm.dma_address, flags);
	if (r)
		DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
			  gtt->ttm.ttm.num_pages, gtt->offset);
	return r;
}
static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	int r;

	if (gtt->userptr)
		amdgpu_ttm_tt_unpin_userptr(ttm);

	if (gtt->offset == AMDGPU_BO_INVALID_OFFSET)
		return 0;

	/* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
	r = amdgpu_gart_unbind(adev, gtt->offset, ttm->num_pages);
	if (r)
		DRM_ERROR("failed to unbind %lu pages at 0x%08llX\n",
			  gtt->ttm.ttm.num_pages, gtt->offset);
	return r;
}
static void amdgpu_ttm_backend_destroy(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	ttm_dma_tt_fini(&gtt->ttm);
	kfree(gtt);
}
static struct ttm_backend_func amdgpu_backend_func = {
	.bind = &amdgpu_ttm_backend_bind,
	.unbind = &amdgpu_ttm_backend_unbind,
	.destroy = &amdgpu_ttm_backend_destroy,
};
static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
					   uint32_t page_flags)
{
	struct amdgpu_device *adev;
	struct amdgpu_ttm_tt *gtt;

	adev = amdgpu_ttm_adev(bo->bdev);

	gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL);
	if (gtt == NULL)
		return NULL;

	gtt->ttm.ttm.func = &amdgpu_backend_func;
	if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags)) {
		kfree(gtt);
		return NULL;
	}
	return &gtt->ttm.ttm;
}
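
/* amdgpu_ttm_tt_populate - allocate (and map) the backing pages of a ttm_tt */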
static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm,
				  struct ttm_operation_ctx *ctx)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (gtt && gtt->userptr) {
		ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
		if (!ttm->sg)
			return -ENOMEM;

		ttm->page_flags |= TTM_PAGE_FLAG_SG;
		ttm->state = tt_unbound;
		return 0;
	}

	if (slave && ttm->sg) {
		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
						 gtt->ttm.dma_address,
						 ttm->num_pages);
		ttm->state = tt_unbound;
		return 0;
	}

#ifdef CONFIG_SWIOTLB
	if (adev->need_swiotlb && swiotlb_nr_tbl()) {
		return ttm_dma_populate(&gtt->ttm, adev->dev, ctx);
	}
#endif

	return ttm_populate_and_map_pages(adev->dev, &gtt->ttm, ctx);
}
static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	struct amdgpu_device *adev;
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (gtt && gtt->userptr) {
		amdgpu_ttm_tt_set_user_pages(ttm, NULL);
		kfree(ttm->sg);
		ttm->page_flags &= ~TTM_PAGE_FLAG_SG;
		return;
	}

	if (slave)
		return;

	adev = amdgpu_ttm_adev(ttm->bdev);

#ifdef CONFIG_SWIOTLB
	if (adev->need_swiotlb && swiotlb_nr_tbl()) {
		ttm_dma_unpopulate(&gtt->ttm, adev->dev);
		return;
	}
#endif

	ttm_unmap_and_unpopulate_pages(adev->dev, &gtt->ttm);
}
int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
			      uint32_t flags)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	if (gtt == NULL)
		return -EINVAL;

	gtt->userptr = addr;
	gtt->usermm = current->mm;
	gtt->userflags = flags;
	spin_lock_init(&gtt->guptasklock);
	INIT_LIST_HEAD(&gtt->guptasks);
	atomic_set(&gtt->mmu_invalidations, 0);
	gtt->last_set_pages = 0;

	return 0;
}
struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	if (gtt == NULL)
		return NULL;

	return gtt->usermm;
}
bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
				  unsigned long end)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	struct amdgpu_ttm_gup_task_list *entry;
	unsigned long size;

	if (gtt == NULL || !gtt->userptr)
		return false;

	size = (unsigned long)gtt->ttm.ttm.num_pages * PAGE_SIZE;
	if (gtt->userptr > end || gtt->userptr + size <= start)
		return false;

	spin_lock(&gtt->guptasklock);
	list_for_each_entry(entry, &gtt->guptasks, list) {
		if (entry->task == current) {
			spin_unlock(&gtt->guptasklock);
			return false;
		}
	}
	spin_unlock(&gtt->guptasklock);

	atomic_inc(&gtt->mmu_invalidations);

	return true;
}
bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
				       int *last_invalidated)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	int prev_invalidated = *last_invalidated;

	*last_invalidated = atomic_read(&gtt->mmu_invalidations);
	return prev_invalidated != *last_invalidated;
}
bool amdgpu_ttm_tt_userptr_needs_pages(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	if (gtt == NULL || !gtt->userptr)
		return false;

	return atomic_read(&gtt->mmu_invalidations) != gtt->last_set_pages;
}
bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	if (gtt == NULL)
		return false;

	return !!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
}
uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
				 struct ttm_mem_reg *mem)
{
	uint64_t flags = 0;

	if (mem && mem->mem_type != TTM_PL_SYSTEM)
		flags |= AMDGPU_PTE_VALID;

	if (mem && mem->mem_type == TTM_PL_TT) {
		flags |= AMDGPU_PTE_SYSTEM;

		if (ttm->caching_state == tt_cached)
			flags |= AMDGPU_PTE_SNOOPED;
	}

	flags |= adev->gart.gart_pte_flags;
	flags |= AMDGPU_PTE_READABLE;

	if (!amdgpu_ttm_tt_is_readonly(ttm))
		flags |= AMDGPU_PTE_WRITEABLE;

	return flags;
}
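
/*
 * amdgpu_ttm_bo_eviction_valuable - decide whether evicting this BO is worth
 * it; KFD BOs of the current process and invisible VRAM BOs are skipped.
 */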
static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
					    const struct ttm_place *place)
{
	unsigned long num_pages = bo->mem.num_pages;
	struct drm_mm_node *node = bo->mem.mm_node;
	struct reservation_object_list *flist;
	struct dma_fence *f;
	int i;

	/* If bo is a KFD BO, check if the bo belongs to the current process.
	 * If true, then return false as any KFD process needs all its BOs to
	 * be resident to run successfully
	 */
	flist = reservation_object_get_list(bo->resv);
	if (flist) {
		for (i = 0; i < flist->shared_count; ++i) {
			f = rcu_dereference_protected(flist->shared[i],
				reservation_object_held(bo->resv));
			if (amdkfd_fence_check_mm(f, current->mm))
				return false;
		}
	}

	switch (bo->mem.mem_type) {
	case TTM_PL_TT:
		return true;

	case TTM_PL_VRAM:
		/* Check each drm MM node individually */
		while (num_pages) {
			if (place->fpfn < (node->start + node->size) &&
			    !(place->lpfn && place->lpfn <= node->start))
				return true;

			num_pages -= node->size;
			++node;
		}
		return false;

	default:
		break;
	}

	return ttm_bo_eviction_valuable(bo, place);
}
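
/*
 * amdgpu_ttm_access_memory - read or write VRAM behind a BO through the
 * MM_INDEX/MM_DATA window, 32 bits at a time.
 */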
static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
				    unsigned long offset,
				    void *buf, int len, int write)
{
	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
	struct drm_mm_node *nodes;
	uint32_t value = 0;
	int ret = 0;
	uint64_t pos;
	unsigned long flags;

	if (bo->mem.mem_type != TTM_PL_VRAM)
		return -EIO;

	nodes = amdgpu_find_mm_node(&abo->tbo.mem, &offset);
	pos = (nodes->start << PAGE_SHIFT) + offset;

	while (len && pos < adev->gmc.mc_vram_size) {
		uint64_t aligned_pos = pos & ~(uint64_t)3;
		uint32_t bytes = 4 - (pos & 3);
		uint32_t shift = (pos & 3) * 8;
		uint32_t mask = 0xffffffff << shift;

		if (len < bytes) {
			mask &= 0xffffffff >> (bytes - len) * 8;
			bytes = len;
		}

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)aligned_pos) | 0x80000000);
		WREG32_NO_KIQ(mmMM_INDEX_HI, aligned_pos >> 31);
		if (!write || mask != 0xffffffff)
			value = RREG32_NO_KIQ(mmMM_DATA);
		if (write) {
			value &= ~mask;
			value |= (*(uint32_t *)buf << shift) & mask;
			WREG32_NO_KIQ(mmMM_DATA, value);
		}
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
		if (!write) {
			value = (value & mask) >> shift;
			memcpy(buf, &value, bytes);
		}

		ret += bytes;
		buf = (uint8_t *)buf + bytes;
		pos += bytes;
		len -= bytes;
		if (pos >= (nodes->start + nodes->size) << PAGE_SHIFT) {
			++nodes;
			pos = (nodes->start << PAGE_SHIFT);
		}
	}

	return ret;
}
static struct ttm_bo_driver amdgpu_bo_driver = {
	.ttm_tt_create = &amdgpu_ttm_tt_create,
	.ttm_tt_populate = &amdgpu_ttm_tt_populate,
	.ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
	.invalidate_caches = &amdgpu_invalidate_caches,
	.init_mem_type = &amdgpu_init_mem_type,
	.eviction_valuable = amdgpu_ttm_bo_eviction_valuable,
	.evict_flags = &amdgpu_evict_flags,
	.move = &amdgpu_bo_move,
	.verify_access = &amdgpu_verify_access,
	.move_notify = &amdgpu_bo_move_notify,
	.fault_reserve_notify = &amdgpu_bo_fault_reserve_notify,
	.io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
	.io_mem_free = &amdgpu_ttm_io_mem_free,
	.io_mem_pfn = amdgpu_ttm_io_mem_pfn,
	.access_memory = &amdgpu_ttm_access_memory
};
/*
 * Firmware Reservation functions
 */
/**
 * amdgpu_ttm_fw_reserve_vram_fini - free fw reserved vram
 *
 * @adev: amdgpu_device pointer
 *
 * free fw reserved vram if it has been reserved.
 */
static void amdgpu_ttm_fw_reserve_vram_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->fw_vram_usage.reserved_bo,
		NULL, &adev->fw_vram_usage.va);
}
/**
 * amdgpu_ttm_fw_reserve_vram_init - create bo vram reservation from fw
 *
 * @adev: amdgpu_device pointer
 *
 * create bo vram reservation from fw.
 */
static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
{
	struct ttm_operation_ctx ctx = { false, false };
	int r = 0;
	int i;
	u64 vram_size = adev->gmc.visible_vram_size;
	u64 offset = adev->fw_vram_usage.start_offset;
	u64 size = adev->fw_vram_usage.size;
	struct amdgpu_bo *bo;

	adev->fw_vram_usage.va = NULL;
	adev->fw_vram_usage.reserved_bo = NULL;

	if (adev->fw_vram_usage.size > 0 &&
	    adev->fw_vram_usage.size <= vram_size) {

		r = amdgpu_bo_create(adev, adev->fw_vram_usage.size, PAGE_SIZE,
				     AMDGPU_GEM_DOMAIN_VRAM,
				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
				     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
				     ttm_bo_type_kernel, NULL,
				     &adev->fw_vram_usage.reserved_bo);
		if (r)
			goto error_create;

		r = amdgpu_bo_reserve(adev->fw_vram_usage.reserved_bo, false);
		if (r)
			goto error_reserve;

		/* remove the original mem node and create a new one at the
		 * request position
		 */
		bo = adev->fw_vram_usage.reserved_bo;
		offset = ALIGN(offset, PAGE_SIZE);
		for (i = 0; i < bo->placement.num_placement; ++i) {
			bo->placements[i].fpfn = offset >> PAGE_SHIFT;
			bo->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
		}

		ttm_bo_mem_put(&bo->tbo, &bo->tbo.mem);
		r = ttm_bo_mem_space(&bo->tbo, &bo->placement,
				     &bo->tbo.mem, &ctx);
		if (r)
			goto error_pin;

		r = amdgpu_bo_pin_restricted(adev->fw_vram_usage.reserved_bo,
			AMDGPU_GEM_DOMAIN_VRAM,
			adev->fw_vram_usage.start_offset,
			(adev->fw_vram_usage.start_offset +
			adev->fw_vram_usage.size), NULL);
		if (r)
			goto error_pin;
		r = amdgpu_bo_kmap(adev->fw_vram_usage.reserved_bo,
			&adev->fw_vram_usage.va);
		if (r)
			goto error_kmap;

		amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
	}
	return r;

error_kmap:
	amdgpu_bo_unpin(adev->fw_vram_usage.reserved_bo);
error_pin:
	amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
error_reserve:
	amdgpu_bo_unref(&adev->fw_vram_usage.reserved_bo);
error_create:
	adev->fw_vram_usage.va = NULL;
	adev->fw_vram_usage.reserved_bo = NULL;
	return r;
}
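
/* amdgpu_ttm_init - set up TTM and all memory domains for this device */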
int amdgpu_ttm_init(struct amdgpu_device *adev)
{
	uint64_t gtt_size;
	int r;
	u64 vis_vram_limit;

	r = amdgpu_ttm_global_init(adev);
	if (r)
		return r;

	/* No others user of address space so set it to 0 */
	r = ttm_bo_device_init(&adev->mman.bdev,
			       adev->mman.bo_global_ref.ref.object,
			       &amdgpu_bo_driver,
			       adev->ddev->anon_inode->i_mapping,
			       DRM_FILE_PAGE_OFFSET,
			       adev->need_dma32);
	if (r) {
		DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
		return r;
	}
	adev->mman.initialized = true;

	/* We opt to avoid OOM on system pages allocations */
	adev->mman.bdev.no_retry = true;

	r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_VRAM,
			   adev->gmc.real_vram_size >> PAGE_SHIFT);
	if (r) {
		DRM_ERROR("Failed initializing VRAM heap.\n");
		return r;
	}

	/* Reduce size of CPU-visible VRAM if requested */
	vis_vram_limit = (u64)amdgpu_vis_vram_limit * 1024 * 1024;
	if (amdgpu_vis_vram_limit > 0 &&
	    vis_vram_limit <= adev->gmc.visible_vram_size)
		adev->gmc.visible_vram_size = vis_vram_limit;

	/* Change the size here instead of the init above so only lpfn is affected */
	amdgpu_ttm_set_buffer_funcs_status(adev, false);

	adev->mman.aper_base_kaddr = ioremap_wc(adev->gmc.aper_base,
						adev->gmc.visible_vram_size);

	/*
	 * The reserved vram for firmware must be pinned to the specified
	 * place on the VRAM, so reserve it early.
	 */
	r = amdgpu_ttm_fw_reserve_vram_init(adev);
	if (r)
		return r;

	r = amdgpu_bo_create_kernel(adev, adev->gmc.stolen_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM,
				    &adev->stolen_vga_memory,
				    NULL, NULL);
	if (r)
		return r;
	DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
		 (unsigned) (adev->gmc.real_vram_size / (1024 * 1024)));

	if (amdgpu_gtt_size == -1) {
		struct sysinfo si;

		si_meminfo(&si);
		gtt_size = min(max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
			       adev->gmc.mc_vram_size),
			       ((uint64_t)si.totalram * si.mem_unit * 3/4));
	} else
		gtt_size = (uint64_t)amdgpu_gtt_size << 20;
	r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_TT, gtt_size >> PAGE_SHIFT);
	if (r) {
		DRM_ERROR("Failed initializing GTT heap.\n");
		return r;
	}
	DRM_INFO("amdgpu: %uM of GTT memory ready.\n",
		 (unsigned)(gtt_size / (1024 * 1024)));

	adev->gds.mem.total_size = adev->gds.mem.total_size << AMDGPU_GDS_SHIFT;
	adev->gds.mem.gfx_partition_size = adev->gds.mem.gfx_partition_size << AMDGPU_GDS_SHIFT;
	adev->gds.mem.cs_partition_size = adev->gds.mem.cs_partition_size << AMDGPU_GDS_SHIFT;
	adev->gds.gws.total_size = adev->gds.gws.total_size << AMDGPU_GWS_SHIFT;
	adev->gds.gws.gfx_partition_size = adev->gds.gws.gfx_partition_size << AMDGPU_GWS_SHIFT;
	adev->gds.gws.cs_partition_size = adev->gds.gws.cs_partition_size << AMDGPU_GWS_SHIFT;
	adev->gds.oa.total_size = adev->gds.oa.total_size << AMDGPU_OA_SHIFT;
	adev->gds.oa.gfx_partition_size = adev->gds.oa.gfx_partition_size << AMDGPU_OA_SHIFT;
	adev->gds.oa.cs_partition_size = adev->gds.oa.cs_partition_size << AMDGPU_OA_SHIFT;

	if (adev->gds.mem.total_size) {
		r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GDS,
				   adev->gds.mem.total_size >> PAGE_SHIFT);
		if (r) {
			DRM_ERROR("Failed initializing GDS heap.\n");
			return r;
		}
	}

	if (adev->gds.gws.total_size) {
		r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GWS,
				   adev->gds.gws.total_size >> PAGE_SHIFT);
		if (r) {
			DRM_ERROR("Failed initializing gws heap.\n");
			return r;
		}
	}

	if (adev->gds.oa.total_size) {
		r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_OA,
				   adev->gds.oa.total_size >> PAGE_SHIFT);
		if (r) {
			DRM_ERROR("Failed initializing oa heap.\n");
			return r;
		}
	}

	r = amdgpu_ttm_debugfs_init(adev);
	if (r) {
		DRM_ERROR("Failed to init debugfs\n");
		return r;
	}
	return 0;
}
void amdgpu_ttm_fini(struct amdgpu_device *adev)
{
	if (!adev->mman.initialized)
		return;

	amdgpu_ttm_debugfs_fini(adev);
	amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL);
	amdgpu_ttm_fw_reserve_vram_fini(adev);
	if (adev->mman.aper_base_kaddr)
		iounmap(adev->mman.aper_base_kaddr);
	adev->mman.aper_base_kaddr = NULL;

	ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_VRAM);
	ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_TT);
	if (adev->gds.mem.total_size)
		ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GDS);
	if (adev->gds.gws.total_size)
		ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GWS);
	if (adev->gds.oa.total_size)
		ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_OA);
	ttm_bo_device_release(&adev->mman.bdev);
	amdgpu_ttm_global_fini(adev);
	adev->mman.initialized = false;
	DRM_INFO("amdgpu: ttm finalized\n");
}
/**
 * amdgpu_ttm_set_buffer_funcs_status - enable/disable use of buffer functions
 *
 * @adev: amdgpu_device pointer
 * @enable: true when we can use buffer functions.
 *
 * Enable/disable use of buffer functions during suspend/resume. This should
 * only be called at bootup or when userspace isn't running.
 */
void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
{
	struct ttm_mem_type_manager *man = &adev->mman.bdev.man[TTM_PL_VRAM];
	uint64_t size;

	if (!adev->mman.initialized || adev->in_gpu_reset)
		return;

	/* this just adjusts TTM size idea, which sets lpfn to the correct value */
	if (enable)
		size = adev->gmc.real_vram_size;
	else
		size = adev->gmc.visible_vram_size;
	man->size = size >> PAGE_SHIFT;
	adev->mman.buffer_funcs_enabled = enable;
}
int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv;
	struct amdgpu_device *adev;

	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
		return -EINVAL;

	file_priv = filp->private_data;
	adev = file_priv->minor->dev->dev_private;
	if (adev == NULL)
		return -EINVAL;

	return ttm_bo_mmap(filp, vma, &adev->mman.bdev);
}
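
/*
 * amdgpu_map_buffer - map num_pages of a BO into one of the two GART copy
 * windows so the copy engine can reach memory that has no GART address yet.
 */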
static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
			     struct ttm_mem_reg *mem, unsigned num_pages,
			     uint64_t offset, unsigned window,
			     struct amdgpu_ring *ring,
			     uint64_t *addr)
{
	struct amdgpu_ttm_tt *gtt = (void *)bo->ttm;
	struct amdgpu_device *adev = ring->adev;
	struct ttm_tt *ttm = bo->ttm;
	struct amdgpu_job *job;
	unsigned num_dw, num_bytes;
	dma_addr_t *dma_address;
	struct dma_fence *fence;
	uint64_t src_addr, dst_addr;
	uint64_t flags;
	int r;

	BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
	       AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);

	*addr = adev->gmc.gart_start;
	*addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
		AMDGPU_GPU_PAGE_SIZE;

	num_dw = adev->mman.buffer_funcs->copy_num_dw;
	while (num_dw & 0x7)
		num_dw++;

	num_bytes = num_pages * 8;

	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes, &job);
	if (r)
		return r;

	src_addr = num_dw * 4;
	src_addr += job->ibs[0].gpu_addr;

	dst_addr = adev->gart.table_addr;
	dst_addr += window * AMDGPU_GTT_MAX_TRANSFER_SIZE * 8;
	amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
				dst_addr, num_bytes);

	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
	WARN_ON(job->ibs[0].length_dw > num_dw);

	dma_address = &gtt->ttm.dma_address[offset >> PAGE_SHIFT];
	flags = amdgpu_ttm_tt_pte_flags(adev, ttm, mem);
	r = amdgpu_gart_map(adev, 0, num_pages, dma_address, flags,
			    &job->ibs[0].ptr[num_dw]);
	if (r)
		goto error_free;

	r = amdgpu_job_submit(job, ring, &adev->mman.entity,
			      AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
	if (r)
		goto error_free;

	dma_fence_put(fence);

	return r;

error_free:
	amdgpu_job_free(job);
	return r;
}
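
/* amdgpu_copy_buffer - schedule a copy job on the buffer-functions ring */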
int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
		       uint64_t dst_offset, uint32_t byte_count,
		       struct reservation_object *resv,
		       struct dma_fence **fence, bool direct_submit,
		       bool vm_needs_flush)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_job *job;

	uint32_t max_bytes;
	unsigned num_loops, num_dw;
	unsigned i;
	int r;

	if (direct_submit && !ring->ready) {
		DRM_ERROR("Trying to move memory with ring turned off.\n");
		return -EINVAL;
	}

	max_bytes = adev->mman.buffer_funcs->copy_max_bytes;
	num_loops = DIV_ROUND_UP(byte_count, max_bytes);
	num_dw = num_loops * adev->mman.buffer_funcs->copy_num_dw;

	/* for IB padding */
	while (num_dw & 0x7)
		num_dw++;

	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
	if (r)
		return r;

	job->vm_needs_flush = vm_needs_flush;
	if (resv) {
		r = amdgpu_sync_resv(adev, &job->sync, resv,
				     AMDGPU_FENCE_OWNER_UNDEFINED,
				     false);
		if (r) {
			DRM_ERROR("sync failed (%d).\n", r);
			goto error_free;
		}
	}

	for (i = 0; i < num_loops; i++) {
		uint32_t cur_size_in_bytes = min(byte_count, max_bytes);

		amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_offset,
					dst_offset, cur_size_in_bytes);

		src_offset += cur_size_in_bytes;
		dst_offset += cur_size_in_bytes;
		byte_count -= cur_size_in_bytes;
	}

	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
	WARN_ON(job->ibs[0].length_dw > num_dw);
	if (direct_submit) {
		r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs,
				       NULL, fence);
		job->fence = dma_fence_get(*fence);
		if (r)
			DRM_ERROR("Error scheduling IBs (%d)\n", r);
		amdgpu_job_free(job);
	} else {
		r = amdgpu_job_submit(job, ring, &adev->mman.entity,
				      AMDGPU_FENCE_OWNER_UNDEFINED, fence);
		if (r)
			goto error_free;
	}

	return r;

error_free:
	amdgpu_job_free(job);
	return r;
}
int amdgpu_fill_buffer(struct amdgpu_bo *bo,
		       uint32_t src_data,
		       struct reservation_object *resv,
		       struct dma_fence **fence)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	uint32_t max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;

	struct drm_mm_node *mm_node;
	unsigned long num_pages;
	unsigned int num_loops, num_dw;

	struct amdgpu_job *job;
	int r;

	if (!adev->mman.buffer_funcs_enabled) {
		DRM_ERROR("Trying to clear memory with ring turned off.\n");
		return -EINVAL;
	}

	if (bo->tbo.mem.mem_type == TTM_PL_TT) {
		r = amdgpu_ttm_alloc_gart(&bo->tbo);
		if (r)
			return r;
	}

	num_pages = bo->tbo.num_pages;
	mm_node = bo->tbo.mem.mm_node;
	num_loops = 0;
	while (num_pages) {
		uint32_t byte_count = mm_node->size << PAGE_SHIFT;

		num_loops += DIV_ROUND_UP(byte_count, max_bytes);
		num_pages -= mm_node->size;
		++mm_node;
	}
	num_dw = num_loops * adev->mman.buffer_funcs->fill_num_dw;

	/* for IB padding */
	num_dw += 64;

	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
	if (r)
		return r;

	if (resv) {
		r = amdgpu_sync_resv(adev, &job->sync, resv,
				     AMDGPU_FENCE_OWNER_UNDEFINED, false);
		if (r) {
			DRM_ERROR("sync failed (%d).\n", r);
			goto error_free;
		}
	}

	num_pages = bo->tbo.num_pages;
	mm_node = bo->tbo.mem.mm_node;

	while (num_pages) {
		uint32_t byte_count = mm_node->size << PAGE_SHIFT;
		uint64_t dst_addr;

		dst_addr = amdgpu_mm_node_addr(&bo->tbo, mm_node, &bo->tbo.mem);
		while (byte_count) {
			uint32_t cur_size_in_bytes = min(byte_count, max_bytes);

			amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data,
						dst_addr, cur_size_in_bytes);

			dst_addr += cur_size_in_bytes;
			byte_count -= cur_size_in_bytes;
		}

		num_pages -= mm_node->size;
		++mm_node;
	}

	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
	WARN_ON(job->ibs[0].length_dw > num_dw);
	r = amdgpu_job_submit(job, ring, &adev->mman.entity,
			      AMDGPU_FENCE_OWNER_UNDEFINED, fence);
	if (r)
		goto error_free;

	return 0;

error_free:
	amdgpu_job_free(job);
	return r;
}
#if defined(CONFIG_DEBUG_FS)

static int amdgpu_mm_dump_table(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	unsigned ttm_pl = *(int *)node->info_ent->data;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct ttm_mem_type_manager *man = &adev->mman.bdev.man[ttm_pl];
	struct drm_printer p = drm_seq_file_printer(m);

	man->func->debug(man, &p);
	return 0;
}

static int ttm_pl_vram = TTM_PL_VRAM;
static int ttm_pl_tt = TTM_PL_TT;

static const struct drm_info_list amdgpu_ttm_debugfs_list[] = {
	{"amdgpu_vram_mm", amdgpu_mm_dump_table, 0, &ttm_pl_vram},
	{"amdgpu_gtt_mm", amdgpu_mm_dump_table, 0, &ttm_pl_tt},
	{"ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL},
#ifdef CONFIG_SWIOTLB
	{"ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL}
#endif
};
static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
				    size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	if (*pos >= adev->gmc.mc_vram_size)
		return -ENXIO;

	while (size) {
		unsigned long flags;
		uint32_t value;

		if (*pos >= adev->gmc.mc_vram_size)
			return result;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000);
		WREG32_NO_KIQ(mmMM_INDEX_HI, *pos >> 31);
		value = RREG32_NO_KIQ(mmMM_DATA);
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);

		r = put_user(value, (uint32_t *)buf);
		if (r)
			return r;

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}
static ssize_t amdgpu_ttm_vram_write(struct file *f, const char __user *buf,
				     size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	if (*pos >= adev->gmc.mc_vram_size)
		return -ENXIO;

	while (size) {
		unsigned long flags;
		uint32_t value;

		if (*pos >= adev->gmc.mc_vram_size)
			return result;

		r = get_user(value, (uint32_t *)buf);
		if (r)
			return r;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000);
		WREG32_NO_KIQ(mmMM_INDEX_HI, *pos >> 31);
		WREG32_NO_KIQ(mmMM_DATA, value);
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

static const struct file_operations amdgpu_ttm_vram_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_ttm_vram_read,
	.write = amdgpu_ttm_vram_write,
	.llseek = default_llseek,
};
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS

static ssize_t amdgpu_ttm_gtt_read(struct file *f, char __user *buf,
				   size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	while (size) {
		loff_t p = *pos / PAGE_SIZE;
		unsigned off = *pos & ~PAGE_MASK;
		size_t cur_size = min_t(size_t, size, PAGE_SIZE - off);
		struct page *page;
		void *ptr;

		if (p >= adev->gart.num_cpu_pages)
			return result;

		page = adev->gart.pages[p];
		if (page) {
			ptr = kmap(page);
			ptr += off;

			r = copy_to_user(buf, ptr, cur_size);
			kunmap(adev->gart.pages[p]);
		} else
			r = clear_user(buf, cur_size);

		if (r)
			return -EFAULT;

		result += cur_size;
		buf += cur_size;
		*pos += cur_size;
		size -= cur_size;
	}

	return result;
}

static const struct file_operations amdgpu_ttm_gtt_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_ttm_gtt_read,
	.llseek = default_llseek
};

#endif
static ssize_t amdgpu_iomem_read(struct file *f, char __user *buf,
				 size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	struct iommu_domain *dom;
	ssize_t result = 0;
	int r;

	dom = iommu_get_domain_for_dev(adev->dev);

	while (size) {
		phys_addr_t addr = *pos & PAGE_MASK;
		loff_t off = *pos & ~PAGE_MASK;
		size_t bytes = PAGE_SIZE - off;
		unsigned long pfn;
		struct page *p;
		void *ptr;

		bytes = bytes < size ? bytes : size;

		addr = dom ? iommu_iova_to_phys(dom, addr) : addr;

		pfn = addr >> PAGE_SHIFT;
		if (!pfn_valid(pfn))
			return -EPERM;

		p = pfn_to_page(pfn);
		if (p->mapping != adev->mman.bdev.dev_mapping)
			return -EPERM;

		ptr = kmap(p);
		r = copy_to_user(buf, ptr + off, bytes);
		kunmap(p);
		if (r)
			return -EFAULT;

		size -= bytes;
		*pos += bytes;
		buf += bytes;
		result += bytes;
	}

	return result;
}
static ssize_t amdgpu_iomem_write(struct file *f, const char __user *buf,
				  size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	struct iommu_domain *dom;
	ssize_t result = 0;
	int r;

	dom = iommu_get_domain_for_dev(adev->dev);

	while (size) {
		phys_addr_t addr = *pos & PAGE_MASK;
		loff_t off = *pos & ~PAGE_MASK;
		size_t bytes = PAGE_SIZE - off;
		unsigned long pfn;
		struct page *p;
		void *ptr;

		bytes = bytes < size ? bytes : size;

		addr = dom ? iommu_iova_to_phys(dom, addr) : addr;

		pfn = addr >> PAGE_SHIFT;
		if (!pfn_valid(pfn))
			return -EPERM;

		p = pfn_to_page(pfn);
		if (p->mapping != adev->mman.bdev.dev_mapping)
			return -EPERM;

		ptr = kmap(p);
		r = copy_from_user(ptr + off, buf, bytes);
		kunmap(p);
		if (r)
			return -EFAULT;

		size -= bytes;
		*pos += bytes;
		buf += bytes;
		result += bytes;
	}

	return result;
}

static const struct file_operations amdgpu_ttm_iomem_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_iomem_read,
	.write = amdgpu_iomem_write,
	.llseek = default_llseek
};
static const struct {
	char *name;
	const struct file_operations *fops;
	int domain;
} ttm_debugfs_entries[] = {
	{ "amdgpu_vram", &amdgpu_ttm_vram_fops, TTM_PL_VRAM },
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
	{ "amdgpu_gtt", &amdgpu_ttm_gtt_fops, TTM_PL_TT },
#endif
	{ "amdgpu_iomem", &amdgpu_ttm_iomem_fops, TTM_PL_SYSTEM },
};
static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned count;

	struct drm_minor *minor = adev->ddev->primary;
	struct dentry *ent, *root = minor->debugfs_root;

	for (count = 0; count < ARRAY_SIZE(ttm_debugfs_entries); count++) {
		ent = debugfs_create_file(
				ttm_debugfs_entries[count].name,
				S_IFREG | S_IRUGO, root,
				adev,
				ttm_debugfs_entries[count].fops);
		if (IS_ERR(ent))
			return PTR_ERR(ent);
		if (ttm_debugfs_entries[count].domain == TTM_PL_VRAM)
			i_size_write(ent->d_inode, adev->gmc.mc_vram_size);
		else if (ttm_debugfs_entries[count].domain == TTM_PL_TT)
			i_size_write(ent->d_inode, adev->gmc.gart_size);
		adev->mman.debugfs_entries[count] = ent;
	}

	count = ARRAY_SIZE(amdgpu_ttm_debugfs_list);

#ifdef CONFIG_SWIOTLB
	if (!(adev->need_swiotlb && swiotlb_nr_tbl()))
		--count;
#endif

	return amdgpu_debugfs_add_files(adev, amdgpu_ttm_debugfs_list, count);
#else
	return 0;
#endif
}
static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(ttm_debugfs_entries); i++)
		debugfs_remove(adev->mman.debugfs_entries[i]);
#endif
}