/*
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 */
#include <linux/dma-mapping.h>
#include <linux/iommu.h>
#include <linux/hmm.h>
#include <linux/pagemap.h>
#include <linux/sched/task.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/swiotlb.h>

#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_page_alloc.h>

#include <drm/drm_debugfs.h>
#include <drm/amdgpu_drm.h>

#include "amdgpu.h"
#include "amdgpu_object.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_sdma.h"
#include "bif/bif_4_1_d.h"
static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
			     struct ttm_mem_reg *mem, unsigned num_pages,
			     uint64_t offset, unsigned window,
			     struct amdgpu_ring *ring,
			     uint64_t *addr);

static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev);
static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev);

static int amdgpu_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	return 0;
}
/**
 * amdgpu_init_mem_type - Initialize a memory manager for a specific type of
 * memory request.
 *
 * @bdev: The TTM BO device object (contains a reference to amdgpu_device)
 * @type: The type of memory requested
 * @man: The memory type manager for each domain
 *
 * This is called by ttm_bo_init_mm() when a buffer object is being
 * initialized.
 */
static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
				struct ttm_mem_type_manager *man)
{
	struct amdgpu_device *adev;

	adev = amdgpu_ttm_adev(bdev);

	switch (type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_TT:
		/* GTT memory */
		man->func = &amdgpu_gtt_mgr_func;
		man->gpu_offset = adev->gmc.gart_start;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
		break;
	case TTM_PL_VRAM:
		/* "On-card" video ram */
		man->func = &amdgpu_vram_mgr_func;
		man->gpu_offset = adev->gmc.vram_start;
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	case AMDGPU_PL_GDS:
	case AMDGPU_PL_GWS:
	case AMDGPU_PL_OA:
		/* On-chip GDS memory */
		man->func = &ttm_bo_manager_func;
		man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_CMA;
		man->available_caching = TTM_PL_FLAG_UNCACHED;
		man->default_caching = TTM_PL_FLAG_UNCACHED;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}
/**
 * amdgpu_evict_flags - Compute placement flags
 *
 * @bo: The buffer object to evict
 * @placement: Possible destination(s) for evicted BO
 *
 * Fill in placement data when ttm_bo_evict() is called
 */
static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
			       struct ttm_placement *placement)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_bo *abo;
	static const struct ttm_place placements = {
		.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM
	};

	/* Don't handle scatter gather BOs */
	if (bo->type == ttm_bo_type_sg) {
		placement->num_placement = 0;
		placement->num_busy_placement = 0;
		return;
	}

	/* Object isn't an AMDGPU object so ignore */
	if (!amdgpu_bo_is_amdgpu_bo(bo)) {
		placement->placement = &placements;
		placement->busy_placement = &placements;
		placement->num_placement = 1;
		placement->num_busy_placement = 1;
		return;
	}

	abo = ttm_to_amdgpu_bo(bo);
	switch (bo->mem.mem_type) {
	case AMDGPU_PL_GDS:
	case AMDGPU_PL_GWS:
	case AMDGPU_PL_OA:
		placement->num_placement = 0;
		placement->num_busy_placement = 0;
		return;

	case TTM_PL_VRAM:
		if (!adev->mman.buffer_funcs_enabled) {
			/* Move to system memory */
			amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
		} else if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
			   !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
			   amdgpu_bo_in_cpu_visible_vram(abo)) {

			/* Try evicting to the CPU inaccessible part of VRAM
			 * first, but only set GTT as busy placement, so this
			 * BO will be evicted to GTT rather than causing other
			 * BOs to be evicted from VRAM
			 */
			amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
							AMDGPU_GEM_DOMAIN_GTT);
			abo->placements[0].fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
			abo->placements[0].lpfn = 0;
			abo->placement.busy_placement = &abo->placements[1];
			abo->placement.num_busy_placement = 1;
		} else {
			/* Move to GTT memory */
			amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
		}
		break;
	case TTM_PL_TT:
	default:
		amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
		break;
	}
	*placement = abo->placement;
}
/**
 * amdgpu_verify_access - Verify access for a mmap call
 *
 * @bo: The buffer object to map
 * @filp: The file pointer from the process performing the mmap
 *
 * This is called by ttm_bo_mmap() to verify whether a process
 * has the right to mmap a BO to their process space.
 */
static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);

	/*
	 * Don't verify access for KFD BOs. They don't have a GEM
	 * object associated with them.
	 */
	if (abo->kfd_bo)
		return 0;

	if (amdgpu_ttm_tt_get_usermm(bo->ttm))
		return -EPERM;
	return drm_vma_node_verify_access(&abo->gem_base.vma_node,
					  filp->private_data);
}

/**
 * amdgpu_move_null - Register memory for a buffer object
 *
 * @bo: The bo to assign the memory to
 * @new_mem: The memory to be assigned.
 *
 * Assign the memory from new_mem to the memory of the buffer object bo.
 */
static void amdgpu_move_null(struct ttm_buffer_object *bo,
			     struct ttm_mem_reg *new_mem)
{
	struct ttm_mem_reg *old_mem = &bo->mem;

	BUG_ON(old_mem->mm_node != NULL);
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;
}
/**
 * amdgpu_mm_node_addr - Compute the GPU relative offset of a GTT buffer.
 *
 * @bo: The bo to assign the memory to.
 * @mm_node: Memory manager node for drm allocator.
 * @mem: The region where the bo resides.
 *
 */
static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
				    struct drm_mm_node *mm_node,
				    struct ttm_mem_reg *mem)
{
	uint64_t addr = 0;

	if (mm_node->start != AMDGPU_BO_INVALID_OFFSET) {
		addr = mm_node->start << PAGE_SHIFT;
		addr += bo->bdev->man[mem->mem_type].gpu_offset;
	}
	return addr;
}
/**
 * amdgpu_find_mm_node - Helper function finds the drm_mm_node corresponding to
 * @offset. It also modifies the offset to be within the drm_mm_node returned
 *
 * @mem: The region where the bo resides.
 * @offset: The offset that drm_mm_node is used for finding.
 *
 */
static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_mem_reg *mem,
					       unsigned long *offset)
{
	struct drm_mm_node *mm_node = mem->mm_node;

	while (*offset >= (mm_node->size << PAGE_SHIFT)) {
		*offset -= (mm_node->size << PAGE_SHIFT);
		++mm_node;
	}
	return mm_node;
}
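
/*
 * Worked example (illustrative only): for a ttm_mem_reg backed by two
 * drm_mm_nodes of 4 and 8 pages, a byte *offset equal to 6 pages makes
 * amdgpu_find_mm_node() skip the first node and return the second one,
 * with *offset rewritten to the 2 pages worth of bytes into that node.
 */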
/**
 * amdgpu_copy_ttm_mem_to_mem - Helper function for copy
 *
 * The function copies @size bytes from {src->mem + src->offset} to
 * {dst->mem + dst->offset}. src->bo and dst->bo could be same BO for a
 * move and different for a BO to BO copy.
 *
 * @f: Returns the last fence if multiple jobs are submitted.
 */
int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
			       struct amdgpu_copy_mem *src,
			       struct amdgpu_copy_mem *dst,
			       uint64_t size,
			       struct reservation_object *resv,
			       struct dma_fence **f)
{
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
	struct drm_mm_node *src_mm, *dst_mm;
	uint64_t src_node_start, dst_node_start, src_node_size,
		 dst_node_size, src_page_offset, dst_page_offset;
	struct dma_fence *fence = NULL;
	int r = 0;
	const uint64_t GTT_MAX_BYTES = (AMDGPU_GTT_MAX_TRANSFER_SIZE *
					AMDGPU_GPU_PAGE_SIZE);

	if (!adev->mman.buffer_funcs_enabled) {
		DRM_ERROR("Trying to move memory with ring turned off.\n");
		return -EINVAL;
	}

	src_mm = amdgpu_find_mm_node(src->mem, &src->offset);
	src_node_start = amdgpu_mm_node_addr(src->bo, src_mm, src->mem) +
			 src->offset;
	src_node_size = (src_mm->size << PAGE_SHIFT) - src->offset;
	src_page_offset = src_node_start & (PAGE_SIZE - 1);

	dst_mm = amdgpu_find_mm_node(dst->mem, &dst->offset);
	dst_node_start = amdgpu_mm_node_addr(dst->bo, dst_mm, dst->mem) +
			 dst->offset;
	dst_node_size = (dst_mm->size << PAGE_SHIFT) - dst->offset;
	dst_page_offset = dst_node_start & (PAGE_SIZE - 1);

	mutex_lock(&adev->mman.gtt_window_lock);

	while (size) {
		unsigned long cur_size;
		uint64_t from = src_node_start, to = dst_node_start;
		struct dma_fence *next;

		/* Copy size cannot exceed GTT_MAX_BYTES. So if src or dst
		 * begins at an offset, then adjust the size accordingly
		 */
		cur_size = min3(min(src_node_size, dst_node_size), size,
				GTT_MAX_BYTES);
		if (cur_size + src_page_offset > GTT_MAX_BYTES ||
		    cur_size + dst_page_offset > GTT_MAX_BYTES)
			cur_size -= max(src_page_offset, dst_page_offset);

		/* Map only what needs to be accessed. Map src to window 0 and
		 * dst to window 1
		 */
		if (src->mem->start == AMDGPU_BO_INVALID_OFFSET) {
			r = amdgpu_map_buffer(src->bo, src->mem,
					PFN_UP(cur_size + src_page_offset),
					src_node_start, 0, ring,
					&from);
			if (r)
				goto error;
			/* Adjust the offset because amdgpu_map_buffer returns
			 * start of mapped page
			 */
			from += src_page_offset;
		}

		if (dst->mem->start == AMDGPU_BO_INVALID_OFFSET) {
			r = amdgpu_map_buffer(dst->bo, dst->mem,
					PFN_UP(cur_size + dst_page_offset),
					dst_node_start, 1, ring,
					&to);
			if (r)
				goto error;
			to += dst_page_offset;
		}

		r = amdgpu_copy_buffer(ring, from, to, cur_size,
				       resv, &next, false, true);
		if (r)
			goto error;

		dma_fence_put(fence);
		fence = next;

		size -= cur_size;
		if (!size)
			break;

		src_node_size -= cur_size;
		if (!src_node_size) {
			src_node_start = amdgpu_mm_node_addr(src->bo, ++src_mm,
							     src->mem);
			src_node_size = (src_mm->size << PAGE_SHIFT);
			src_page_offset = 0;
		} else {
			src_node_start += cur_size;
			src_page_offset = src_node_start & (PAGE_SIZE - 1);
		}
		dst_node_size -= cur_size;
		if (!dst_node_size) {
			dst_node_start = amdgpu_mm_node_addr(dst->bo, ++dst_mm,
							     dst->mem);
			dst_node_size = (dst_mm->size << PAGE_SHIFT);
			dst_page_offset = 0;
		} else {
			dst_node_start += cur_size;
			dst_page_offset = dst_node_start & (PAGE_SIZE - 1);
		}
	}
error:
	mutex_unlock(&adev->mman.gtt_window_lock);
	if (f)
		*f = dma_fence_get(fence);
	dma_fence_put(fence);
	return r;
}
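
/*
 * Note on the copy loop above: buffers that have no GART address yet
 * (mem->start == AMDGPU_BO_INVALID_OFFSET) are mapped temporarily into two
 * fixed GART windows (window 0 for the source, window 1 for the destination),
 * each AMDGPU_GTT_MAX_TRANSFER_SIZE pages large. That is why a single
 * iteration never copies more than GTT_MAX_BYTES and why all users are
 * serialized behind adev->mman.gtt_window_lock.
 */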
/**
 * amdgpu_move_blit - Copy an entire buffer to another buffer
 *
 * This is a helper called by amdgpu_bo_move() and amdgpu_move_vram_ram() to
 * help move buffers to and from VRAM.
 */
static int amdgpu_move_blit(struct ttm_buffer_object *bo,
			    bool evict, bool no_wait_gpu,
			    struct ttm_mem_reg *new_mem,
			    struct ttm_mem_reg *old_mem)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_copy_mem src, dst;
	struct dma_fence *fence = NULL;
	int r;

	src.bo = bo;
	src.mem = old_mem;
	src.offset = 0;
	dst.bo = bo;
	dst.mem = new_mem;
	dst.offset = 0;

	r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst,
				       new_mem->num_pages << PAGE_SHIFT,
				       bo->resv, &fence);
	if (r)
		goto error;

	/* clear the space being freed */
	if (old_mem->mem_type == TTM_PL_VRAM &&
	    (ttm_to_amdgpu_bo(bo)->flags &
	     AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) {
		struct dma_fence *wipe_fence = NULL;

		r = amdgpu_fill_buffer(ttm_to_amdgpu_bo(bo), AMDGPU_POISON,
				       NULL, &wipe_fence);
		if (r) {
			goto error;
		} else if (wipe_fence) {
			dma_fence_put(fence);
			fence = wipe_fence;
		}
	}

	/* Always block for VM page tables before committing the new location */
	if (bo->type == ttm_bo_type_kernel)
		r = ttm_bo_move_accel_cleanup(bo, fence, true, new_mem);
	else
		r = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
	dma_fence_put(fence);
	return r;

error:
	if (fence)
		dma_fence_wait(fence, false);
	dma_fence_put(fence);
	return r;
}
/**
 * amdgpu_move_vram_ram - Copy VRAM buffer to RAM buffer
 *
 * Called by amdgpu_bo_move().
 */
static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict,
				struct ttm_operation_ctx *ctx,
				struct ttm_mem_reg *new_mem)
{
	struct amdgpu_device *adev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg tmp_mem;
	struct ttm_place placements;
	struct ttm_placement placement;
	int r;

	adev = amdgpu_ttm_adev(bo->bdev);

	/* create space/pages for new_mem in GTT space */
	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	placement.num_placement = 1;
	placement.placement = &placements;
	placement.num_busy_placement = 1;
	placement.busy_placement = &placements;
	placements.fpfn = 0;
	placements.lpfn = 0;
	placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
	r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
	if (unlikely(r)) {
		pr_err("Failed to find GTT space for blit from VRAM\n");
		return r;
	}

	/* set caching flags */
	r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
	if (unlikely(r))
		goto out_cleanup;

	/* Bind the memory to the GTT space */
	r = ttm_tt_bind(bo->ttm, &tmp_mem, ctx);
	if (unlikely(r))
		goto out_cleanup;

	/* blit VRAM to GTT */
	r = amdgpu_move_blit(bo, evict, ctx->no_wait_gpu, &tmp_mem, old_mem);
	if (unlikely(r))
		goto out_cleanup;

	/* move BO (in tmp_mem) to new_mem */
	r = ttm_bo_move_ttm(bo, ctx, new_mem);
out_cleanup:
	ttm_bo_mem_put(bo, &tmp_mem);
	return r;
}
/**
 * amdgpu_move_ram_vram - Copy buffer from RAM to VRAM
 *
 * Called by amdgpu_bo_move().
 */
static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict,
				struct ttm_operation_ctx *ctx,
				struct ttm_mem_reg *new_mem)
{
	struct amdgpu_device *adev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg tmp_mem;
	struct ttm_placement placement;
	struct ttm_place placements;
	int r;

	adev = amdgpu_ttm_adev(bo->bdev);

	/* make space in GTT for old_mem buffer */
	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	placement.num_placement = 1;
	placement.placement = &placements;
	placement.num_busy_placement = 1;
	placement.busy_placement = &placements;
	placements.fpfn = 0;
	placements.lpfn = 0;
	placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
	r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
	if (unlikely(r)) {
		pr_err("Failed to find GTT space for blit to VRAM\n");
		return r;
	}

	/* move/bind old memory to GTT space */
	r = ttm_bo_move_ttm(bo, ctx, &tmp_mem);
	if (unlikely(r))
		goto out_cleanup;

	/* copy to VRAM */
	r = amdgpu_move_blit(bo, evict, ctx->no_wait_gpu, new_mem, old_mem);
	if (unlikely(r))
		goto out_cleanup;
out_cleanup:
	ttm_bo_mem_put(bo, &tmp_mem);
	return r;
}
/**
 * amdgpu_mem_visible - Check that memory can be accessed by ttm_bo_move_memcpy
 *
 * Called by amdgpu_bo_move()
 */
static bool amdgpu_mem_visible(struct amdgpu_device *adev,
			       struct ttm_mem_reg *mem)
{
	struct drm_mm_node *nodes = mem->mm_node;

	if (mem->mem_type == TTM_PL_SYSTEM ||
	    mem->mem_type == TTM_PL_TT)
		return true;
	if (mem->mem_type != TTM_PL_VRAM)
		return false;

	/* ttm_mem_reg_ioremap only supports contiguous memory */
	if (nodes->size != mem->num_pages)
		return false;

	return ((nodes->start + nodes->size) << PAGE_SHIFT)
		<= adev->gmc.visible_vram_size;
}
/**
 * amdgpu_bo_move - Move a buffer object to a new memory location
 *
 * Called by ttm_bo_handle_move_mem()
 */
static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
			  struct ttm_operation_ctx *ctx,
			  struct ttm_mem_reg *new_mem)
{
	struct amdgpu_device *adev;
	struct amdgpu_bo *abo;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int r;

	/* Can't move a pinned BO */
	abo = ttm_to_amdgpu_bo(bo);
	if (WARN_ON_ONCE(abo->pin_count > 0))
		return -EINVAL;

	adev = amdgpu_ttm_adev(bo->bdev);

	if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		amdgpu_move_null(bo, new_mem);
		return 0;
	}
	if ((old_mem->mem_type == TTM_PL_TT &&
	     new_mem->mem_type == TTM_PL_SYSTEM) ||
	    (old_mem->mem_type == TTM_PL_SYSTEM &&
	     new_mem->mem_type == TTM_PL_TT)) {
		/* bind is enough */
		amdgpu_move_null(bo, new_mem);
		return 0;
	}
	if (old_mem->mem_type == AMDGPU_PL_GDS ||
	    old_mem->mem_type == AMDGPU_PL_GWS ||
	    old_mem->mem_type == AMDGPU_PL_OA ||
	    new_mem->mem_type == AMDGPU_PL_GDS ||
	    new_mem->mem_type == AMDGPU_PL_GWS ||
	    new_mem->mem_type == AMDGPU_PL_OA) {
		/* Nothing to save here */
		amdgpu_move_null(bo, new_mem);
		return 0;
	}

	if (!adev->mman.buffer_funcs_enabled) {
		r = -ENODEV;
		goto memcpy;
	}

	if (old_mem->mem_type == TTM_PL_VRAM &&
	    new_mem->mem_type == TTM_PL_SYSTEM) {
		r = amdgpu_move_vram_ram(bo, evict, ctx, new_mem);
	} else if (old_mem->mem_type == TTM_PL_SYSTEM &&
		   new_mem->mem_type == TTM_PL_VRAM) {
		r = amdgpu_move_ram_vram(bo, evict, ctx, new_mem);
	} else {
		r = amdgpu_move_blit(bo, evict, ctx->no_wait_gpu,
				     new_mem, old_mem);
	}

	if (r) {
memcpy:
		/* Check that all memory is CPU accessible */
		if (!amdgpu_mem_visible(adev, old_mem) ||
		    !amdgpu_mem_visible(adev, new_mem)) {
			pr_err("Move buffer fallback to memcpy unavailable\n");
			return r;
		}

		r = ttm_bo_move_memcpy(bo, ctx, new_mem);
		if (r)
			return r;
	}

	if (bo->type == ttm_bo_type_device &&
	    new_mem->mem_type == TTM_PL_VRAM &&
	    old_mem->mem_type != TTM_PL_VRAM) {
		/* amdgpu_bo_fault_reserve_notify will re-set this if the CPU
		 * accesses the BO after it's moved.
		 */
		abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
	}

	/* update statistics */
	atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &adev->num_bytes_moved);
	return 0;
}
/**
 * amdgpu_ttm_io_mem_reserve - Reserve a block of memory during a fault
 *
 * Called by ttm_mem_io_reserve() ultimately via ttm_bo_vm_fault()
 */
static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
	struct drm_mm_node *mm_node = mem->mm_node;

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* system memory */
		return 0;
	case TTM_PL_TT:
		break;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		/* check if it's visible */
		if ((mem->bus.offset + mem->bus.size) > adev->gmc.visible_vram_size)
			return -EINVAL;
		/* Only physically contiguous buffers apply. In a contiguous
		 * buffer, size of the first mm_node would match the number of
		 * pages in ttm_mem_reg.
		 */
		if (adev->mman.aper_base_kaddr &&
		    (mm_node->size == mem->num_pages))
			mem->bus.addr = (u8 *)adev->mman.aper_base_kaddr +
					mem->bus.offset;

		mem->bus.base = adev->gmc.aper_base;
		mem->bus.is_iomem = true;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void amdgpu_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}

static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
					   unsigned long page_offset)
{
	struct drm_mm_node *mm;
	unsigned long offset = (page_offset << PAGE_SHIFT);

	mm = amdgpu_find_mm_node(&bo->mem, &offset);
	return (bo->mem.bus.base >> PAGE_SHIFT) + mm->start +
		(offset >> PAGE_SHIFT);
}
/*
 * TTM backend functions.
 */
struct amdgpu_ttm_tt {
	struct ttm_dma_tt	ttm;
	u64			offset;
	uint64_t		userptr;
	struct task_struct	*usertask;
	uint32_t		userflags;
#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
	struct hmm_range	*range;
#endif
};
/**
 * amdgpu_ttm_tt_get_user_pages - get device accessible pages that back user
 * memory and start HMM tracking CPU page table update
 *
 * Calling function must call amdgpu_ttm_tt_userptr_range_done() once and only
 * once afterwards to stop HMM tracking
 */
#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)

#define MAX_RETRY_HMM_RANGE_FAULT	16

int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
{
	struct hmm_mirror *mirror = bo->mn ? &bo->mn->mirror : NULL;
	struct ttm_tt *ttm = bo->tbo.ttm;
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	struct mm_struct *mm = gtt->usertask->mm;
	unsigned long start = gtt->userptr;
	struct vm_area_struct *vma;
	struct hmm_range *range;
	unsigned long i;
	uint64_t *pfns;
	int retry = 0;
	int r = 0;

	if (!mm) /* Happens during process shutdown */
		return -ESRCH;

	if (unlikely(!mirror)) {
		DRM_DEBUG_DRIVER("Failed to get hmm_mirror\n");
		r = -EFAULT;
		goto out;
	}

	vma = find_vma(mm, start);
	if (unlikely(!vma || start < vma->vm_start)) {
		r = -EFAULT;
		goto out;
	}
	if (unlikely((gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) &&
		vma->vm_file)) {
		r = -EPERM;
		goto out;
	}

	range = kzalloc(sizeof(*range), GFP_KERNEL);
	if (unlikely(!range)) {
		r = -ENOMEM;
		goto out;
	}

	pfns = kvmalloc_array(ttm->num_pages, sizeof(*pfns), GFP_KERNEL);
	if (unlikely(!pfns)) {
		r = -ENOMEM;
		goto out_free_ranges;
	}

	amdgpu_hmm_init_range(range);
	range->default_flags = range->flags[HMM_PFN_VALID];
	range->default_flags |= amdgpu_ttm_tt_is_readonly(ttm) ?
				0 : range->flags[HMM_PFN_WRITE];
	range->pfn_flags_mask = 0;
	range->pfns = pfns;
	hmm_range_register(range, mirror, start,
			   start + ttm->num_pages * PAGE_SIZE, PAGE_SHIFT);

retry:
	/*
	 * Just wait for range to be valid, safe to ignore return value as we
	 * will use the return value of hmm_range_fault() below under the
	 * mmap_sem to ascertain the validity of the range.
	 */
	hmm_range_wait_until_valid(range, HMM_RANGE_DEFAULT_TIMEOUT);

	down_read(&mm->mmap_sem);

	r = hmm_range_fault(range, true);
	if (unlikely(r < 0)) {
		if (likely(r == -EAGAIN)) {
			/*
			 * return -EAGAIN, mmap_sem is dropped
			 */
			if (retry++ < MAX_RETRY_HMM_RANGE_FAULT)
				goto retry;
			else
				pr_err("Retry hmm fault too many times\n");
		}

		goto out_up_read;
	}

	up_read(&mm->mmap_sem);

	for (i = 0; i < ttm->num_pages; i++) {
		pages[i] = hmm_device_entry_to_page(range, pfns[i]);
		if (unlikely(!pages[i])) {
			pr_err("Page fault failed for pfn[%lu] = 0x%llx\n",
			       i, pfns[i]);
			r = -ENOMEM;
			goto out_free_pfns;
		}
	}

	gtt->range = range;

	return 0;

out_up_read:
	if (likely(r != -EAGAIN))
		up_read(&mm->mmap_sem);
out_free_pfns:
	hmm_range_unregister(range);
	kvfree(pfns);
out_free_ranges:
	kfree(range);
out:
	return r;
}
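
/*
 * Note on the retry logic above: hmm_range_fault() drops mmap_sem and returns
 * -EAGAIN when the range is invalidated while it is being faulted in, so the
 * fault is simply retried, bounded by MAX_RETRY_HMM_RANGE_FAULT to avoid
 * livelocking against a constantly changing address space.
 */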
/**
 * amdgpu_ttm_tt_userptr_range_done - stop HMM track the CPU page table change
 * Check if the pages backing this ttm range have been invalidated
 *
 * Returns: true if pages are still valid
 */
bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	bool r = false;

	if (!gtt || !gtt->userptr)
		return false;

	DRM_DEBUG_DRIVER("user_pages_done 0x%llx pages 0x%lx\n",
		gtt->userptr, ttm->num_pages);

	WARN_ONCE(!gtt->range || !gtt->range->pfns,
		"No user pages to check\n");

	if (gtt->range) {
		r = hmm_range_valid(gtt->range);
		hmm_range_unregister(gtt->range);

		kvfree(gtt->range->pfns);
		kfree(gtt->range);
		gtt->range = NULL;
	}

	return r;
}
#endif

/**
 * amdgpu_ttm_tt_set_user_pages - Copy pages in, putting old pages as necessary.
 *
 * Called by amdgpu_cs_list_validate(). This creates the page list
 * that backs user memory and will ultimately be mapped into the device
 * address space.
 */
void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages)
{
	unsigned long i;

	for (i = 0; i < ttm->num_pages; ++i)
		ttm->pages[i] = pages ? pages[i] : NULL;
}
/**
 * amdgpu_ttm_tt_pin_userptr - prepare the sg table with the user pages
 *
 * Called by amdgpu_ttm_backend_bind()
 */
static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	unsigned nents;
	int r;

	int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
	enum dma_data_direction direction = write ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

	/* Allocate an SG array and squash pages into it */
	r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
				      ttm->num_pages << PAGE_SHIFT,
				      GFP_KERNEL);
	if (r)
		goto release_sg;

	/* Map SG to device */
	r = -ENOMEM;
	nents = dma_map_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
	if (nents != ttm->sg->nents)
		goto release_sg;

	/* convert SG to linear array of pages and dma addresses */
	drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
					 gtt->ttm.dma_address, ttm->num_pages);

	return 0;

release_sg:
	kfree(ttm->sg);
	return r;
}

/**
 * amdgpu_ttm_tt_unpin_userptr - Unpin and unmap userptr pages
 */
static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
	enum dma_data_direction direction = write ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

	/* double check that we don't free the table twice */
	if (!ttm->sg->sgl)
		return;

	/* unmap the pages mapped to the device */
	dma_unmap_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction);

	sg_free_table(ttm->sg);

#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
	if (gtt->range &&
	    ttm->pages[0] == hmm_device_entry_to_page(gtt->range,
						      gtt->range->pfns[0]))
		WARN_ONCE(1, "Missing get_user_page_done\n");
#endif
}
int amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
				struct ttm_buffer_object *tbo,
				uint64_t flags)
{
	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(tbo);
	struct ttm_tt *ttm = tbo->ttm;
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	int r;

	if (abo->flags & AMDGPU_GEM_CREATE_MQD_GFX9) {
		uint64_t page_idx = 1;

		r = amdgpu_gart_bind(adev, gtt->offset, page_idx,
				ttm->pages, gtt->ttm.dma_address, flags);
		if (r)
			goto gart_bind_fail;

		/* Patch mtype of the second part BO */
		flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
		flags |= AMDGPU_PTE_MTYPE_VG10(AMDGPU_MTYPE_NC);

		r = amdgpu_gart_bind(adev,
				gtt->offset + (page_idx << PAGE_SHIFT),
				ttm->num_pages - page_idx,
				&ttm->pages[page_idx],
				&(gtt->ttm.dma_address[page_idx]), flags);
	} else {
		r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
				     ttm->pages, gtt->ttm.dma_address, flags);
	}

gart_bind_fail:
	if (r)
		DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
			  ttm->num_pages, gtt->offset);

	return r;
}
/**
 * amdgpu_ttm_backend_bind - Bind GTT memory
 *
 * Called by ttm_tt_bind() on behalf of ttm_bo_handle_move_mem().
 * This handles binding GTT memory to the device address space.
 */
static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
				   struct ttm_mem_reg *bo_mem)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
	struct amdgpu_ttm_tt *gtt = (void*)ttm;
	uint64_t flags;
	int r = 0;

	if (gtt->userptr) {
		r = amdgpu_ttm_tt_pin_userptr(ttm);
		if (r) {
			DRM_ERROR("failed to pin userptr\n");
			return r;
		}
	}
	if (!ttm->num_pages) {
		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
		     ttm->num_pages, bo_mem, ttm);
	}

	if (bo_mem->mem_type == AMDGPU_PL_GDS ||
	    bo_mem->mem_type == AMDGPU_PL_GWS ||
	    bo_mem->mem_type == AMDGPU_PL_OA)
		return -EINVAL;

	if (!amdgpu_gtt_mgr_has_gart_addr(bo_mem)) {
		gtt->offset = AMDGPU_BO_INVALID_OFFSET;
		return 0;
	}

	/* compute PTE flags relevant to this BO memory */
	flags = amdgpu_ttm_tt_pte_flags(adev, ttm, bo_mem);

	/* bind pages into GART page tables */
	gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
	r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
		ttm->pages, gtt->ttm.dma_address, flags);

	if (r)
		DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
			  ttm->num_pages, gtt->offset);
	return r;
}
/**
 * amdgpu_ttm_alloc_gart - Allocate GART memory for buffer object
 */
int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_ttm_tt *gtt = (void*)bo->ttm;
	struct ttm_mem_reg tmp;
	struct ttm_placement placement;
	struct ttm_place placements;
	uint64_t addr, flags;
	int r;

	if (bo->mem.start != AMDGPU_BO_INVALID_OFFSET)
		return 0;

	addr = amdgpu_gmc_agp_addr(bo);
	if (addr != AMDGPU_BO_INVALID_OFFSET) {
		bo->mem.start = addr >> PAGE_SHIFT;
	} else {

		/* allocate GART space */
		tmp = bo->mem;
		tmp.mm_node = NULL;
		placement.num_placement = 1;
		placement.placement = &placements;
		placement.num_busy_placement = 1;
		placement.busy_placement = &placements;
		placements.fpfn = 0;
		placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
		placements.flags = (bo->mem.placement & ~TTM_PL_MASK_MEM) |
			TTM_PL_FLAG_TT;

		r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx);
		if (unlikely(r))
			return r;

		/* compute PTE flags for this buffer object */
		flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, &tmp);

		/* Bind pages */
		gtt->offset = (u64)tmp.start << PAGE_SHIFT;
		r = amdgpu_ttm_gart_bind(adev, bo, flags);
		if (unlikely(r)) {
			ttm_bo_mem_put(bo, &tmp);
			return r;
		}

		ttm_bo_mem_put(bo, &bo->mem);
		bo->mem = tmp;
	}

	bo->offset = (bo->mem.start << PAGE_SHIFT) +
		bo->bdev->man[bo->mem.mem_type].gpu_offset;

	return 0;
}
/**
 * amdgpu_ttm_recover_gart - Rebind GTT pages
 *
 * Called by amdgpu_gtt_mgr_recover() from amdgpu_device_reset() to
 * rebind GTT pages during a GPU reset.
 */
int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
	uint64_t flags;
	int r;

	if (!tbo->ttm)
		return 0;

	flags = amdgpu_ttm_tt_pte_flags(adev, tbo->ttm, &tbo->mem);
	r = amdgpu_ttm_gart_bind(adev, tbo, flags);

	return r;
}
/**
 * amdgpu_ttm_backend_unbind - Unbind GTT mapped pages
 *
 * Called by ttm_tt_unbind() on behalf of ttm_bo_move_ttm() and
 * ttm_tt_destroy().
 */
static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	int r;

	/* if the pages have userptr pinning then clear that first */
	if (gtt->userptr)
		amdgpu_ttm_tt_unpin_userptr(ttm);

	if (gtt->offset == AMDGPU_BO_INVALID_OFFSET)
		return 0;

	/* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
	r = amdgpu_gart_unbind(adev, gtt->offset, ttm->num_pages);
	if (r)
		DRM_ERROR("failed to unbind %lu pages at 0x%08llX\n",
			  gtt->ttm.ttm.num_pages, gtt->offset);
	return r;
}

static void amdgpu_ttm_backend_destroy(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	if (gtt->usertask)
		put_task_struct(gtt->usertask);

	ttm_dma_tt_fini(&gtt->ttm);
	kfree(gtt);
}

static struct ttm_backend_func amdgpu_backend_func = {
	.bind = &amdgpu_ttm_backend_bind,
	.unbind = &amdgpu_ttm_backend_unbind,
	.destroy = &amdgpu_ttm_backend_destroy,
};
/**
 * amdgpu_ttm_tt_create - Create a ttm_tt object for a given BO
 *
 * @bo: The buffer object to create a GTT ttm_tt object around
 *
 * Called by ttm_tt_create().
 */
static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
					   uint32_t page_flags)
{
	struct amdgpu_device *adev;
	struct amdgpu_ttm_tt *gtt;

	adev = amdgpu_ttm_adev(bo->bdev);

	gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL);
	if (gtt == NULL)
		return NULL;

	gtt->ttm.ttm.func = &amdgpu_backend_func;

	/* allocate space for the uninitialized page entries */
	if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags)) {
		kfree(gtt);
		return NULL;
	}
	return &gtt->ttm.ttm;
}
/**
 * amdgpu_ttm_tt_populate - Map GTT pages visible to the device
 *
 * Map the pages of a ttm_tt object to an address space visible
 * to the underlying device.
 */
static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm,
			struct ttm_operation_ctx *ctx)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	/* user pages are bound by amdgpu_ttm_tt_pin_userptr() */
	if (gtt && gtt->userptr) {
		ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
		if (!ttm->sg)
			return -ENOMEM;

		ttm->page_flags |= TTM_PAGE_FLAG_SG;
		ttm->state = tt_unbound;
		return 0;
	}

	if (slave && ttm->sg) {
		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
						 gtt->ttm.dma_address,
						 ttm->num_pages);
		ttm->state = tt_unbound;
		return 0;
	}

#ifdef CONFIG_SWIOTLB
	if (adev->need_swiotlb && swiotlb_nr_tbl()) {
		return ttm_dma_populate(&gtt->ttm, adev->dev, ctx);
	}
#endif

	/* fall back to generic helper to populate the page array
	 * and map them to the device */
	return ttm_populate_and_map_pages(adev->dev, &gtt->ttm, ctx);
}

/**
 * amdgpu_ttm_tt_unpopulate - unmap GTT pages and unpopulate page arrays
 *
 * Unmaps pages of a ttm_tt object from the device address space and
 * unpopulates the page array backing it.
 */
static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	struct amdgpu_device *adev;
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (gtt && gtt->userptr) {
		amdgpu_ttm_tt_set_user_pages(ttm, NULL);
		kfree(ttm->sg);
		ttm->page_flags &= ~TTM_PAGE_FLAG_SG;
		return;
	}

	if (slave)
		return;

	adev = amdgpu_ttm_adev(ttm->bdev);

#ifdef CONFIG_SWIOTLB
	if (adev->need_swiotlb && swiotlb_nr_tbl()) {
		ttm_dma_unpopulate(&gtt->ttm, adev->dev);
		return;
	}
#endif

	/* fall back to generic helper to unmap and unpopulate array */
	ttm_unmap_and_unpopulate_pages(adev->dev, &gtt->ttm);
}
/**
 * amdgpu_ttm_tt_set_userptr - Initialize userptr GTT ttm_tt for the current
 * task
 *
 * @ttm: The ttm_tt object to bind this userptr object to
 * @addr: The address in the current tasks VM space to use
 * @flags: Requirements of userptr object.
 *
 * Called by amdgpu_gem_userptr_ioctl() to bind userptr pages
 * to current task
 */
int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
			      uint32_t flags)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	if (gtt == NULL)
		return -EINVAL;

	gtt->userptr = addr;
	gtt->userflags = flags;

	if (gtt->usertask)
		put_task_struct(gtt->usertask);
	gtt->usertask = current->group_leader;
	get_task_struct(gtt->usertask);

	return 0;
}

/**
 * amdgpu_ttm_tt_get_usermm - Return memory manager for ttm_tt object
 */
struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	if (gtt == NULL)
		return NULL;

	if (gtt->usertask == NULL)
		return NULL;

	return gtt->usertask->mm;
}
/**
 * amdgpu_ttm_tt_affect_userptr - Determine if a ttm_tt object lays inside an
 * address range for the current task.
 *
 */
bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
				  unsigned long end)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	unsigned long size;

	if (gtt == NULL || !gtt->userptr)
		return false;

	/* Return false if no part of the ttm_tt object lies within
	 * the range
	 */
	size = (unsigned long)gtt->ttm.ttm.num_pages * PAGE_SIZE;
	if (gtt->userptr > end || gtt->userptr + size <= start)
		return false;

	return true;
}

/**
 * amdgpu_ttm_tt_is_userptr - Are the backing pages userptr pages?
 */
bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	if (gtt == NULL || !gtt->userptr)
		return false;

	return true;
}

/**
 * amdgpu_ttm_tt_is_readonly - Is the ttm_tt object read only?
 */
bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	if (gtt == NULL)
		return false;

	return !!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
}
/**
 * amdgpu_ttm_tt_pde_flags - Compute PDE flags for ttm_tt object
 *
 * @ttm: The ttm_tt object to compute the flags for
 * @mem: The memory registry backing this ttm_tt object
 *
 * Figure out the flags to use for a VM PDE (Page Directory Entry).
 */
uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
{
	uint64_t flags = 0;

	if (mem && mem->mem_type != TTM_PL_SYSTEM)
		flags |= AMDGPU_PTE_VALID;

	if (mem && mem->mem_type == TTM_PL_TT) {
		flags |= AMDGPU_PTE_SYSTEM;

		if (ttm->caching_state == tt_cached)
			flags |= AMDGPU_PTE_SNOOPED;
	}

	return flags;
}

/**
 * amdgpu_ttm_tt_pte_flags - Compute PTE flags for ttm_tt object
 *
 * @ttm: The ttm_tt object to compute the flags for
 * @mem: The memory registry backing this ttm_tt object
 *
 * Figure out the flags to use for a VM PTE (Page Table Entry).
 */
uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
				 struct ttm_mem_reg *mem)
{
	uint64_t flags = amdgpu_ttm_tt_pde_flags(ttm, mem);

	flags |= adev->gart.gart_pte_flags;
	flags |= AMDGPU_PTE_READABLE;

	if (!amdgpu_ttm_tt_is_readonly(ttm))
		flags |= AMDGPU_PTE_WRITEABLE;

	return flags;
}
/**
 * amdgpu_ttm_bo_eviction_valuable - Check to see if we can evict a buffer
 * object.
 *
 * Return true if eviction is sensible. Called by ttm_mem_evict_first() on
 * behalf of ttm_bo_mem_force_space() which tries to evict buffer objects until
 * it can find space for a new object and by ttm_bo_force_list_clean() which is
 * used to clean out a memory space.
 */
static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
					    const struct ttm_place *place)
{
	unsigned long num_pages = bo->mem.num_pages;
	struct drm_mm_node *node = bo->mem.mm_node;
	struct reservation_object_list *flist;
	struct dma_fence *f;
	int i;

	/* Don't evict VM page tables while they are busy, otherwise we can't
	 * cleanly handle page faults.
	 */
	if (bo->type == ttm_bo_type_kernel &&
	    !reservation_object_test_signaled_rcu(bo->resv, true))
		return false;

	/* If bo is a KFD BO, check if the bo belongs to the current process.
	 * If true, then return false as any KFD process needs all its BOs to
	 * be resident to run successfully
	 */
	flist = reservation_object_get_list(bo->resv);
	if (flist) {
		for (i = 0; i < flist->shared_count; ++i) {
			f = rcu_dereference_protected(flist->shared[i],
				reservation_object_held(bo->resv));
			if (amdkfd_fence_check_mm(f, current->mm))
				return false;
		}
	}

	switch (bo->mem.mem_type) {
	case TTM_PL_TT:
		return true;

	case TTM_PL_VRAM:
		/* Check each drm MM node individually */
		while (num_pages) {
			if (place->fpfn < (node->start + node->size) &&
			    !(place->lpfn && place->lpfn <= node->start))
				return true;

			num_pages -= node->size;
			++node;
		}
		return false;

	default:
		break;
	}

	return ttm_bo_eviction_valuable(bo, place);
}
/**
 * amdgpu_ttm_access_memory - Read or Write memory that backs a buffer object.
 *
 * @bo: The buffer object to read/write
 * @offset: Offset into buffer object
 * @buf: Secondary buffer to write/read from
 * @len: Length in bytes of access
 * @write: true if writing
 *
 * This is used to access VRAM that backs a buffer object via MMIO
 * access for debugging purposes.
 */
static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
				    unsigned long offset,
				    void *buf, int len, int write)
{
	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
	struct drm_mm_node *nodes;
	uint32_t value = 0;
	int ret = 0;
	uint64_t pos;
	unsigned long flags;

	if (bo->mem.mem_type != TTM_PL_VRAM)
		return -EIO;

	nodes = amdgpu_find_mm_node(&abo->tbo.mem, &offset);
	pos = (nodes->start << PAGE_SHIFT) + offset;

	while (len && pos < adev->gmc.mc_vram_size) {
		uint64_t aligned_pos = pos & ~(uint64_t)3;
		uint32_t bytes = 4 - (pos & 3);
		uint32_t shift = (pos & 3) * 8;
		uint32_t mask = 0xffffffff << shift;

		if (len < bytes) {
			mask &= 0xffffffff >> (bytes - len) * 8;
			bytes = len;
		}

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)aligned_pos) | 0x80000000);
		WREG32_NO_KIQ(mmMM_INDEX_HI, aligned_pos >> 31);
		if (!write || mask != 0xffffffff)
			value = RREG32_NO_KIQ(mmMM_DATA);
		if (write) {
			value &= ~mask;
			value |= (*(uint32_t *)buf << shift) & mask;
			WREG32_NO_KIQ(mmMM_DATA, value);
		}
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
		if (!write) {
			value = (value & mask) >> shift;
			memcpy(buf, &value, bytes);
		}

		ret += bytes;
		buf = (uint8_t *)buf + bytes;
		pos += bytes;
		len -= bytes;
		if (pos >= (nodes->start + nodes->size) << PAGE_SHIFT) {
			++nodes;
			pos = (nodes->start << PAGE_SHIFT);
		}
	}

	return ret;
}
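
/*
 * Note on amdgpu_ttm_access_memory() above: VRAM that is not CPU visible is
 * accessed indirectly through the MM_INDEX/MM_INDEX_HI/MM_DATA mailbox
 * registers, one aligned dword at a time. The shift and mask computed from
 * the low two address bits let the helper read or write byte counts that are
 * not dword aligned at either end of the request.
 */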
static struct ttm_bo_driver amdgpu_bo_driver = {
	.ttm_tt_create = &amdgpu_ttm_tt_create,
	.ttm_tt_populate = &amdgpu_ttm_tt_populate,
	.ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
	.invalidate_caches = &amdgpu_invalidate_caches,
	.init_mem_type = &amdgpu_init_mem_type,
	.eviction_valuable = amdgpu_ttm_bo_eviction_valuable,
	.evict_flags = &amdgpu_evict_flags,
	.move = &amdgpu_bo_move,
	.verify_access = &amdgpu_verify_access,
	.move_notify = &amdgpu_bo_move_notify,
	.release_notify = &amdgpu_bo_release_notify,
	.fault_reserve_notify = &amdgpu_bo_fault_reserve_notify,
	.io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
	.io_mem_free = &amdgpu_ttm_io_mem_free,
	.io_mem_pfn = amdgpu_ttm_io_mem_pfn,
	.access_memory = &amdgpu_ttm_access_memory,
	.del_from_lru_notify = &amdgpu_vm_del_from_lru_notify
};
/*
 * Firmware Reservation functions
 */
/**
 * amdgpu_ttm_fw_reserve_vram_fini - free fw reserved vram
 *
 * @adev: amdgpu_device pointer
 *
 * free fw reserved vram if it has been reserved.
 */
static void amdgpu_ttm_fw_reserve_vram_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->fw_vram_usage.reserved_bo,
		NULL, &adev->fw_vram_usage.va);
}
/**
 * amdgpu_ttm_fw_reserve_vram_init - create bo vram reservation from fw
 *
 * @adev: amdgpu_device pointer
 *
 * create bo vram reservation from fw.
 */
static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
{
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_bo_param bp;
	int r = 0;
	int i;
	u64 vram_size = adev->gmc.visible_vram_size;
	u64 offset = adev->fw_vram_usage.start_offset;
	u64 size = adev->fw_vram_usage.size;
	struct amdgpu_bo *bo;

	memset(&bp, 0, sizeof(bp));
	bp.size = adev->fw_vram_usage.size;
	bp.byte_align = PAGE_SIZE;
	bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
	bp.flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
		AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
	bp.type = ttm_bo_type_kernel;
	adev->fw_vram_usage.va = NULL;
	adev->fw_vram_usage.reserved_bo = NULL;

	if (adev->fw_vram_usage.size > 0 &&
		adev->fw_vram_usage.size <= vram_size) {

		r = amdgpu_bo_create(adev, &bp,
				     &adev->fw_vram_usage.reserved_bo);
		if (r)
			goto error_create;

		r = amdgpu_bo_reserve(adev->fw_vram_usage.reserved_bo, false);
		if (r)
			goto error_reserve;

		/* remove the original mem node and create a new one at the
		 * request position
		 */
		bo = adev->fw_vram_usage.reserved_bo;
		offset = ALIGN(offset, PAGE_SIZE);
		for (i = 0; i < bo->placement.num_placement; ++i) {
			bo->placements[i].fpfn = offset >> PAGE_SHIFT;
			bo->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
		}

		ttm_bo_mem_put(&bo->tbo, &bo->tbo.mem);
		r = ttm_bo_mem_space(&bo->tbo, &bo->placement,
				     &bo->tbo.mem, &ctx);
		if (r)
			goto error_pin;

		r = amdgpu_bo_pin_restricted(adev->fw_vram_usage.reserved_bo,
			AMDGPU_GEM_DOMAIN_VRAM,
			adev->fw_vram_usage.start_offset,
			(adev->fw_vram_usage.start_offset +
			adev->fw_vram_usage.size));
		if (r)
			goto error_pin;

		r = amdgpu_bo_kmap(adev->fw_vram_usage.reserved_bo,
			&adev->fw_vram_usage.va);
		if (r)
			goto error_kmap;

		amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
	}
	return r;

error_kmap:
	amdgpu_bo_unpin(adev->fw_vram_usage.reserved_bo);
error_pin:
	amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
error_reserve:
	amdgpu_bo_unref(&adev->fw_vram_usage.reserved_bo);
error_create:
	adev->fw_vram_usage.va = NULL;
	adev->fw_vram_usage.reserved_bo = NULL;
	return r;
}
/**
 * amdgpu_ttm_init - Init the memory management (ttm) as well as various
 * gtt/vram related fields.
 *
 * This initializes all of the memory space pools that the TTM layer
 * will need such as the GTT space (system memory mapped to the device),
 * VRAM (on-board memory), and on-chip memories (GDS, GWS, OA) which
 * can be mapped per VMID.
 */
int amdgpu_ttm_init(struct amdgpu_device *adev)
{
	uint64_t gtt_size;
	int r;
	u64 vis_vram_limit;

	mutex_init(&adev->mman.gtt_window_lock);

	/* No other users of this address space, so set it to 0 */
	r = ttm_bo_device_init(&adev->mman.bdev,
			       &amdgpu_bo_driver,
			       adev->ddev->anon_inode->i_mapping,
			       adev->need_dma32);
	if (r) {
		DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
		return r;
	}
	adev->mman.initialized = true;

	/* We opt to avoid OOM on system pages allocations */
	adev->mman.bdev.no_retry = true;

	/* Initialize VRAM pool with all of VRAM divided into pages */
	r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_VRAM,
			   adev->gmc.real_vram_size >> PAGE_SHIFT);
	if (r) {
		DRM_ERROR("Failed initializing VRAM heap.\n");
		return r;
	}

	/* Reduce size of CPU-visible VRAM if requested */
	vis_vram_limit = (u64)amdgpu_vis_vram_limit * 1024 * 1024;
	if (amdgpu_vis_vram_limit > 0 &&
	    vis_vram_limit <= adev->gmc.visible_vram_size)
		adev->gmc.visible_vram_size = vis_vram_limit;

	/* Change the size here instead of the init above so only lpfn is affected */
	amdgpu_ttm_set_buffer_funcs_status(adev, false);

	adev->mman.aper_base_kaddr = ioremap_wc(adev->gmc.aper_base,
						adev->gmc.visible_vram_size);

	/*
	 *The reserved vram for firmware must be pinned to the specified
	 *place on the VRAM, so reserve it early.
	 */
	r = amdgpu_ttm_fw_reserve_vram_init(adev);
	if (r)
		return r;

	/* allocate memory as required for VGA
	 * This is used for VGA emulation and pre-OS scanout buffers to
	 * avoid display artifacts while transitioning between pre-OS
	 * and driver.
	 */
	r = amdgpu_bo_create_kernel(adev, adev->gmc.stolen_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM,
				    &adev->stolen_vga_memory,
				    NULL, NULL);
	if (r)
		return r;
	DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
		 (unsigned) (adev->gmc.real_vram_size / (1024 * 1024)));

	/* Compute GTT size, either based on 3/4th the size of RAM size
	 * or whatever the user passed on module init */
	if (amdgpu_gtt_size == -1) {
		struct sysinfo si;

		si_meminfo(&si);
		gtt_size = min(max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
			       adev->gmc.mc_vram_size),
			       ((uint64_t)si.totalram * si.mem_unit * 3/4));
	} else
		gtt_size = (uint64_t)amdgpu_gtt_size << 20;

	/* Initialize GTT memory pool */
	r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_TT, gtt_size >> PAGE_SHIFT);
	if (r) {
		DRM_ERROR("Failed initializing GTT heap.\n");
		return r;
	}
	DRM_INFO("amdgpu: %uM of GTT memory ready.\n",
		 (unsigned)(gtt_size / (1024 * 1024)));
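
	/*
	 * Illustrative sizing example (assuming the default
	 * AMDGPU_DEFAULT_GTT_SIZE_MB of 3072): on a system with 16 GiB of RAM
	 * and 8 GiB of VRAM the default GTT pool becomes
	 * min(max(3 GiB, 8 GiB), 12 GiB) = 8 GiB.
	 */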
	/* Initialize various on-chip memory pools */
	r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GDS,
			   adev->gds.gds_size);
	if (r) {
		DRM_ERROR("Failed initializing GDS heap.\n");
		return r;
	}

	r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GWS,
			   adev->gds.gws_size);
	if (r) {
		DRM_ERROR("Failed initializing gws heap.\n");
		return r;
	}

	r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_OA,
			   adev->gds.oa_size);
	if (r) {
		DRM_ERROR("Failed initializing oa heap.\n");
		return r;
	}

	/* Register debugfs entries for amdgpu_ttm */
	r = amdgpu_ttm_debugfs_init(adev);
	if (r) {
		DRM_ERROR("Failed to init debugfs\n");
		return r;
	}

	return 0;
}
/**
 * amdgpu_ttm_late_init - Handle any late initialization for amdgpu_ttm
 */
void amdgpu_ttm_late_init(struct amdgpu_device *adev)
{
	/* return the VGA stolen memory (if any) back to VRAM */
	amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL);
}
/**
 * amdgpu_ttm_fini - De-initialize the TTM memory pools
 */
void amdgpu_ttm_fini(struct amdgpu_device *adev)
{
	if (!adev->mman.initialized)
		return;

	amdgpu_ttm_debugfs_fini(adev);
	amdgpu_ttm_fw_reserve_vram_fini(adev);
	if (adev->mman.aper_base_kaddr)
		iounmap(adev->mman.aper_base_kaddr);
	adev->mman.aper_base_kaddr = NULL;

	ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_VRAM);
	ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_TT);
	ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GDS);
	ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GWS);
	ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_OA);
	ttm_bo_device_release(&adev->mman.bdev);
	adev->mman.initialized = false;
	DRM_INFO("amdgpu: ttm finalized\n");
}
/**
 * amdgpu_ttm_set_buffer_funcs_status - enable/disable use of buffer functions
 *
 * @adev: amdgpu_device pointer
 * @enable: true when we can use buffer functions.
 *
 * Enable/disable use of buffer functions during suspend/resume. This should
 * only be called at bootup or when userspace isn't running.
 */
void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
{
	struct ttm_mem_type_manager *man = &adev->mman.bdev.man[TTM_PL_VRAM];
	uint64_t size;
	int r;

	if (!adev->mman.initialized || adev->in_gpu_reset ||
	    adev->mman.buffer_funcs_enabled == enable)
		return;

	if (enable) {
		struct amdgpu_ring *ring;
		struct drm_sched_rq *rq;

		ring = adev->mman.buffer_funcs_ring;
		rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
		r = drm_sched_entity_init(&adev->mman.entity, &rq, 1, NULL);
		if (r) {
			DRM_ERROR("Failed setting up TTM BO move entity (%d)\n",
				  r);
			return;
		}
	} else {
		drm_sched_entity_destroy(&adev->mman.entity);
		dma_fence_put(man->move);
		man->move = NULL;
	}

	/* this just adjusts TTM size idea, which sets lpfn to the correct value */
	if (enable)
		size = adev->gmc.real_vram_size;
	else
		size = adev->gmc.visible_vram_size;
	man->size = size >> PAGE_SHIFT;
	adev->mman.buffer_funcs_enabled = enable;
}
int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv = filp->private_data;
	struct amdgpu_device *adev = file_priv->minor->dev->dev_private;

	return ttm_bo_mmap(filp, vma, &adev->mman.bdev);
}
static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
			     struct ttm_mem_reg *mem, unsigned num_pages,
			     uint64_t offset, unsigned window,
			     struct amdgpu_ring *ring,
			     uint64_t *addr)
{
	struct amdgpu_ttm_tt *gtt = (void *)bo->ttm;
	struct amdgpu_device *adev = ring->adev;
	struct ttm_tt *ttm = bo->ttm;
	struct amdgpu_job *job;
	unsigned num_dw, num_bytes;
	dma_addr_t *dma_address;
	struct dma_fence *fence;
	uint64_t src_addr, dst_addr;
	uint64_t flags;
	int r;

	BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
	       AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);

	*addr = adev->gmc.gart_start;
	*addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
		AMDGPU_GPU_PAGE_SIZE;

	num_dw = adev->mman.buffer_funcs->copy_num_dw;
	while (num_dw & 0x7)
		num_dw++;

	num_bytes = num_pages * 8;

	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes, &job);
	if (r)
		return r;

	src_addr = num_dw * 4;
	src_addr += job->ibs[0].gpu_addr;

	dst_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
	dst_addr += window * AMDGPU_GTT_MAX_TRANSFER_SIZE * 8;
	amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
				dst_addr, num_bytes);

	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
	WARN_ON(job->ibs[0].length_dw > num_dw);

	dma_address = &gtt->ttm.dma_address[offset >> PAGE_SHIFT];
	flags = amdgpu_ttm_tt_pte_flags(adev, ttm, mem);
	r = amdgpu_gart_map(adev, 0, num_pages, dma_address, flags,
			    &job->ibs[0].ptr[num_dw]);
	if (r)
		goto error_free;

	r = amdgpu_job_submit(job, &adev->mman.entity,
			      AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
	if (r)
		goto error_free;

	dma_fence_put(fence);

	return 0;

error_free:
	amdgpu_job_free(job);
	return r;
}
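
/*
 * Note on amdgpu_map_buffer() above: the returned *addr always points at one
 * of the per-window staging areas right after gart_start. The indirect buffer
 * is laid out as num_dw copy/padding dwords followed by the PTEs that
 * amdgpu_gart_map() writes, and the emitted copy transfers those PTEs into
 * the GART table entries that back the chosen window.
 */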
int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
		       uint64_t dst_offset, uint32_t byte_count,
		       struct reservation_object *resv,
		       struct dma_fence **fence, bool direct_submit,
		       bool vm_needs_flush)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_job *job;

	uint32_t max_bytes;
	unsigned num_loops, num_dw;
	unsigned i;
	int r;

	if (direct_submit && !ring->sched.ready) {
		DRM_ERROR("Trying to move memory with ring turned off.\n");
		return -EINVAL;
	}

	max_bytes = adev->mman.buffer_funcs->copy_max_bytes;
	num_loops = DIV_ROUND_UP(byte_count, max_bytes);
	num_dw = num_loops * adev->mman.buffer_funcs->copy_num_dw;

	/* for IB padding */
	while (num_dw & 0x7)
		num_dw++;

	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
	if (r)
		return r;

	if (vm_needs_flush) {
		job->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gart.bo);
		job->vm_needs_flush = true;
	}
	if (resv) {
		r = amdgpu_sync_resv(adev, &job->sync, resv,
				     AMDGPU_FENCE_OWNER_UNDEFINED,
				     false);
		if (r) {
			DRM_ERROR("sync failed (%d).\n", r);
			goto error_free;
		}
	}

	for (i = 0; i < num_loops; i++) {
		uint32_t cur_size_in_bytes = min(byte_count, max_bytes);

		amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_offset,
					dst_offset, cur_size_in_bytes);

		src_offset += cur_size_in_bytes;
		dst_offset += cur_size_in_bytes;
		byte_count -= cur_size_in_bytes;
	}

	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
	WARN_ON(job->ibs[0].length_dw > num_dw);
	if (direct_submit)
		r = amdgpu_job_submit_direct(job, ring, fence);
	else
		r = amdgpu_job_submit(job, &adev->mman.entity,
				      AMDGPU_FENCE_OWNER_UNDEFINED, fence);
	if (r)
		goto error_free;

	return r;

error_free:
	amdgpu_job_free(job);
	DRM_ERROR("Error scheduling IBs (%d)\n", r);
	return r;
}
int amdgpu_fill_buffer(struct amdgpu_bo *bo,
		       uint32_t src_data,
		       struct reservation_object *resv,
		       struct dma_fence **fence)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	uint32_t max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;

	struct drm_mm_node *mm_node;
	unsigned long num_pages;
	unsigned int num_loops, num_dw;

	struct amdgpu_job *job;
	int r;

	if (!adev->mman.buffer_funcs_enabled) {
		DRM_ERROR("Trying to clear memory with ring turned off.\n");
		return -EINVAL;
	}

	if (bo->tbo.mem.mem_type == TTM_PL_TT) {
		r = amdgpu_ttm_alloc_gart(&bo->tbo);
		if (r)
			return r;
	}

	num_pages = bo->tbo.num_pages;
	mm_node = bo->tbo.mem.mm_node;
	num_loops = 0;
	while (num_pages) {
		uint64_t byte_count = mm_node->size << PAGE_SHIFT;

		num_loops += DIV_ROUND_UP_ULL(byte_count, max_bytes);
		num_pages -= mm_node->size;
		++mm_node;
	}
	num_dw = num_loops * adev->mman.buffer_funcs->fill_num_dw;

	/* for IB padding */
	num_dw += 64;

	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
	if (r)
		return r;

	if (resv) {
		r = amdgpu_sync_resv(adev, &job->sync, resv,
				     AMDGPU_FENCE_OWNER_UNDEFINED, false);
		if (r) {
			DRM_ERROR("sync failed (%d).\n", r);
			goto error_free;
		}
	}

	num_pages = bo->tbo.num_pages;
	mm_node = bo->tbo.mem.mm_node;

	while (num_pages) {
		uint64_t byte_count = mm_node->size << PAGE_SHIFT;
		uint64_t dst_addr;

		dst_addr = amdgpu_mm_node_addr(&bo->tbo, mm_node, &bo->tbo.mem);
		while (byte_count) {
			uint32_t cur_size_in_bytes = min_t(uint64_t, byte_count,
							   max_bytes);

			amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data,
						dst_addr, cur_size_in_bytes);

			dst_addr += cur_size_in_bytes;
			byte_count -= cur_size_in_bytes;
		}

		num_pages -= mm_node->size;
		++mm_node;
	}

	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
	WARN_ON(job->ibs[0].length_dw > num_dw);
	r = amdgpu_job_submit(job, &adev->mman.entity,
			      AMDGPU_FENCE_OWNER_UNDEFINED, fence);
	if (r)
		goto error_free;

	return 0;

error_free:
	amdgpu_job_free(job);
	return r;
}
#if defined(CONFIG_DEBUG_FS)

static int amdgpu_mm_dump_table(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	unsigned ttm_pl = (uintptr_t)node->info_ent->data;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct ttm_mem_type_manager *man = &adev->mman.bdev.man[ttm_pl];
	struct drm_printer p = drm_seq_file_printer(m);

	man->func->debug(man, &p);
	return 0;
}

static const struct drm_info_list amdgpu_ttm_debugfs_list[] = {
	{"amdgpu_vram_mm", amdgpu_mm_dump_table, 0, (void *)TTM_PL_VRAM},
	{"amdgpu_gtt_mm", amdgpu_mm_dump_table, 0, (void *)TTM_PL_TT},
	{"amdgpu_gds_mm", amdgpu_mm_dump_table, 0, (void *)AMDGPU_PL_GDS},
	{"amdgpu_gws_mm", amdgpu_mm_dump_table, 0, (void *)AMDGPU_PL_GWS},
	{"amdgpu_oa_mm", amdgpu_mm_dump_table, 0, (void *)AMDGPU_PL_OA},
	{"ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL},
#ifdef CONFIG_SWIOTLB
	{"ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL}
#endif
};
/**
 * amdgpu_ttm_vram_read - Linear read access to VRAM
 *
 * Accesses VRAM via MMIO for debugging purposes.
 */
static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
				    size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	if (*pos >= adev->gmc.mc_vram_size)
		return -ENXIO;

	while (size) {
		unsigned long flags;
		uint32_t value;

		if (*pos >= adev->gmc.mc_vram_size)
			return result;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000);
		WREG32_NO_KIQ(mmMM_INDEX_HI, *pos >> 31);
		value = RREG32_NO_KIQ(mmMM_DATA);
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);

		r = put_user(value, (uint32_t *)buf);
		if (r)
			return r;

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

/**
 * amdgpu_ttm_vram_write - Linear write access to VRAM
 *
 * Accesses VRAM via MMIO for debugging purposes.
 */
static ssize_t amdgpu_ttm_vram_write(struct file *f, const char __user *buf,
				     size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	if (*pos >= adev->gmc.mc_vram_size)
		return -ENXIO;

	while (size) {
		unsigned long flags;
		uint32_t value;

		if (*pos >= adev->gmc.mc_vram_size)
			return result;

		r = get_user(value, (uint32_t *)buf);
		if (r)
			return r;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000);
		WREG32_NO_KIQ(mmMM_INDEX_HI, *pos >> 31);
		WREG32_NO_KIQ(mmMM_DATA, value);
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

static const struct file_operations amdgpu_ttm_vram_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_ttm_vram_read,
	.write = amdgpu_ttm_vram_write,
	.llseek = default_llseek,
};
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS

/**
 * amdgpu_ttm_gtt_read - Linear read access to GTT memory
 */
static ssize_t amdgpu_ttm_gtt_read(struct file *f, char __user *buf,
				   size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	while (size) {
		loff_t p = *pos / PAGE_SIZE;
		unsigned off = *pos & ~PAGE_MASK;
		size_t cur_size = min_t(size_t, size, PAGE_SIZE - off);
		struct page *page;
		void *ptr;

		if (p >= adev->gart.num_cpu_pages)
			return result;

		page = adev->gart.pages[p];
		if (page) {
			ptr = kmap(page);
			ptr += off;

			r = copy_to_user(buf, ptr, cur_size);
			kunmap(adev->gart.pages[p]);
		} else
			r = clear_user(buf, cur_size);

		if (r)
			return -EFAULT;

		result += cur_size;
		buf += cur_size;
		*pos += cur_size;
		size -= cur_size;
	}

	return result;
}

static const struct file_operations amdgpu_ttm_gtt_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_ttm_gtt_read,
	.llseek = default_llseek
};

#endif
/**
 * amdgpu_iomem_read - Virtual read access to GPU mapped memory
 *
 * This function is used to read memory that has been mapped to the
 * GPU and the known addresses are not physical addresses but instead
 * bus addresses (e.g., what you'd put in an IB or ring buffer).
 */
static ssize_t amdgpu_iomem_read(struct file *f, char __user *buf,
				 size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	struct iommu_domain *dom;
	ssize_t result = 0;
	int r;

	/* retrieve the IOMMU domain if any for this device */
	dom = iommu_get_domain_for_dev(adev->dev);

	while (size) {
		phys_addr_t addr = *pos & PAGE_MASK;
		loff_t off = *pos & ~PAGE_MASK;
		size_t bytes = PAGE_SIZE - off;
		unsigned long pfn;
		struct page *p;
		void *ptr;

		bytes = bytes < size ? bytes : size;

		/* Translate the bus address to a physical address. If
		 * the domain is NULL it means there is no IOMMU active
		 * and the address translation is the identity
		 */
		addr = dom ? iommu_iova_to_phys(dom, addr) : addr;

		pfn = addr >> PAGE_SHIFT;
		if (!pfn_valid(pfn))
			return -EPERM;

		p = pfn_to_page(pfn);
		if (p->mapping != adev->mman.bdev.dev_mapping)
			return -EPERM;

		ptr = kmap(p);
		r = copy_to_user(buf, ptr + off, bytes);
		kunmap(p);
		if (r)
			return -EFAULT;

		size -= bytes;
		*pos += bytes;
		buf += bytes;
		result += bytes;
	}

	return result;
}

/**
 * amdgpu_iomem_write - Virtual write access to GPU mapped memory
 *
 * This function is used to write memory that has been mapped to the
 * GPU and the known addresses are not physical addresses but instead
 * bus addresses (e.g., what you'd put in an IB or ring buffer).
 */
static ssize_t amdgpu_iomem_write(struct file *f, const char __user *buf,
				  size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	struct iommu_domain *dom;
	ssize_t result = 0;
	int r;

	dom = iommu_get_domain_for_dev(adev->dev);

	while (size) {
		phys_addr_t addr = *pos & PAGE_MASK;
		loff_t off = *pos & ~PAGE_MASK;
		size_t bytes = PAGE_SIZE - off;
		unsigned long pfn;
		struct page *p;
		void *ptr;

		bytes = bytes < size ? bytes : size;

		addr = dom ? iommu_iova_to_phys(dom, addr) : addr;

		pfn = addr >> PAGE_SHIFT;
		if (!pfn_valid(pfn))
			return -EPERM;

		p = pfn_to_page(pfn);
		if (p->mapping != adev->mman.bdev.dev_mapping)
			return -EPERM;

		ptr = kmap(p);
		r = copy_from_user(ptr + off, buf, bytes);
		kunmap(p);
		if (r)
			return -EFAULT;

		size -= bytes;
		*pos += bytes;
		buf += bytes;
		result += bytes;
	}

	return result;
}

static const struct file_operations amdgpu_ttm_iomem_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_iomem_read,
	.write = amdgpu_iomem_write,
	.llseek = default_llseek
};
static const struct {
	char *name;
	const struct file_operations *fops;
	int domain;
} ttm_debugfs_entries[] = {
	{ "amdgpu_vram", &amdgpu_ttm_vram_fops, TTM_PL_VRAM },
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
	{ "amdgpu_gtt", &amdgpu_ttm_gtt_fops, TTM_PL_TT },
#endif
	{ "amdgpu_iomem", &amdgpu_ttm_iomem_fops, TTM_PL_SYSTEM },
};
static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned count;

	struct drm_minor *minor = adev->ddev->primary;
	struct dentry *ent, *root = minor->debugfs_root;

	for (count = 0; count < ARRAY_SIZE(ttm_debugfs_entries); count++) {
		ent = debugfs_create_file(
				ttm_debugfs_entries[count].name,
				S_IFREG | S_IRUGO, root,
				adev,
				ttm_debugfs_entries[count].fops);
		if (IS_ERR(ent))
			return PTR_ERR(ent);
		if (ttm_debugfs_entries[count].domain == TTM_PL_VRAM)
			i_size_write(ent->d_inode, adev->gmc.mc_vram_size);
		else if (ttm_debugfs_entries[count].domain == TTM_PL_TT)
			i_size_write(ent->d_inode, adev->gmc.gart_size);
		adev->mman.debugfs_entries[count] = ent;
	}

	count = ARRAY_SIZE(amdgpu_ttm_debugfs_list);

#ifdef CONFIG_SWIOTLB
	if (!(adev->need_swiotlb && swiotlb_nr_tbl()))
		--count;
#endif

	return amdgpu_debugfs_add_files(adev, amdgpu_ttm_debugfs_list, count);
#else
	return 0;
#endif
}
static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(ttm_debugfs_entries); i++)
		debugfs_remove(adev->mman.debugfs_entries[i]);
#endif
}