/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */

#include <linux/dma-mapping.h>
#include <linux/iommu.h>
#include <linux/hmm.h>
#include <linux/pagemap.h>
#include <linux/sched/task.h>
#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/swiotlb.h>
#include <linux/dma-buf.h>
#include <linux/sizes.h>

#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_page_alloc.h>

#include <drm/drm_debugfs.h>
#include <drm/amdgpu_drm.h>

#include "amdgpu.h"
#include "amdgpu_object.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_sdma.h"
#include "amdgpu_ras.h"
#include "bif/bif_4_1_d.h"

#define AMDGPU_TTM_VRAM_MAX_DW_READ	(size_t)128

/**
 * amdgpu_init_mem_type - Initialize a memory manager for a specific type of
 * memory request.
 *
 * @bdev: The TTM BO device object (contains a reference to amdgpu_device)
 * @type: The type of memory requested
 * @man: The memory type manager for each domain
 *
 * This is called by ttm_bo_init_mm() when a buffer object is being
 * initialized.
 */
static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
				struct ttm_mem_type_manager *man)
{
	struct amdgpu_device *adev;

	adev = amdgpu_ttm_adev(bdev);

	switch (type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_TT:
		/* GTT memory */
		man->func = &amdgpu_gtt_mgr_func;
		man->gpu_offset = adev->gmc.gart_start;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
		break;
	case TTM_PL_VRAM:
		/* "On-card" video ram */
		man->func = &amdgpu_vram_mgr_func;
		man->gpu_offset = adev->gmc.vram_start;
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	case AMDGPU_PL_GDS:
	case AMDGPU_PL_GWS:
	case AMDGPU_PL_OA:
		/* On-chip GDS memory */
		man->func = &ttm_bo_manager_func;
		man->gpu_offset = 0;
		man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_CMA;
		man->available_caching = TTM_PL_FLAG_UNCACHED;
		man->default_caching = TTM_PL_FLAG_UNCACHED;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}
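
/*
 * Summary of the mapping established above: TTM_PL_SYSTEM stays with TTM's
 * default system manager, TTM_PL_TT is backed by amdgpu_gtt_mgr, TTM_PL_VRAM
 * by amdgpu_vram_mgr, and the small on-chip GDS/GWS/OA pools fall back to
 * the generic ttm_bo_manager range allocator.
 */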

/**
 * amdgpu_evict_flags - Compute placement flags
 *
 * @bo: The buffer object to evict
 * @placement: Possible destination(s) for evicted BO
 *
 * Fill in placement data when ttm_bo_evict() is called
 */
static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
				struct ttm_placement *placement)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_bo *abo;
	static const struct ttm_place placements = {
		.fpfn = 0,
		.lpfn = 0,
		.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM
	};

	/* Don't handle scatter gather BOs */
	if (bo->type == ttm_bo_type_sg) {
		placement->num_placement = 0;
		placement->num_busy_placement = 0;
		return;
	}

	/* Object isn't an AMDGPU object so ignore */
	if (!amdgpu_bo_is_amdgpu_bo(bo)) {
		placement->placement = &placements;
		placement->busy_placement = &placements;
		placement->num_placement = 1;
		placement->num_busy_placement = 1;
		return;
	}

	abo = ttm_to_amdgpu_bo(bo);
	switch (bo->mem.mem_type) {
	case AMDGPU_PL_GDS:
	case AMDGPU_PL_GWS:
	case AMDGPU_PL_OA:
		placement->num_placement = 0;
		placement->num_busy_placement = 0;
		return;

	case TTM_PL_VRAM:
		if (!adev->mman.buffer_funcs_enabled) {
			/* Move to system memory */
			amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
		} else if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
			   !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
			   amdgpu_bo_in_cpu_visible_vram(abo)) {

			/* Try evicting to the CPU inaccessible part of VRAM
			 * first, but only set GTT as busy placement, so this
			 * BO will be evicted to GTT rather than causing other
			 * BOs to be evicted from VRAM
			 */
			amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
							AMDGPU_GEM_DOMAIN_GTT);
			abo->placements[0].fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
			abo->placements[0].lpfn = 0;
			abo->placement.busy_placement = &abo->placements[1];
			abo->placement.num_busy_placement = 1;
		} else {
			/* Move to GTT memory */
			amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
		}
		break;
	case TTM_PL_TT:
	default:
		amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
		break;
	}
	*placement = abo->placement;
}
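
/*
 * Illustrative scenario for the TTM_PL_VRAM branch (hypothetical sizes): on a
 * board with 8 GiB of VRAM but a 256 MiB CPU-visible window, a BO in visible
 * VRAM gets fpfn set to the first invisible page, so the preferred placement
 * is invisible VRAM and the only busy (fallback) placement is GTT; other VRAM
 * residents are left untouched.
 */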

/**
 * amdgpu_verify_access - Verify access for a mmap call
 *
 * @bo: The buffer object to map
 * @filp: The file pointer from the process performing the mmap
 *
 * This is called by ttm_bo_mmap() to verify whether a process
 * has the right to mmap a BO to their process space.
 */
static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);

	/*
	 * Don't verify access for KFD BOs. They don't have a GEM
	 * object associated with them.
	 */
	if (abo->kfd_bo)
		return 0;

	if (amdgpu_ttm_tt_get_usermm(bo->ttm))
		return -EPERM;
	return drm_vma_node_verify_access(&abo->tbo.base.vma_node,
					  filp->private_data);
}

/**
 * amdgpu_move_null - Register memory for a buffer object
 *
 * @bo: The bo to assign the memory to
 * @new_mem: The memory to be assigned.
 *
 * Assign the memory from new_mem to the memory of the buffer object bo.
 */
static void amdgpu_move_null(struct ttm_buffer_object *bo,
			     struct ttm_mem_reg *new_mem)
{
	struct ttm_mem_reg *old_mem = &bo->mem;

	BUG_ON(old_mem->mm_node != NULL);
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;
}

/**
 * amdgpu_mm_node_addr - Compute the GPU relative offset of a GTT buffer.
 *
 * @bo: The bo to assign the memory to.
 * @mm_node: Memory manager node for drm allocator.
 * @mem: The region where the bo resides.
 */
static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
				    struct drm_mm_node *mm_node,
				    struct ttm_mem_reg *mem)
{
	uint64_t addr = 0;

	if (mm_node->start != AMDGPU_BO_INVALID_OFFSET) {
		addr = mm_node->start << PAGE_SHIFT;
		addr += bo->bdev->man[mem->mem_type].gpu_offset;
	}
	return addr;
}
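
/*
 * Worked example (hypothetical values): with PAGE_SHIFT == 12, a node at
 * mm_node->start == 0x100 in a domain whose manager reports
 * gpu_offset == 0x8000000000 yields (0x100 << 12) + 0x8000000000 ==
 * 0x8000100000 as the GPU address.
 */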

/**
 * amdgpu_find_mm_node - Helper function finds the drm_mm_node corresponding to
 * @offset. It also modifies the offset to be within the drm_mm_node returned
 *
 * @mem: The region where the bo resides.
 * @offset: The offset that drm_mm_node is used for finding.
 *
 */
static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_mem_reg *mem,
					       uint64_t *offset)
{
	struct drm_mm_node *mm_node = mem->mm_node;

	while (*offset >= (mm_node->size << PAGE_SHIFT)) {
		*offset -= (mm_node->size << PAGE_SHIFT);
		++mm_node;
	}
	return mm_node;
}

/**
 * amdgpu_ttm_map_buffer - Map memory into the GART windows
 * @bo: buffer object to map
 * @mem: memory object to map
 * @mm_node: drm_mm node object to map
 * @num_pages: number of pages to map
 * @offset: offset into @mm_node where to start
 * @window: which GART window to use
 * @ring: DMA ring to use for the copy
 * @tmz: if we should setup a TMZ enabled mapping
 * @addr: resulting address inside the MC address space
 *
 * Setup one of the GART windows to access a specific piece of memory or return
 * the physical address for local memory.
 */
static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
				 struct ttm_mem_reg *mem,
				 struct drm_mm_node *mm_node,
				 unsigned num_pages, uint64_t offset,
				 unsigned window, struct amdgpu_ring *ring,
				 bool tmz, uint64_t *addr)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_job *job;
	unsigned num_dw, num_bytes;
	struct dma_fence *fence;
	uint64_t src_addr, dst_addr;
	void *cpu_addr;
	uint64_t flags;
	unsigned int i;
	int r;

	BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
	       AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);

	/* Map only what can't be accessed directly */
	if (!tmz && mem->start != AMDGPU_BO_INVALID_OFFSET) {
		*addr = amdgpu_mm_node_addr(bo, mm_node, mem) + offset;
		return 0;
	}

	*addr = adev->gmc.gart_start;
	*addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
		AMDGPU_GPU_PAGE_SIZE;
	*addr += offset & ~PAGE_MASK;

	num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
	num_bytes = num_pages * 8;

	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes,
				     AMDGPU_IB_POOL_DELAYED, &job);
	if (r)
		return r;

	src_addr = num_dw * 4;
	src_addr += job->ibs[0].gpu_addr;

	dst_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
	dst_addr += window * AMDGPU_GTT_MAX_TRANSFER_SIZE * 8;
	amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
				dst_addr, num_bytes, false);

	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
	WARN_ON(job->ibs[0].length_dw > num_dw);

	flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, mem);
	if (tmz)
		flags |= AMDGPU_PTE_TMZ;

	cpu_addr = &job->ibs[0].ptr[num_dw];

	if (mem->mem_type == TTM_PL_TT) {
		struct ttm_dma_tt *dma;
		dma_addr_t *dma_address;

		dma = container_of(bo->ttm, struct ttm_dma_tt, ttm);
		dma_address = &dma->dma_address[offset >> PAGE_SHIFT];
		r = amdgpu_gart_map(adev, 0, num_pages, dma_address, flags,
				    cpu_addr);
		if (r)
			goto error_free;
	} else {
		dma_addr_t dma_address;

		dma_address = (mm_node->start << PAGE_SHIFT) + offset;
		dma_address += adev->vm_manager.vram_base_offset;

		for (i = 0; i < num_pages; ++i) {
			r = amdgpu_gart_map(adev, i << PAGE_SHIFT, 1,
					    &dma_address, flags, cpu_addr);
			if (r)
				goto error_free;

			dma_address += PAGE_SIZE;
		}
	}

	r = amdgpu_job_submit(job, &adev->mman.entity,
			      AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
	if (r)
		goto error_free;

	dma_fence_put(fence);

	return r;

error_free:
	amdgpu_job_free(job);
	return r;
}
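
/*
 * Window addressing sketch: window N starts at
 *	gart_start + N * AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GPU_PAGE_SIZE
 * and the num_pages * 8 bytes appended behind the IB hold the PTEs that
 * amdgpu_gart_map() generates (8 bytes per entry); the SDMA copy emitted
 * above then writes those PTEs into the GART table before the window is
 * used.
 */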

/**
 * amdgpu_ttm_copy_mem_to_mem - Helper function for copy
 * @adev: amdgpu device
 * @src: buffer/address where to read from
 * @dst: buffer/address where to write to
 * @size: number of bytes to copy
 * @tmz: if a secure copy should be used
 * @resv: resv object to sync to
 * @f: Returns the last fence if multiple jobs are submitted.
 *
 * The function copies @size bytes from {src->mem + src->offset} to
 * {dst->mem + dst->offset}. src->bo and dst->bo could be same BO for a
 * move and different for a BO to BO copy.
 *
 */
int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
			       const struct amdgpu_copy_mem *src,
			       const struct amdgpu_copy_mem *dst,
			       uint64_t size, bool tmz,
			       struct dma_resv *resv,
			       struct dma_fence **f)
{
	const uint32_t GTT_MAX_BYTES = (AMDGPU_GTT_MAX_TRANSFER_SIZE *
					AMDGPU_GPU_PAGE_SIZE);

	uint64_t src_node_size, dst_node_size, src_offset, dst_offset;
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
	struct drm_mm_node *src_mm, *dst_mm;
	struct dma_fence *fence = NULL;
	int r = 0;

	if (!adev->mman.buffer_funcs_enabled) {
		DRM_ERROR("Trying to move memory with ring turned off.\n");
		return -EINVAL;
	}

	src_offset = src->offset;
	src_mm = amdgpu_find_mm_node(src->mem, &src_offset);
	src_node_size = (src_mm->size << PAGE_SHIFT) - src_offset;

	dst_offset = dst->offset;
	dst_mm = amdgpu_find_mm_node(dst->mem, &dst_offset);
	dst_node_size = (dst_mm->size << PAGE_SHIFT) - dst_offset;

	mutex_lock(&adev->mman.gtt_window_lock);

	while (size) {
		uint32_t src_page_offset = src_offset & ~PAGE_MASK;
		uint32_t dst_page_offset = dst_offset & ~PAGE_MASK;
		struct dma_fence *next;
		uint32_t cur_size;
		uint64_t from, to;

		/* Copy size cannot exceed GTT_MAX_BYTES. So if src or dst
		 * begins at an offset, then adjust the size accordingly
		 */
		cur_size = max(src_page_offset, dst_page_offset);
		cur_size = min(min3(src_node_size, dst_node_size, size),
			       (uint64_t)(GTT_MAX_BYTES - cur_size));

		/* Map src to window 0 and dst to window 1. */
		r = amdgpu_ttm_map_buffer(src->bo, src->mem, src_mm,
					  PFN_UP(cur_size + src_page_offset),
					  src_offset, 0, ring, tmz, &from);
		if (r)
			goto error;

		r = amdgpu_ttm_map_buffer(dst->bo, dst->mem, dst_mm,
					  PFN_UP(cur_size + dst_page_offset),
					  dst_offset, 1, ring, tmz, &to);
		if (r)
			goto error;

		r = amdgpu_copy_buffer(ring, from, to, cur_size,
				       resv, &next, false, true, tmz);
		if (r)
			goto error;

		dma_fence_put(fence);
		fence = next;

		size -= cur_size;
		if (!size)
			break;

		src_node_size -= cur_size;
		if (!src_node_size) {
			++src_mm;
			src_node_size = src_mm->size << PAGE_SHIFT;
			src_offset = 0;
		} else {
			src_offset += cur_size;
		}

		dst_node_size -= cur_size;
		if (!dst_node_size) {
			++dst_mm;
			dst_node_size = dst_mm->size << PAGE_SHIFT;
			dst_offset = 0;
		} else {
			dst_offset += cur_size;
		}
	}
error:
	mutex_unlock(&adev->mman.gtt_window_lock);
	if (f)
		*f = dma_fence_get(fence);
	dma_fence_put(fence);
	return r;
}
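
/*
 * A typical call looks like this (sketch only; sbo/dbo are hypothetical
 * amdgpu_bo pointers and the caller must hold the reservations):
 *
 *	struct amdgpu_copy_mem src = { &sbo->tbo, &sbo->tbo.mem, 0 };
 *	struct amdgpu_copy_mem dst = { &dbo->tbo, &dbo->tbo.mem, 0 };
 *	struct dma_fence *f = NULL;
 *	int r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst, size, false,
 *					   dbo->tbo.base.resv, &f);
 */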

/**
 * amdgpu_move_blit - Copy an entire buffer to another buffer
 *
 * This is a helper called by amdgpu_bo_move() and amdgpu_move_vram_ram() to
 * help move buffers to and from VRAM.
 */
static int amdgpu_move_blit(struct ttm_buffer_object *bo,
			    bool evict, bool no_wait_gpu,
			    struct ttm_mem_reg *new_mem,
			    struct ttm_mem_reg *old_mem)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
	struct amdgpu_copy_mem src, dst;
	struct dma_fence *fence = NULL;
	int r;

	src.bo = bo;
	dst.bo = bo;
	src.mem = old_mem;
	dst.mem = new_mem;
	src.offset = 0;
	dst.offset = 0;

	r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst,
				       new_mem->num_pages << PAGE_SHIFT,
				       amdgpu_bo_encrypted(abo),
				       bo->base.resv, &fence);
	if (r)
		goto error;

	/* clear the space being freed */
	if (old_mem->mem_type == TTM_PL_VRAM &&
	    (abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) {
		struct dma_fence *wipe_fence = NULL;

		r = amdgpu_fill_buffer(ttm_to_amdgpu_bo(bo), AMDGPU_POISON,
				       NULL, &wipe_fence);
		if (r) {
			goto error;
		} else if (wipe_fence) {
			dma_fence_put(fence);
			fence = wipe_fence;
		}
	}

	/* Always block for VM page tables before committing the new location */
	if (bo->type == ttm_bo_type_kernel)
		r = ttm_bo_move_accel_cleanup(bo, fence, true, new_mem);
	else
		r = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
	dma_fence_put(fence);
	return r;

error:
	if (fence)
		dma_fence_wait(fence, false);
	dma_fence_put(fence);
	return r;
}

/**
 * amdgpu_move_vram_ram - Copy VRAM buffer to RAM buffer
 *
 * Called by amdgpu_bo_move().
 */
static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict,
				struct ttm_operation_ctx *ctx,
				struct ttm_mem_reg *new_mem)
{
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg tmp_mem;
	struct ttm_place placements;
	struct ttm_placement placement;
	int r;

	/* create space/pages for new_mem in GTT space */
	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	placement.num_placement = 1;
	placement.placement = &placements;
	placement.num_busy_placement = 1;
	placement.busy_placement = &placements;
	placements.fpfn = 0;
	placements.lpfn = 0;
	placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
	r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
	if (unlikely(r)) {
		pr_err("Failed to find GTT space for blit from VRAM\n");
		return r;
	}

	/* set caching flags */
	r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
	if (unlikely(r))
		goto out_cleanup;

	/* Bind the memory to the GTT space */
	r = ttm_tt_bind(bo->ttm, &tmp_mem, ctx);
	if (unlikely(r))
		goto out_cleanup;

	/* blit VRAM to GTT */
	r = amdgpu_move_blit(bo, evict, ctx->no_wait_gpu, &tmp_mem, old_mem);
	if (unlikely(r))
		goto out_cleanup;

	/* move BO (in tmp_mem) to new_mem */
	r = ttm_bo_move_ttm(bo, ctx, new_mem);
out_cleanup:
	ttm_bo_mem_put(bo, &tmp_mem);
	return r;
}

/**
 * amdgpu_move_ram_vram - Copy buffer from RAM to VRAM
 *
 * Called by amdgpu_bo_move().
 */
static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict,
				struct ttm_operation_ctx *ctx,
				struct ttm_mem_reg *new_mem)
{
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg tmp_mem;
	struct ttm_placement placement;
	struct ttm_place placements;
	int r;

	/* make space in GTT for old_mem buffer */
	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	placement.num_placement = 1;
	placement.placement = &placements;
	placement.num_busy_placement = 1;
	placement.busy_placement = &placements;
	placements.fpfn = 0;
	placements.lpfn = 0;
	placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
	r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
	if (unlikely(r)) {
		pr_err("Failed to find GTT space for blit to VRAM\n");
		return r;
	}

	/* move/bind old memory to GTT space */
	r = ttm_bo_move_ttm(bo, ctx, &tmp_mem);
	if (unlikely(r))
		goto out_cleanup;

	/* copy to VRAM */
	r = amdgpu_move_blit(bo, evict, ctx->no_wait_gpu, new_mem, old_mem);
	if (unlikely(r))
		goto out_cleanup;
out_cleanup:
	ttm_bo_mem_put(bo, &tmp_mem);
	return r;
}

/**
 * amdgpu_mem_visible - Check that memory can be accessed by ttm_bo_move_memcpy
 *
 * Called by amdgpu_bo_move()
 */
static bool amdgpu_mem_visible(struct amdgpu_device *adev,
			       struct ttm_mem_reg *mem)
{
	struct drm_mm_node *nodes = mem->mm_node;

	if (mem->mem_type == TTM_PL_SYSTEM ||
	    mem->mem_type == TTM_PL_TT)
		return true;
	if (mem->mem_type != TTM_PL_VRAM)
		return false;

	/* ttm_mem_reg_ioremap only supports contiguous memory */
	if (nodes->size != mem->num_pages)
		return false;

	return ((nodes->start + nodes->size) << PAGE_SHIFT)
		<= adev->gmc.visible_vram_size;
}

/**
 * amdgpu_bo_move - Move a buffer object to a new memory location
 *
 * Called by ttm_bo_handle_move_mem()
 */
static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
			  struct ttm_operation_ctx *ctx,
			  struct ttm_mem_reg *new_mem)
{
	struct amdgpu_device *adev;
	struct amdgpu_bo *abo;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int r;

	/* Can't move a pinned BO */
	abo = ttm_to_amdgpu_bo(bo);
	if (WARN_ON_ONCE(abo->pin_count > 0))
		return -EINVAL;

	adev = amdgpu_ttm_adev(bo->bdev);

	if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		amdgpu_move_null(bo, new_mem);
		return 0;
	}
	if ((old_mem->mem_type == TTM_PL_TT &&
	     new_mem->mem_type == TTM_PL_SYSTEM) ||
	    (old_mem->mem_type == TTM_PL_SYSTEM &&
	     new_mem->mem_type == TTM_PL_TT)) {
		/* bind is enough */
		amdgpu_move_null(bo, new_mem);
		return 0;
	}
	if (old_mem->mem_type == AMDGPU_PL_GDS ||
	    old_mem->mem_type == AMDGPU_PL_GWS ||
	    old_mem->mem_type == AMDGPU_PL_OA ||
	    new_mem->mem_type == AMDGPU_PL_GDS ||
	    new_mem->mem_type == AMDGPU_PL_GWS ||
	    new_mem->mem_type == AMDGPU_PL_OA) {
		/* Nothing to save here */
		amdgpu_move_null(bo, new_mem);
		return 0;
	}

	if (!adev->mman.buffer_funcs_enabled) {
		r = -ENODEV;
		goto memcpy;
	}

	if (old_mem->mem_type == TTM_PL_VRAM &&
	    new_mem->mem_type == TTM_PL_SYSTEM) {
		r = amdgpu_move_vram_ram(bo, evict, ctx, new_mem);
	} else if (old_mem->mem_type == TTM_PL_SYSTEM &&
		   new_mem->mem_type == TTM_PL_VRAM) {
		r = amdgpu_move_ram_vram(bo, evict, ctx, new_mem);
	} else {
		r = amdgpu_move_blit(bo, evict, ctx->no_wait_gpu,
				     new_mem, old_mem);
	}

	if (r) {
memcpy:
		/* Check that all memory is CPU accessible */
		if (!amdgpu_mem_visible(adev, old_mem) ||
		    !amdgpu_mem_visible(adev, new_mem)) {
			pr_err("Move buffer fallback to memcpy unavailable\n");
			return r;
		}

		r = ttm_bo_move_memcpy(bo, ctx, new_mem);
		if (r)
			return r;
	}

	if (bo->type == ttm_bo_type_device &&
	    new_mem->mem_type == TTM_PL_VRAM &&
	    old_mem->mem_type != TTM_PL_VRAM) {
		/* amdgpu_bo_fault_reserve_notify will re-set this if the CPU
		 * accesses the BO after it's moved.
		 */
		abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
	}

	/* update statistics */
	atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &adev->num_bytes_moved);
	return 0;
}
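
/*
 * Move strategy recap: NULL moves (system<->GTT transitions and the on-chip
 * pools) only retarget the memory, VRAM<->system traffic bounces through a
 * temporary GTT placement, everything else is a direct SDMA blit, and
 * ttm_bo_move_memcpy() is the CPU fallback when both ends are CPU-visible.
 */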

/**
 * amdgpu_ttm_io_mem_reserve - Reserve a block of memory during a fault
 *
 * Called by ttm_mem_io_reserve() ultimately via ttm_bo_vm_fault()
 */
static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
	struct drm_mm_node *mm_node = mem->mm_node;

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* system memory */
		return 0;
	case TTM_PL_TT:
		break;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		/* check if it's visible */
		if ((mem->bus.offset + mem->bus.size) > adev->gmc.visible_vram_size)
			return -EINVAL;
		/* Only physically contiguous buffers apply. In a contiguous
		 * buffer, size of the first mm_node would match the number of
		 * pages in ttm_mem_reg.
		 */
		if (adev->mman.aper_base_kaddr &&
		    (mm_node->size == mem->num_pages))
			mem->bus.addr = (u8 *)adev->mman.aper_base_kaddr +
					mem->bus.offset;

		mem->bus.base = adev->gmc.aper_base;
		mem->bus.is_iomem = true;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void amdgpu_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}

static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
					   unsigned long page_offset)
{
	uint64_t offset = (page_offset << PAGE_SHIFT);
	struct drm_mm_node *mm;

	mm = amdgpu_find_mm_node(&bo->mem, &offset);
	return (bo->mem.bus.base >> PAGE_SHIFT) + mm->start +
		(offset >> PAGE_SHIFT);
}

/*
 * TTM backend functions.
 */
struct amdgpu_ttm_tt {
	struct ttm_dma_tt	ttm;
	struct drm_gem_object	*gobj;
	u64			offset;
	uint64_t		userptr;
	struct task_struct	*usertask;
	uint32_t		userflags;
#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
	struct hmm_range	*range;
#endif
};

#ifdef CONFIG_DRM_AMDGPU_USERPTR
/**
 * amdgpu_ttm_tt_get_user_pages - get device accessible pages that back user
 * memory and start HMM tracking CPU page table update
 *
 * Calling function must call amdgpu_ttm_tt_get_user_pages_done() once and only
 * once afterwards to stop HMM tracking
 */
int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
{
	struct ttm_tt *ttm = bo->tbo.ttm;
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	unsigned long start = gtt->userptr;
	struct vm_area_struct *vma;
	struct hmm_range *range;
	unsigned long timeout;
	struct mm_struct *mm;
	unsigned long i;
	int r = 0;

	mm = bo->notifier.mm;
	if (unlikely(!mm)) {
		DRM_DEBUG_DRIVER("BO is not registered?\n");
		return -EFAULT;
	}

	/* Another get_user_pages is running at the same time?? */
	if (WARN_ON(gtt->range))
		return -EFAULT;

	if (!mmget_not_zero(mm)) /* Happens during process shutdown */
		return -ESRCH;

	range = kzalloc(sizeof(*range), GFP_KERNEL);
	if (unlikely(!range)) {
		r = -ENOMEM;
		goto out;
	}
	range->notifier = &bo->notifier;
	range->start = bo->notifier.interval_tree.start;
	range->end = bo->notifier.interval_tree.last + 1;
	range->default_flags = HMM_PFN_REQ_FAULT;
	if (!amdgpu_ttm_tt_is_readonly(ttm))
		range->default_flags |= HMM_PFN_REQ_WRITE;

	range->hmm_pfns = kvmalloc_array(ttm->num_pages,
					 sizeof(*range->hmm_pfns), GFP_KERNEL);
	if (unlikely(!range->hmm_pfns)) {
		r = -ENOMEM;
		goto out_free_ranges;
	}

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, start);
	if (unlikely(!vma || start < vma->vm_start)) {
		r = -EFAULT;
		goto out_unlock;
	}
	if (unlikely((gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) &&
		vma->vm_file)) {
		r = -EPERM;
		goto out_unlock;
	}
	up_read(&mm->mmap_sem);
	timeout = jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);

retry:
	range->notifier_seq = mmu_interval_read_begin(&bo->notifier);

	down_read(&mm->mmap_sem);
	r = hmm_range_fault(range);
	up_read(&mm->mmap_sem);
	if (unlikely(r)) {
		/*
		 * FIXME: This timeout should encompass the retry from
		 * mmu_interval_read_retry() as well.
		 */
		if (r == -EBUSY && !time_after(jiffies, timeout))
			goto retry;
		goto out_free_pfns;
	}

	/*
	 * Due to default_flags, all pages are HMM_PFN_VALID or
	 * hmm_range_fault() fails. FIXME: The pages cannot be touched outside
	 * the notifier_lock, and mmu_interval_read_retry() must be done first.
	 */
	for (i = 0; i < ttm->num_pages; i++)
		pages[i] = hmm_pfn_to_page(range->hmm_pfns[i]);

	gtt->range = range;
	mmput(mm);

	return 0;

out_unlock:
	up_read(&mm->mmap_sem);
out_free_pfns:
	kvfree(range->hmm_pfns);
out_free_ranges:
	kfree(range);
out:
	mmput(mm);
	return r;
}
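
/*
 * The caller is expected to close the HMM critical section, roughly (sketch):
 *
 *	r = amdgpu_ttm_tt_get_user_pages(bo, pages);
 *	// ... set up the GPU mapping ...
 *	if (!amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm))
 *		// CPU page tables changed under us, restart
 *
 * amdgpu_ttm_tt_get_user_pages_done() is defined below.
 */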

/**
 * amdgpu_ttm_tt_get_user_pages_done - stop HMM tracking the CPU page table change
 * Check if the pages backing this ttm range have been invalidated
 *
 * Returns: true if pages are still valid
 */
bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	bool r = false;

	if (!gtt || !gtt->userptr)
		return false;

	DRM_DEBUG_DRIVER("user_pages_done 0x%llx pages 0x%lx\n",
		gtt->userptr, ttm->num_pages);

	WARN_ONCE(!gtt->range || !gtt->range->hmm_pfns,
		"No user pages to check\n");

	if (gtt->range) {
		/*
		 * FIXME: Must always hold notifier_lock for this, and must
		 * not ignore the return code.
		 */
		r = mmu_interval_read_retry(gtt->range->notifier,
					gtt->range->notifier_seq);
		kvfree(gtt->range->hmm_pfns);
		kfree(gtt->range);
		gtt->range = NULL;
	}

	return !r;
}
#endif

/**
 * amdgpu_ttm_tt_set_user_pages - Copy pages in, putting old pages as necessary.
 *
 * Called by amdgpu_cs_list_validate(). This creates the page list
 * that backs user memory and will ultimately be mapped into the device
 * address space.
 */
void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages)
{
	unsigned long i;

	for (i = 0; i < ttm->num_pages; ++i)
		ttm->pages[i] = pages ? pages[i] : NULL;
}

/**
 * amdgpu_ttm_tt_pin_userptr - prepare the sg table with the user pages
 *
 * Called by amdgpu_ttm_backend_bind()
 */
static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	unsigned nents;
	int r;

	int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
	enum dma_data_direction direction = write ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

	/* Allocate an SG array and squash pages into it */
	r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
				      ttm->num_pages << PAGE_SHIFT,
				      GFP_KERNEL);
	if (r)
		goto release_sg;

	/* Map SG to device */
	r = -ENOMEM;
	nents = dma_map_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
	if (nents != ttm->sg->nents)
		goto release_sg;

	/* convert SG to linear array of pages and dma addresses */
	drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
					 gtt->ttm.dma_address, ttm->num_pages);

	return 0;

release_sg:
	kfree(ttm->sg);
	return r;
}

/**
 * amdgpu_ttm_tt_unpin_userptr - Unpin and unmap userptr pages
 */
static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
	enum dma_data_direction direction = write ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

	/* double check that we don't free the table twice */
	if (!ttm->sg->sgl)
		return;

	/* unmap the pages mapped to the device */
	dma_unmap_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction);

	sg_free_table(ttm->sg);

#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
	if (gtt->range) {
		unsigned long i;

		for (i = 0; i < ttm->num_pages; i++) {
			if (ttm->pages[i] !=
				hmm_pfn_to_page(gtt->range->hmm_pfns[i]))
				break;
		}

		WARN((i == ttm->num_pages), "Missing get_user_page_done\n");
	}
#endif
}

static int amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
				struct ttm_buffer_object *tbo,
				uint64_t flags)
{
	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(tbo);
	struct ttm_tt *ttm = tbo->ttm;
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	int r;

	if (amdgpu_bo_encrypted(abo))
		flags |= AMDGPU_PTE_TMZ;

	if (abo->flags & AMDGPU_GEM_CREATE_CP_MQD_GFX9) {
		uint64_t page_idx = 1;

		r = amdgpu_gart_bind(adev, gtt->offset, page_idx,
				ttm->pages, gtt->ttm.dma_address, flags);
		if (r)
			goto gart_bind_fail;

		/* The memory type of the first page defaults to UC. Now
		 * modify the memory type to NC from the second page of
		 * the BO onward.
		 */
		flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
		flags |= AMDGPU_PTE_MTYPE_VG10(AMDGPU_MTYPE_NC);

		r = amdgpu_gart_bind(adev,
				gtt->offset + (page_idx << PAGE_SHIFT),
				ttm->num_pages - page_idx,
				&ttm->pages[page_idx],
				&(gtt->ttm.dma_address[page_idx]), flags);
	} else {
		r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
				     ttm->pages, gtt->ttm.dma_address, flags);
	}

gart_bind_fail:
	if (r)
		DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
			  ttm->num_pages, gtt->offset);

	return r;
}
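
/*
 * Note on the CP_MQD_GFX9 path above: page 0 keeps the default UC memory type
 * the MQD requires, while the remaining pages are remapped as NC, which is
 * why the BO is bound with two separate amdgpu_gart_bind() calls.
 */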

/**
 * amdgpu_ttm_backend_bind - Bind GTT memory
 *
 * Called by ttm_tt_bind() on behalf of ttm_bo_handle_move_mem().
 * This handles binding GTT memory to the device address space.
 */
static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
				   struct ttm_mem_reg *bo_mem)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	uint64_t flags;
	int r = 0;

	if (gtt->userptr) {
		r = amdgpu_ttm_tt_pin_userptr(ttm);
		if (r) {
			DRM_ERROR("failed to pin userptr\n");
			return r;
		}
	}
	if (!ttm->num_pages) {
		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
		     ttm->num_pages, bo_mem, ttm);
	}

	if (bo_mem->mem_type == AMDGPU_PL_GDS ||
	    bo_mem->mem_type == AMDGPU_PL_GWS ||
	    bo_mem->mem_type == AMDGPU_PL_OA)
		return -EINVAL;

	if (!amdgpu_gtt_mgr_has_gart_addr(bo_mem)) {
		gtt->offset = AMDGPU_BO_INVALID_OFFSET;
		return 0;
	}

	/* compute PTE flags relevant to this BO memory */
	flags = amdgpu_ttm_tt_pte_flags(adev, ttm, bo_mem);

	/* bind pages into GART page tables */
	gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
	r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
		ttm->pages, gtt->ttm.dma_address, flags);

	if (r)
		DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
			  ttm->num_pages, gtt->offset);
	return r;
}

/**
 * amdgpu_ttm_alloc_gart - Allocate GART memory for buffer object
 */
int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_ttm_tt *gtt = (void *)bo->ttm;
	struct ttm_mem_reg tmp;
	struct ttm_placement placement;
	struct ttm_place placements;
	uint64_t addr, flags;
	int r;

	if (bo->mem.start != AMDGPU_BO_INVALID_OFFSET)
		return 0;

	addr = amdgpu_gmc_agp_addr(bo);
	if (addr != AMDGPU_BO_INVALID_OFFSET) {
		bo->mem.start = addr >> PAGE_SHIFT;
	} else {

		/* allocate GART space */
		tmp = bo->mem;
		tmp.mm_node = NULL;
		placement.num_placement = 1;
		placement.placement = &placements;
		placement.num_busy_placement = 1;
		placement.busy_placement = &placements;
		placements.fpfn = 0;
		placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
		placements.flags = (bo->mem.placement & ~TTM_PL_MASK_MEM) |
			TTM_PL_FLAG_TT;

		r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx);
		if (unlikely(r))
			return r;

		/* compute PTE flags for this buffer object */
		flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, &tmp);

		/* Bind pages */
		gtt->offset = (u64)tmp.start << PAGE_SHIFT;
		r = amdgpu_ttm_gart_bind(adev, bo, flags);
		if (unlikely(r)) {
			ttm_bo_mem_put(bo, &tmp);
			return r;
		}

		ttm_bo_mem_put(bo, &bo->mem);
		bo->mem = tmp;
	}

	bo->offset = (bo->mem.start << PAGE_SHIFT) +
		bo->bdev->man[bo->mem.mem_type].gpu_offset;

	return 0;
}
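
/*
 * Callers such as amdgpu_fill_buffer() below use this to guarantee a BO has a
 * valid GART (or AGP) address before queueing SDMA work against it; BOs that
 * already have a GPU offset return immediately.
 */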

/**
 * amdgpu_ttm_recover_gart - Rebind GTT pages
 *
 * Called by amdgpu_gtt_mgr_recover() from amdgpu_device_reset() to
 * rebind GTT pages during a GPU reset.
 */
int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
	uint64_t flags;
	int r;

	if (!tbo->ttm)
		return 0;

	flags = amdgpu_ttm_tt_pte_flags(adev, tbo->ttm, &tbo->mem);
	r = amdgpu_ttm_gart_bind(adev, tbo, flags);

	return r;
}

/**
 * amdgpu_ttm_backend_unbind - Unbind GTT mapped pages
 *
 * Called by ttm_tt_unbind() on behalf of ttm_bo_move_ttm() and
 * ttm_tt_destroy().
 */
static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	int r;

	/* if the pages have userptr pinning then clear that first */
	if (gtt->userptr)
		amdgpu_ttm_tt_unpin_userptr(ttm);

	if (gtt->offset == AMDGPU_BO_INVALID_OFFSET)
		return 0;

	/* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
	r = amdgpu_gart_unbind(adev, gtt->offset, ttm->num_pages);
	if (r)
		DRM_ERROR("failed to unbind %lu pages at 0x%08llX\n",
			  gtt->ttm.ttm.num_pages, gtt->offset);
	return r;
}

static void amdgpu_ttm_backend_destroy(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	if (gtt->usertask)
		put_task_struct(gtt->usertask);

	ttm_dma_tt_fini(&gtt->ttm);
	kfree(gtt);
}

static struct ttm_backend_func amdgpu_backend_func = {
	.bind = &amdgpu_ttm_backend_bind,
	.unbind = &amdgpu_ttm_backend_unbind,
	.destroy = &amdgpu_ttm_backend_destroy,
};

/**
 * amdgpu_ttm_tt_create - Create a ttm_tt object for a given BO
 *
 * @bo: The buffer object to create a GTT ttm_tt object around
 *
 * Called by ttm_tt_create().
 */
static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
					   uint32_t page_flags)
{
	struct amdgpu_ttm_tt *gtt;

	gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL);
	if (gtt == NULL)
		return NULL;

	gtt->ttm.ttm.func = &amdgpu_backend_func;
	gtt->gobj = &bo->base;

	/* allocate space for the uninitialized page entries */
	if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags)) {
		kfree(gtt);
		return NULL;
	}
	return &gtt->ttm.ttm;
}

/**
 * amdgpu_ttm_tt_populate - Map GTT pages visible to the device
 *
 * Map the pages of a ttm_tt object to an address space visible
 * to the underlying device.
 */
static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm,
				  struct ttm_operation_ctx *ctx)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	/* user pages are bound by amdgpu_ttm_tt_pin_userptr() */
	if (gtt && gtt->userptr) {
		ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
		if (!ttm->sg)
			return -ENOMEM;

		ttm->page_flags |= TTM_PAGE_FLAG_SG;
		ttm->state = tt_unbound;
		return 0;
	}

	if (ttm->page_flags & TTM_PAGE_FLAG_SG) {
		if (!ttm->sg) {
			struct dma_buf_attachment *attach;
			struct sg_table *sgt;

			attach = gtt->gobj->import_attach;
			sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
			if (IS_ERR(sgt))
				return PTR_ERR(sgt);

			ttm->sg = sgt;
		}

		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
						 gtt->ttm.dma_address,
						 ttm->num_pages);
		ttm->state = tt_unbound;
		return 0;
	}

#ifdef CONFIG_SWIOTLB
	if (adev->need_swiotlb && swiotlb_nr_tbl()) {
		return ttm_dma_populate(&gtt->ttm, adev->dev, ctx);
	}
#endif

	/* fall back to generic helper to populate the page array
	 * and map them to the device */
	return ttm_populate_and_map_pages(adev->dev, &gtt->ttm, ctx);
}

/**
 * amdgpu_ttm_tt_unpopulate - unmap GTT pages and unpopulate page arrays
 *
 * Unmaps pages of a ttm_tt object from the device address space and
 * unpopulates the page array backing it.
 */
static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	struct amdgpu_device *adev;

	if (gtt && gtt->userptr) {
		amdgpu_ttm_tt_set_user_pages(ttm, NULL);
		kfree(ttm->sg);
		ttm->page_flags &= ~TTM_PAGE_FLAG_SG;
		return;
	}

	if (ttm->sg && gtt->gobj->import_attach) {
		struct dma_buf_attachment *attach;

		attach = gtt->gobj->import_attach;
		dma_buf_unmap_attachment(attach, ttm->sg, DMA_BIDIRECTIONAL);
		ttm->sg = NULL;
		return;
	}

	if (ttm->page_flags & TTM_PAGE_FLAG_SG)
		return;

	adev = amdgpu_ttm_adev(ttm->bdev);

#ifdef CONFIG_SWIOTLB
	if (adev->need_swiotlb && swiotlb_nr_tbl()) {
		ttm_dma_unpopulate(&gtt->ttm, adev->dev);
		return;
	}
#endif

	/* fall back to generic helper to unmap and unpopulate array */
	ttm_unmap_and_unpopulate_pages(adev->dev, &gtt->ttm);
}

/**
 * amdgpu_ttm_tt_set_userptr - Initialize userptr GTT ttm_tt for the current
 * task
 *
 * @ttm: The ttm_tt object to bind this userptr object to
 * @addr: The address in the current tasks VM space to use
 * @flags: Requirements of userptr object.
 *
 * Called by amdgpu_gem_userptr_ioctl() to bind userptr pages
 * to current task
 */
int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
			      uint32_t flags)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	if (gtt == NULL)
		return -EINVAL;

	gtt->userptr = addr;
	gtt->userflags = flags;

	if (gtt->usertask)
		put_task_struct(gtt->usertask);
	gtt->usertask = current->group_leader;
	get_task_struct(gtt->usertask);

	return 0;
}

/**
 * amdgpu_ttm_tt_get_usermm - Return memory manager for ttm_tt object
 */
struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	if (gtt == NULL)
		return NULL;

	if (gtt->usertask == NULL)
		return NULL;

	return gtt->usertask->mm;
}

/**
 * amdgpu_ttm_tt_affect_userptr - Determine if a ttm_tt object lies inside an
 * address range for the current task.
 *
 */
bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
				  unsigned long end)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	unsigned long size;

	if (gtt == NULL || !gtt->userptr)
		return false;

	/* Return false if no part of the ttm_tt object lies within
	 * the range
	 */
	size = (unsigned long)gtt->ttm.ttm.num_pages * PAGE_SIZE;
	if (gtt->userptr > end || gtt->userptr + size <= start)
		return false;

	return true;
}

/**
 * amdgpu_ttm_tt_is_userptr - Are the pages backed by a userptr?
 */
bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	if (gtt == NULL || !gtt->userptr)
		return false;

	return true;
}

/**
 * amdgpu_ttm_tt_is_readonly - Is the ttm_tt object read only?
 */
bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	if (gtt == NULL)
		return false;

	return !!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
}

/**
 * amdgpu_ttm_tt_pde_flags - Compute PDE flags for ttm_tt object
 *
 * @ttm: The ttm_tt object to compute the flags for
 * @mem: The memory registry backing this ttm_tt object
 *
 * Figure out the flags to use for a VM PDE (Page Directory Entry).
 */
uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
{
	uint64_t flags = 0;

	if (mem && mem->mem_type != TTM_PL_SYSTEM)
		flags |= AMDGPU_PTE_VALID;

	if (mem && mem->mem_type == TTM_PL_TT) {
		flags |= AMDGPU_PTE_SYSTEM;

		if (ttm->caching_state == tt_cached)
			flags |= AMDGPU_PTE_SNOOPED;
	}

	return flags;
}
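
/*
 * Example composition: a cached GTT mapping comes back as
 * AMDGPU_PTE_VALID | AMDGPU_PTE_SYSTEM | AMDGPU_PTE_SNOOPED, while a VRAM
 * mapping only carries AMDGPU_PTE_VALID from this helper; the access bits
 * are layered on top by amdgpu_ttm_tt_pte_flags() below.
 */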

/**
 * amdgpu_ttm_tt_pte_flags - Compute PTE flags for ttm_tt object
 *
 * @ttm: The ttm_tt object to compute the flags for
 * @mem: The memory registry backing this ttm_tt object
 *
 * Figure out the flags to use for a VM PTE (Page Table Entry).
 */
uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
				 struct ttm_mem_reg *mem)
{
	uint64_t flags = amdgpu_ttm_tt_pde_flags(ttm, mem);

	flags |= adev->gart.gart_pte_flags;
	flags |= AMDGPU_PTE_READABLE;

	if (!amdgpu_ttm_tt_is_readonly(ttm))
		flags |= AMDGPU_PTE_WRITEABLE;

	return flags;
}

/**
 * amdgpu_ttm_bo_eviction_valuable - Check to see if we can evict a buffer
 * object.
 *
 * Return true if eviction is sensible. Called by ttm_mem_evict_first() on
 * behalf of ttm_bo_mem_force_space() which tries to evict buffer objects until
 * it can find space for a new object and by ttm_bo_force_list_clean() which is
 * used to clean out a memory space.
 */
static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
					    const struct ttm_place *place)
{
	unsigned long num_pages = bo->mem.num_pages;
	struct drm_mm_node *node = bo->mem.mm_node;
	struct dma_resv_list *flist;
	struct dma_fence *f;
	int i;

	if (bo->type == ttm_bo_type_kernel &&
	    !amdgpu_vm_evictable(ttm_to_amdgpu_bo(bo)))
		return false;

	/* If bo is a KFD BO, check if the bo belongs to the current process.
	 * If true, then return false as any KFD process needs all its BOs to
	 * be resident to run successfully
	 */
	flist = dma_resv_get_list(bo->base.resv);
	if (flist) {
		for (i = 0; i < flist->shared_count; ++i) {
			f = rcu_dereference_protected(flist->shared[i],
				dma_resv_held(bo->base.resv));
			if (amdkfd_fence_check_mm(f, current->mm))
				return false;
		}
	}

	switch (bo->mem.mem_type) {
	case TTM_PL_TT:
		if (amdgpu_bo_is_amdgpu_bo(bo) &&
		    amdgpu_bo_encrypted(ttm_to_amdgpu_bo(bo)))
			return false;
		return true;

	case TTM_PL_VRAM:
		/* Check each drm MM node individually */
		while (num_pages) {
			if (place->fpfn < (node->start + node->size) &&
			    !(place->lpfn && place->lpfn <= node->start))
				return true;

			num_pages -= node->size;
			++node;
		}
		return false;

	default:
		break;
	}

	return ttm_bo_eviction_valuable(bo, place);
}

/**
 * amdgpu_ttm_access_memory - Read or Write memory that backs a buffer object.
 *
 * @bo:  The buffer object to read/write
 * @offset:  Offset into buffer object
 * @buf:  Secondary buffer to write/read from
 * @len: Length in bytes of access
 * @write:  true if writing
 *
 * This is used to access VRAM that backs a buffer object via MMIO
 * access for debugging purposes.
 */
static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
				    unsigned long offset,
				    void *buf, int len, int write)
{
	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
	struct drm_mm_node *nodes;
	uint32_t value = 0;
	int ret = 0;
	uint64_t pos;
	unsigned long flags;

	if (bo->mem.mem_type != TTM_PL_VRAM)
		return -EIO;

	pos = offset;
	nodes = amdgpu_find_mm_node(&abo->tbo.mem, &pos);
	pos += (nodes->start << PAGE_SHIFT);

	while (len && pos < adev->gmc.mc_vram_size) {
		uint64_t aligned_pos = pos & ~(uint64_t)3;
		uint64_t bytes = 4 - (pos & 3);
		uint32_t shift = (pos & 3) * 8;
		uint32_t mask = 0xffffffff << shift;

		if (len < bytes) {
			mask &= 0xffffffff >> (bytes - len) * 8;
			bytes = len;
		}

		if (mask != 0xffffffff) {
			spin_lock_irqsave(&adev->mmio_idx_lock, flags);
			WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)aligned_pos) | 0x80000000);
			WREG32_NO_KIQ(mmMM_INDEX_HI, aligned_pos >> 31);
			if (!write || mask != 0xffffffff)
				value = RREG32_NO_KIQ(mmMM_DATA);
			if (write) {
				value &= ~mask;
				value |= (*(uint32_t *)buf << shift) & mask;
				WREG32_NO_KIQ(mmMM_DATA, value);
			}
			spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
			if (!write) {
				value = (value & mask) >> shift;
				memcpy(buf, &value, bytes);
			}
		} else {
			bytes = (nodes->start + nodes->size) << PAGE_SHIFT;
			bytes = min(bytes - pos, (uint64_t)len & ~0x3ull);

			amdgpu_device_vram_access(adev, pos, (uint32_t *)buf,
						  bytes, write);
		}

		ret += bytes;
		buf = (uint8_t *)buf + bytes;
		pos += bytes;
		len -= bytes;
		if (pos >= (nodes->start + nodes->size) << PAGE_SHIFT) {
			++nodes;
			pos = (nodes->start << PAGE_SHIFT);
		}
	}

	return ret;
}
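
/*
 * Worked example for the read-modify-write path above (hypothetical access):
 * a 2-byte read at pos == 0x1001 gives aligned_pos == 0x1000, shift == 8,
 * bytes == 2 and mask == 0x00ffff00, so the whole dword at 0x1000 is read
 * via MM_INDEX/MM_DATA and bits 8..23 are extracted into the output buffer.
 */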

static struct ttm_bo_driver amdgpu_bo_driver = {
	.ttm_tt_create = &amdgpu_ttm_tt_create,
	.ttm_tt_populate = &amdgpu_ttm_tt_populate,
	.ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
	.init_mem_type = &amdgpu_init_mem_type,
	.eviction_valuable = amdgpu_ttm_bo_eviction_valuable,
	.evict_flags = &amdgpu_evict_flags,
	.move = &amdgpu_bo_move,
	.verify_access = &amdgpu_verify_access,
	.move_notify = &amdgpu_bo_move_notify,
	.release_notify = &amdgpu_bo_release_notify,
	.fault_reserve_notify = &amdgpu_bo_fault_reserve_notify,
	.io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
	.io_mem_free = &amdgpu_ttm_io_mem_free,
	.io_mem_pfn = amdgpu_ttm_io_mem_pfn,
	.access_memory = &amdgpu_ttm_access_memory,
	.del_from_lru_notify = &amdgpu_vm_del_from_lru_notify
};

/*
 * Firmware Reservation functions
 */
/**
 * amdgpu_ttm_fw_reserve_vram_fini - free fw reserved vram
 *
 * @adev: amdgpu_device pointer
 *
 * free fw reserved vram if it has been reserved.
 */
static void amdgpu_ttm_fw_reserve_vram_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->fw_vram_usage.reserved_bo,
		NULL, &adev->fw_vram_usage.va);
}

/**
 * amdgpu_ttm_fw_reserve_vram_init - create bo vram reservation from fw
 *
 * @adev: amdgpu_device pointer
 *
 * create bo vram reservation from fw.
 */
static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
{
	uint64_t vram_size = adev->gmc.visible_vram_size;

	adev->fw_vram_usage.va = NULL;
	adev->fw_vram_usage.reserved_bo = NULL;

	if (adev->fw_vram_usage.size == 0 ||
	    adev->fw_vram_usage.size > vram_size)
		return 0;

	return amdgpu_bo_create_kernel_at(adev,
					  adev->fw_vram_usage.start_offset,
					  adev->fw_vram_usage.size,
					  AMDGPU_GEM_DOMAIN_VRAM,
					  &adev->fw_vram_usage.reserved_bo,
					  &adev->fw_vram_usage.va);
}

/*
 * Memory training reservation functions
 */

/**
 * amdgpu_ttm_training_reserve_vram_fini - free memory training reserved vram
 *
 * @adev: amdgpu_device pointer
 *
 * free memory training reserved vram if it has been reserved.
 */
static int amdgpu_ttm_training_reserve_vram_fini(struct amdgpu_device *adev)
{
	struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;

	ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT;
	amdgpu_bo_free_kernel(&ctx->c2p_bo, NULL, NULL);
	ctx->c2p_bo = NULL;

	return 0;
}

static u64 amdgpu_ttm_training_get_c2p_offset(u64 vram_size)
{
	if ((vram_size & (SZ_1M - 1)) < (SZ_4K + 1))
		vram_size -= SZ_1M;

	return ALIGN(vram_size, SZ_1M);
}

/**
 * amdgpu_ttm_training_reserve_vram_init - create bo vram reservation from memory training
 *
 * @adev: amdgpu_device pointer
 *
 * create bo vram reservation from memory training.
 */
static int amdgpu_ttm_training_reserve_vram_init(struct amdgpu_device *adev)
{
	int ret;
	struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;

	memset(ctx, 0, sizeof(*ctx));
	if (!adev->fw_vram_usage.mem_train_support) {
		DRM_DEBUG("memory training is not supported!\n");
		return 0;
	}

	ctx->c2p_train_data_offset = amdgpu_ttm_training_get_c2p_offset(adev->gmc.mc_vram_size);
	ctx->p2c_train_data_offset = (adev->gmc.mc_vram_size - GDDR6_MEM_TRAINING_OFFSET);
	ctx->train_data_size = GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES;

	DRM_DEBUG("train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
		  ctx->train_data_size,
		  ctx->p2c_train_data_offset,
		  ctx->c2p_train_data_offset);

	ret = amdgpu_bo_create_kernel_at(adev,
					 ctx->c2p_train_data_offset,
					 ctx->train_data_size,
					 AMDGPU_GEM_DOMAIN_VRAM,
					 &ctx->c2p_bo,
					 NULL);
	if (ret) {
		DRM_ERROR("alloc c2p_bo failed(%d)!\n", ret);
		amdgpu_ttm_training_reserve_vram_fini(adev);
		return ret;
	}

	ctx->init = PSP_MEM_TRAIN_RESERVE_SUCCESS;
	return 0;
}

/**
 * amdgpu_ttm_init - Init the memory management (ttm) as well as various
 * gtt/vram related fields.
 *
 * This initializes all of the memory space pools that the TTM layer
 * will need such as the GTT space (system memory mapped to the device),
 * VRAM (on-board memory), and on-chip memories (GDS, GWS, OA) which
 * can be mapped per VMID.
 */
int amdgpu_ttm_init(struct amdgpu_device *adev)
{
	uint64_t gtt_size;
	int r;
	u64 vis_vram_limit;
	void *stolen_vga_buf;

	mutex_init(&adev->mman.gtt_window_lock);

	/* No other users of the address space, so set it to 0 */
	r = ttm_bo_device_init(&adev->mman.bdev,
			       &amdgpu_bo_driver,
			       adev->ddev->anon_inode->i_mapping,
			       adev->ddev->vma_offset_manager,
			       dma_addressing_limited(adev->dev));
	if (r) {
		DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
		return r;
	}
	adev->mman.initialized = true;

	/* We opt to avoid OOM on system pages allocations */
	adev->mman.bdev.no_retry = true;

	/* Initialize VRAM pool with all of VRAM divided into pages */
	r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_VRAM,
			   adev->gmc.real_vram_size >> PAGE_SHIFT);
	if (r) {
		DRM_ERROR("Failed initializing VRAM heap.\n");
		return r;
	}

	/* Reduce size of CPU-visible VRAM if requested */
	vis_vram_limit = (u64)amdgpu_vis_vram_limit * 1024 * 1024;
	if (amdgpu_vis_vram_limit > 0 &&
	    vis_vram_limit <= adev->gmc.visible_vram_size)
		adev->gmc.visible_vram_size = vis_vram_limit;

	/* Change the size here instead of the init above so only lpfn is affected */
	amdgpu_ttm_set_buffer_funcs_status(adev, false);
#ifdef CONFIG_64BIT
	adev->mman.aper_base_kaddr = ioremap_wc(adev->gmc.aper_base,
						adev->gmc.visible_vram_size);
#endif

	/*
	 * The reserved VRAM for firmware must be pinned to the specified
	 * place on the VRAM, so reserve it early.
	 */
	r = amdgpu_ttm_fw_reserve_vram_init(adev);
	if (r)
		return r;

	/*
	 * The reserved VRAM for memory training must be pinned to the specified
	 * place on the VRAM, so reserve it early.
	 */
	if (!amdgpu_sriov_vf(adev)) {
		r = amdgpu_ttm_training_reserve_vram_init(adev);
		if (r)
			return r;
	}

	/* allocate memory as required for VGA
	 * This is used for VGA emulation and pre-OS scanout buffers to
	 * avoid display artifacts while transitioning between pre-OS
	 * and driver. */
	r = amdgpu_bo_create_kernel(adev, adev->gmc.stolen_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM,
				    &adev->stolen_vga_memory,
				    NULL, &stolen_vga_buf);
	if (r)
		return r;

	/*
	 * reserve TMR memory at the top of VRAM which holds
	 * IP Discovery data and is protected by PSP.
	 */
	if (adev->discovery_tmr_size > 0) {
		r = amdgpu_bo_create_kernel_at(adev,
			adev->gmc.real_vram_size - adev->discovery_tmr_size,
			adev->discovery_tmr_size,
			AMDGPU_GEM_DOMAIN_VRAM,
			&adev->discovery_memory,
			NULL);
		if (r)
			return r;
	}

	DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
		 (unsigned) (adev->gmc.real_vram_size / (1024 * 1024)));

	/* Compute GTT size, either based on 3/4 of the size of RAM
	 * or whatever the user passed on module init */
	if (amdgpu_gtt_size == -1) {
		struct sysinfo si;

		si_meminfo(&si);
		gtt_size = min(max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
			       adev->gmc.mc_vram_size),
			       ((uint64_t)si.totalram * si.mem_unit * 3/4));
	} else {
		gtt_size = (uint64_t)amdgpu_gtt_size << 20;
	}

	/* Initialize GTT memory pool */
	r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_TT, gtt_size >> PAGE_SHIFT);
	if (r) {
		DRM_ERROR("Failed initializing GTT heap.\n");
		return r;
	}
	DRM_INFO("amdgpu: %uM of GTT memory ready.\n",
		 (unsigned)(gtt_size / (1024 * 1024)));

	/* Initialize various on-chip memory pools */
	r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GDS,
			   adev->gds.gds_size);
	if (r) {
		DRM_ERROR("Failed initializing GDS heap.\n");
		return r;
	}

	r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GWS,
			   adev->gds.gws_size);
	if (r) {
		DRM_ERROR("Failed initializing gws heap.\n");
		return r;
	}

	r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_OA,
			   adev->gds.oa_size);
	if (r) {
		DRM_ERROR("Failed initializing oa heap.\n");
		return r;
	}

	return 0;
}
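
/*
 * Example of the default GTT sizing above (hypothetical machine): with 8 GiB
 * of VRAM and 16 GiB of RAM, gtt_size = min(max(default, 8 GiB), 12 GiB) =
 * 8 GiB, i.e. GTT is made large enough to back all of VRAM for eviction but
 * never more than 3/4 of system memory.
 */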

/**
 * amdgpu_ttm_late_init - Handle any late initialization for amdgpu_ttm
 */
void amdgpu_ttm_late_init(struct amdgpu_device *adev)
{
	void *stolen_vga_buf;
	/* return the VGA stolen memory (if any) back to VRAM */
	amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, &stolen_vga_buf);
}

/**
 * amdgpu_ttm_fini - De-initialize the TTM memory pools
 */
void amdgpu_ttm_fini(struct amdgpu_device *adev)
{
	if (!adev->mman.initialized)
		return;

	amdgpu_ttm_training_reserve_vram_fini(adev);
	/* return the IP Discovery TMR memory back to VRAM */
	amdgpu_bo_free_kernel(&adev->discovery_memory, NULL, NULL);
	amdgpu_ttm_fw_reserve_vram_fini(adev);

	if (adev->mman.aper_base_kaddr)
		iounmap(adev->mman.aper_base_kaddr);
	adev->mman.aper_base_kaddr = NULL;

	ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_VRAM);
	ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_TT);
	ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GDS);
	ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GWS);
	ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_OA);
	ttm_bo_device_release(&adev->mman.bdev);
	adev->mman.initialized = false;
	DRM_INFO("amdgpu: ttm finalized\n");
}

/**
 * amdgpu_ttm_set_buffer_funcs_status - enable/disable use of buffer functions
 *
 * @adev: amdgpu_device pointer
 * @enable: true when we can use buffer functions.
 *
 * Enable/disable use of buffer functions during suspend/resume. This should
 * only be called at bootup or when userspace isn't running.
 */
void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
{
	struct ttm_mem_type_manager *man = &adev->mman.bdev.man[TTM_PL_VRAM];
	uint64_t size;
	int r;

	if (!adev->mman.initialized || adev->in_gpu_reset ||
	    adev->mman.buffer_funcs_enabled == enable)
		return;

	if (enable) {
		struct amdgpu_ring *ring;
		struct drm_gpu_scheduler *sched;

		ring = adev->mman.buffer_funcs_ring;
		sched = &ring->sched;
		r = drm_sched_entity_init(&adev->mman.entity,
					  DRM_SCHED_PRIORITY_KERNEL, &sched,
					  1, NULL);
		if (r) {
			DRM_ERROR("Failed setting up TTM BO move entity (%d)\n",
				  r);
			return;
		}
	} else {
		drm_sched_entity_destroy(&adev->mman.entity);
		dma_fence_put(man->move);
		man->move = NULL;
	}

	/* this just adjusts TTM size idea, which sets lpfn to the correct value */
	if (enable)
		size = adev->gmc.real_vram_size;
	else
		size = adev->gmc.visible_vram_size;
	man->size = size >> PAGE_SHIFT;
	adev->mman.buffer_funcs_enabled = enable;
}

int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv = filp->private_data;
	struct amdgpu_device *adev = file_priv->minor->dev->dev_private;

	if (adev == NULL)
		return -EINVAL;

	return ttm_bo_mmap(filp, vma, &adev->mman.bdev);
}

int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
		       uint64_t dst_offset, uint32_t byte_count,
		       struct dma_resv *resv,
		       struct dma_fence **fence, bool direct_submit,
		       bool vm_needs_flush, bool tmz)
{
	enum amdgpu_ib_pool_type pool = direct_submit ? AMDGPU_IB_POOL_DIRECT :
		AMDGPU_IB_POOL_DELAYED;
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_job *job;

	uint32_t max_bytes;
	unsigned num_loops, num_dw;
	unsigned i;
	int r;

	if (direct_submit && !ring->sched.ready) {
		DRM_ERROR("Trying to move memory with ring turned off.\n");
		return -EINVAL;
	}

	max_bytes = adev->mman.buffer_funcs->copy_max_bytes;
	num_loops = DIV_ROUND_UP(byte_count, max_bytes);
	num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->copy_num_dw, 8);

	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, pool, &job);
	if (r)
		return r;

	if (vm_needs_flush) {
		job->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gart.bo);
		job->vm_needs_flush = true;
	}
	if (resv) {
		r = amdgpu_sync_resv(adev, &job->sync, resv,
				     AMDGPU_SYNC_ALWAYS,
				     AMDGPU_FENCE_OWNER_UNDEFINED);
		if (r) {
			DRM_ERROR("sync failed (%d).\n", r);
			goto error_free;
		}
	}

	for (i = 0; i < num_loops; i++) {
		uint32_t cur_size_in_bytes = min(byte_count, max_bytes);

		amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_offset,
					dst_offset, cur_size_in_bytes, tmz);

		src_offset += cur_size_in_bytes;
		dst_offset += cur_size_in_bytes;
		byte_count -= cur_size_in_bytes;
	}

	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
	WARN_ON(job->ibs[0].length_dw > num_dw);
	if (direct_submit)
		r = amdgpu_job_submit_direct(job, ring, fence);
	else
		r = amdgpu_job_submit(job, &adev->mman.entity,
				      AMDGPU_FENCE_OWNER_UNDEFINED, fence);
	if (r)
		goto error_free;

	return r;

error_free:
	amdgpu_job_free(job);
	DRM_ERROR("Error scheduling IBs (%d)\n", r);
	return r;
}
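
/*
 * Usage sketch: amdgpu_ttm_copy_mem_to_mem() above drives this with the two
 * GART window addresses, e.g.
 *
 *	r = amdgpu_copy_buffer(ring, from, to, cur_size,
 *			       resv, &next, false, true, tmz);
 *
 * direct_submit bypasses the scheduler entity and is meant for cases like
 * early init or reset paths where the scheduler isn't available.
 */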

int amdgpu_fill_buffer(struct amdgpu_bo *bo,
		       uint32_t src_data,
		       struct dma_resv *resv,
		       struct dma_fence **fence)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	uint32_t max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;

	struct drm_mm_node *mm_node;
	unsigned long num_pages;
	unsigned int num_loops, num_dw;

	struct amdgpu_job *job;
	int r;

	if (!adev->mman.buffer_funcs_enabled) {
		DRM_ERROR("Trying to clear memory with ring turned off.\n");
		return -EINVAL;
	}

	if (bo->tbo.mem.mem_type == TTM_PL_TT) {
		r = amdgpu_ttm_alloc_gart(&bo->tbo);
		if (r)
			return r;
	}

	num_pages = bo->tbo.num_pages;
	mm_node = bo->tbo.mem.mm_node;
	num_loops = 0;
	while (num_pages) {
		uint64_t byte_count = mm_node->size << PAGE_SHIFT;

		num_loops += DIV_ROUND_UP_ULL(byte_count, max_bytes);
		num_pages -= mm_node->size;
		++mm_node;
	}
	num_dw = num_loops * adev->mman.buffer_funcs->fill_num_dw;

	/* for IB padding */
	num_dw += 64;

	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, AMDGPU_IB_POOL_DELAYED,
				     &job);
	if (r)
		return r;

	if (resv) {
		r = amdgpu_sync_resv(adev, &job->sync, resv,
				     AMDGPU_SYNC_ALWAYS,
				     AMDGPU_FENCE_OWNER_UNDEFINED);
		if (r) {
			DRM_ERROR("sync failed (%d).\n", r);
			goto error_free;
		}
	}

	num_pages = bo->tbo.num_pages;
	mm_node = bo->tbo.mem.mm_node;

	while (num_pages) {
		uint64_t byte_count = mm_node->size << PAGE_SHIFT;
		uint64_t dst_addr;

		dst_addr = amdgpu_mm_node_addr(&bo->tbo, mm_node, &bo->tbo.mem);
		while (byte_count) {
			uint32_t cur_size_in_bytes = min_t(uint64_t, byte_count,
							   max_bytes);

			amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data,
						dst_addr, cur_size_in_bytes);

			dst_addr += cur_size_in_bytes;
			byte_count -= cur_size_in_bytes;
		}

		num_pages -= mm_node->size;
		++mm_node;
	}

	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
	WARN_ON(job->ibs[0].length_dw > num_dw);
	r = amdgpu_job_submit(job, &adev->mman.entity,
			      AMDGPU_FENCE_OWNER_UNDEFINED, fence);
	if (r)
		goto error_free;

	return 0;

error_free:
	amdgpu_job_free(job);
	return r;
}

#if defined(CONFIG_DEBUG_FS)

static int amdgpu_mm_dump_table(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	unsigned ttm_pl = (uintptr_t)node->info_ent->data;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct ttm_mem_type_manager *man = &adev->mman.bdev.man[ttm_pl];
	struct drm_printer p = drm_seq_file_printer(m);

	man->func->debug(man, &p);
	return 0;
}

static const struct drm_info_list amdgpu_ttm_debugfs_list[] = {
	{"amdgpu_vram_mm", amdgpu_mm_dump_table, 0, (void *)TTM_PL_VRAM},
	{"amdgpu_gtt_mm", amdgpu_mm_dump_table, 0, (void *)TTM_PL_TT},
	{"amdgpu_gds_mm", amdgpu_mm_dump_table, 0, (void *)AMDGPU_PL_GDS},
	{"amdgpu_gws_mm", amdgpu_mm_dump_table, 0, (void *)AMDGPU_PL_GWS},
	{"amdgpu_oa_mm", amdgpu_mm_dump_table, 0, (void *)AMDGPU_PL_OA},
	{"ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL},
#ifdef CONFIG_SWIOTLB
	{"ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL}
#endif
};

/**
 * amdgpu_ttm_vram_read - Linear read access to VRAM
 *
 * Accesses VRAM via MMIO for debugging purposes.
 */
static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
				    size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	if (*pos >= adev->gmc.mc_vram_size)
		return -ENXIO;

	size = min(size, (size_t)(adev->gmc.mc_vram_size - *pos));
	while (size) {
		size_t bytes = min(size, AMDGPU_TTM_VRAM_MAX_DW_READ * 4);
		uint32_t value[AMDGPU_TTM_VRAM_MAX_DW_READ];

		amdgpu_device_vram_access(adev, *pos, value, bytes, false);
		if (copy_to_user(buf, value, bytes))
			return -EFAULT;

		result += bytes;
		buf += bytes;
		*pos += bytes;
		size -= bytes;
	}

	return result;
}

/**
 * amdgpu_ttm_vram_write - Linear write access to VRAM
 *
 * Accesses VRAM via MMIO for debugging purposes.
 */
static ssize_t amdgpu_ttm_vram_write(struct file *f, const char __user *buf,
				     size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	if (*pos >= adev->gmc.mc_vram_size)
		return -ENXIO;

	while (size) {
		unsigned long flags;
		uint32_t value;

		if (*pos >= adev->gmc.mc_vram_size)
			return result;

		r = get_user(value, (uint32_t *)buf);
		if (r)
			return r;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000);
		WREG32_NO_KIQ(mmMM_INDEX_HI, *pos >> 31);
		WREG32_NO_KIQ(mmMM_DATA, value);
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

static const struct file_operations amdgpu_ttm_vram_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_ttm_vram_read,
	.write = amdgpu_ttm_vram_write,
	.llseek = default_llseek,
};

#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS

/**
 * amdgpu_ttm_gtt_read - Linear read access to GTT memory
 */
static ssize_t amdgpu_ttm_gtt_read(struct file *f, char __user *buf,
				   size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	while (size) {
		loff_t p = *pos / PAGE_SIZE;
		unsigned off = *pos & ~PAGE_MASK;
		size_t cur_size = min_t(size_t, size, PAGE_SIZE - off);
		struct page *page;
		void *ptr;

		if (p >= adev->gart.num_cpu_pages)
			return result;

		page = adev->gart.pages[p];
		if (page) {
			ptr = kmap(page);
			ptr += off;

			r = copy_to_user(buf, ptr, cur_size);
			kunmap(adev->gart.pages[p]);
		} else
			r = clear_user(buf, cur_size);

		if (r)
			return -EFAULT;

		result += cur_size;
		buf += cur_size;
		*pos += cur_size;
		size -= cur_size;
	}

	return result;
}

static const struct file_operations amdgpu_ttm_gtt_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_ttm_gtt_read,
	.llseek = default_llseek
};

#endif

/**
 * amdgpu_iomem_read - Virtual read access to GPU mapped memory
 *
 * This function is used to read memory that has been mapped to the
 * GPU and the known addresses are not physical addresses but instead
 * bus addresses (e.g., what you'd put in an IB or ring buffer).
 */
static ssize_t amdgpu_iomem_read(struct file *f, char __user *buf,
				 size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	struct iommu_domain *dom;
	ssize_t result = 0;
	int r;

	/* retrieve the IOMMU domain if any for this device */
	dom = iommu_get_domain_for_dev(adev->dev);

	while (size) {
		phys_addr_t addr = *pos & PAGE_MASK;
		loff_t off = *pos & ~PAGE_MASK;
		size_t bytes = PAGE_SIZE - off;
		unsigned long pfn;
		struct page *p;
		void *ptr;

		bytes = bytes < size ? bytes : size;

		/* Translate the bus address to a physical address. If
		 * the domain is NULL it means there is no IOMMU active
		 * and the address translation is the identity
		 */
		addr = dom ? iommu_iova_to_phys(dom, addr) : addr;

		pfn = addr >> PAGE_SHIFT;
		if (!pfn_valid(pfn))
			return -EPERM;

		p = pfn_to_page(pfn);
		if (p->mapping != adev->mman.bdev.dev_mapping)
			return -EPERM;

		ptr = kmap(p);
		r = copy_to_user(buf, ptr + off, bytes);
		kunmap(p);
		if (r)
			return -EFAULT;

		size -= bytes;
		*pos += bytes;
		result += bytes;
	}

	return result;
}

/**
 * amdgpu_iomem_write - Virtual write access to GPU mapped memory
 *
 * This function is used to write memory that has been mapped to the
 * GPU and the known addresses are not physical addresses but instead
 * bus addresses (e.g., what you'd put in an IB or ring buffer).
 */
static ssize_t amdgpu_iomem_write(struct file *f, const char __user *buf,
				  size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	struct iommu_domain *dom;
	ssize_t result = 0;
	int r;

	dom = iommu_get_domain_for_dev(adev->dev);

	while (size) {
		phys_addr_t addr = *pos & PAGE_MASK;
		loff_t off = *pos & ~PAGE_MASK;
		size_t bytes = PAGE_SIZE - off;
		unsigned long pfn;
		struct page *p;
		void *ptr;

		bytes = bytes < size ? bytes : size;

		addr = dom ? iommu_iova_to_phys(dom, addr) : addr;

		pfn = addr >> PAGE_SHIFT;
		if (!pfn_valid(pfn))
			return -EPERM;

		p = pfn_to_page(pfn);
		if (p->mapping != adev->mman.bdev.dev_mapping)
			return -EPERM;

		ptr = kmap(p);
		r = copy_from_user(ptr + off, buf, bytes);
		kunmap(p);
		if (r)
			return -EFAULT;

		size -= bytes;
		*pos += bytes;
		result += bytes;
	}

	return result;
}

static const struct file_operations amdgpu_ttm_iomem_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_iomem_read,
	.write = amdgpu_iomem_write,
	.llseek = default_llseek
};

static const struct {
	char *name;
	const struct file_operations *fops;
	int domain;
} ttm_debugfs_entries[] = {
	{ "amdgpu_vram", &amdgpu_ttm_vram_fops, TTM_PL_VRAM },
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
	{ "amdgpu_gtt", &amdgpu_ttm_gtt_fops, TTM_PL_TT },
#endif
	{ "amdgpu_iomem", &amdgpu_ttm_iomem_fops, TTM_PL_SYSTEM },
};

#endif

int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned count;

	struct drm_minor *minor = adev->ddev->primary;
	struct dentry *ent, *root = minor->debugfs_root;

	for (count = 0; count < ARRAY_SIZE(ttm_debugfs_entries); count++) {
		ent = debugfs_create_file(
				ttm_debugfs_entries[count].name,
				S_IFREG | S_IRUGO, root,
				adev,
				ttm_debugfs_entries[count].fops);
		if (IS_ERR(ent))
			return PTR_ERR(ent);
		if (ttm_debugfs_entries[count].domain == TTM_PL_VRAM)
			i_size_write(ent->d_inode, adev->gmc.mc_vram_size);
		else if (ttm_debugfs_entries[count].domain == TTM_PL_TT)
			i_size_write(ent->d_inode, adev->gmc.gart_size);
		adev->mman.debugfs_entries[count] = ent;
	}

	count = ARRAY_SIZE(amdgpu_ttm_debugfs_list);

#ifdef CONFIG_SWIOTLB
	if (!(adev->need_swiotlb && swiotlb_nr_tbl()))
		--count;
#endif

	return amdgpu_debugfs_add_files(adev, amdgpu_ttm_debugfs_list, count);
#else
	return 0;
#endif
}