/*
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors: Jerome Glisse <glisse@freedesktop.org>
 *          Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 */
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_page_alloc.h>
#include <drm/amdgpu_drm.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/swiotlb.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/debugfs.h>
#include <linux/iommu.h>
#include "amdgpu_object.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
#include "bif/bif_4_1_d.h"

#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
			     struct ttm_mem_reg *mem, unsigned num_pages,
			     uint64_t offset, unsigned window,
			     struct amdgpu_ring *ring,
			     uint64_t *addr);

static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev);
static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev);
/**
 * amdgpu_ttm_mem_global_init - Initialize and acquire reference to
 * memory object
 *
 * @ref: Object for initialization.
 *
 * This is called by drm_global_item_ref() when an object is being
 * initialized.
 */
static int amdgpu_ttm_mem_global_init(struct drm_global_reference *ref)
{
	return ttm_mem_global_init(ref->object);
}

/**
 * amdgpu_ttm_mem_global_release - Drop reference to a memory object
 *
 * @ref: Object being removed
 *
 * This is called by drm_global_item_unref() when an object is being
 * released.
 */
static void amdgpu_ttm_mem_global_release(struct drm_global_reference *ref)
{
	ttm_mem_global_release(ref->object);
}
/**
 * amdgpu_ttm_global_init - Initialize global TTM memory reference
 * structures.
 *
 * @adev: AMDGPU device for which the global structures need to be
 * registered.
 *
 * This is called as part of the AMDGPU ttm init from amdgpu_ttm_init()
 * during bring up.
 */
static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
{
	struct drm_global_reference *global_ref;
	struct amdgpu_ring *ring;
	struct drm_sched_rq *rq;
	int r;

	/* ensure reference is false in case init fails */
	adev->mman.mem_global_referenced = false;

	global_ref = &adev->mman.mem_global_ref;
	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
	global_ref->size = sizeof(struct ttm_mem_global);
	global_ref->init = &amdgpu_ttm_mem_global_init;
	global_ref->release = &amdgpu_ttm_mem_global_release;
	r = drm_global_item_ref(global_ref);
	if (r) {
		DRM_ERROR("Failed setting up TTM memory accounting "
			  "subsystem.\n");
		goto error_mem;
	}

	adev->mman.bo_global_ref.mem_glob =
		adev->mman.mem_global_ref.object;
	global_ref = &adev->mman.bo_global_ref.ref;
	global_ref->global_type = DRM_GLOBAL_TTM_BO;
	global_ref->size = sizeof(struct ttm_bo_global);
	global_ref->init = &ttm_bo_global_init;
	global_ref->release = &ttm_bo_global_release;
	r = drm_global_item_ref(global_ref);
	if (r) {
		DRM_ERROR("Failed setting up TTM BO subsystem.\n");
		goto error_bo;
	}

	mutex_init(&adev->mman.gtt_window_lock);

	ring = adev->mman.buffer_funcs_ring;
	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
	r = drm_sched_entity_init(&ring->sched, &adev->mman.entity,
				  rq, NULL);
	if (r) {
		DRM_ERROR("Failed setting up TTM BO move run queue.\n");
		goto error_entity;
	}

	adev->mman.mem_global_referenced = true;

	return 0;

error_entity:
	drm_global_item_unref(&adev->mman.bo_global_ref.ref);
error_bo:
	drm_global_item_unref(&adev->mman.mem_global_ref);
error_mem:
	return r;
}
static void amdgpu_ttm_global_fini(struct amdgpu_device *adev)
{
	if (adev->mman.mem_global_referenced) {
		drm_sched_entity_fini(adev->mman.entity.sched,
				      &adev->mman.entity);
		mutex_destroy(&adev->mman.gtt_window_lock);
		drm_global_item_unref(&adev->mman.bo_global_ref.ref);
		drm_global_item_unref(&adev->mman.mem_global_ref);
		adev->mman.mem_global_referenced = false;
	}
}
static int amdgpu_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	return 0;
}
/**
 * amdgpu_init_mem_type - Initialize a memory manager for a specific
 * type of memory request.
 *
 * @bdev: The TTM BO device object (contains a reference to amdgpu_device)
 * @type: The type of memory requested
 * @man: The memory type manager to initialize
 *
 * This is called by ttm_bo_init_mm() when a buffer object is being
 * initialized.
 */
static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
				struct ttm_mem_type_manager *man)
{
	struct amdgpu_device *adev;

	adev = amdgpu_ttm_adev(bdev);

	switch (type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_TT:
		/* GTT memory */
		man->func = &amdgpu_gtt_mgr_func;
		man->gpu_offset = adev->gmc.gart_start;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
		break;
	case TTM_PL_VRAM:
		/* "On-card" video ram */
		man->func = &amdgpu_vram_mgr_func;
		man->gpu_offset = adev->gmc.vram_start;
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	case AMDGPU_PL_GDS:
	case AMDGPU_PL_GWS:
	case AMDGPU_PL_OA:
		/* On-chip GDS memory*/
		man->func = &ttm_bo_manager_func;
		man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_CMA;
		man->available_caching = TTM_PL_FLAG_UNCACHED;
		man->default_caching = TTM_PL_FLAG_UNCACHED;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}
/**
 * amdgpu_evict_flags - Compute placement flags
 *
 * @bo: The buffer object to evict
 * @placement: Possible destination(s) for evicted BO
 *
 * Fill in placement data when ttm_bo_evict() is called
 */
static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
			       struct ttm_placement *placement)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_bo *abo;
	static const struct ttm_place placements = {
		.fpfn = 0,
		.lpfn = 0,
		.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM
	};

	/* Don't handle scatter gather BOs */
	if (bo->type == ttm_bo_type_sg) {
		placement->num_placement = 0;
		placement->num_busy_placement = 0;
		return;
	}

	/* Object isn't an AMDGPU object so ignore */
	if (!amdgpu_ttm_bo_is_amdgpu_bo(bo)) {
		placement->placement = &placements;
		placement->busy_placement = &placements;
		placement->num_placement = 1;
		placement->num_busy_placement = 1;
		return;
	}

	abo = ttm_to_amdgpu_bo(bo);
	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		if (!adev->mman.buffer_funcs_enabled) {
			/* Move to system memory */
			amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
		} else if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
			   !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
			   amdgpu_bo_in_cpu_visible_vram(abo)) {

			/* Try evicting to the CPU inaccessible part of VRAM
			 * first, but only set GTT as busy placement, so this
			 * BO will be evicted to GTT rather than causing other
			 * BOs to be evicted from VRAM
			 */
			amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
							 AMDGPU_GEM_DOMAIN_GTT);
			abo->placements[0].fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
			abo->placements[0].lpfn = 0;
			abo->placement.busy_placement = &abo->placements[1];
			abo->placement.num_busy_placement = 1;
		} else {
			/* Move to GTT memory */
			amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
		}
		break;
	case TTM_PL_TT:
	default:
		amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
	}
	*placement = abo->placement;
}
/**
 * amdgpu_verify_access - Verify access for a mmap call
 *
 * @bo: The buffer object to map
 * @filp: The file pointer from the process performing the mmap
 *
 * This is called by ttm_bo_mmap() to verify whether a process
 * has the right to mmap a BO to their process space.
 */
static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);

	/*
	 * Don't verify access for KFD BOs. They don't have a GEM
	 * object associated with them.
	 */
	if (abo->kfd_bo)
		return 0;

	if (amdgpu_ttm_tt_get_usermm(bo->ttm))
		return -EPERM;
	return drm_vma_node_verify_access(&abo->gem_base.vma_node,
					  filp->private_data);
}
/**
 * amdgpu_move_null - Register memory for a buffer object
 *
 * @bo: The bo to assign the memory to
 * @new_mem: The memory to be assigned.
 *
 * Assign the memory from new_mem to the memory of the buffer object bo.
 */
static void amdgpu_move_null(struct ttm_buffer_object *bo,
			     struct ttm_mem_reg *new_mem)
{
	struct ttm_mem_reg *old_mem = &bo->mem;

	BUG_ON(old_mem->mm_node != NULL);
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;
}
/**
 * amdgpu_mm_node_addr - Compute the GPU relative offset of a GTT buffer.
 */
static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
				    struct drm_mm_node *mm_node,
				    struct ttm_mem_reg *mem)
{
	uint64_t addr = 0;

	if (mem->mem_type != TTM_PL_TT || amdgpu_gtt_mgr_has_gart_addr(mem)) {
		addr = mm_node->start << PAGE_SHIFT;
		addr += bo->bdev->man[mem->mem_type].gpu_offset;
	}
	return addr;
}
/**
 * amdgpu_find_mm_node - Helper function finds the drm_mm_node
 * corresponding to @offset. It also modifies
 * the offset to be within the drm_mm_node
 */
static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_mem_reg *mem,
						unsigned long *offset)
{
	struct drm_mm_node *mm_node = mem->mm_node;

	while (*offset >= (mm_node->size << PAGE_SHIFT)) {
		*offset -= (mm_node->size << PAGE_SHIFT);
		++mm_node;
	}
	return mm_node;
}
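
/*
 * Illustrative example (not from the original file): if @mem is backed by two
 * drm_mm_nodes of 3 and 5 pages and *offset comes in as 5 pages worth of
 * bytes, the loop above skips past the 3-page node and returns the second
 * node with *offset reduced to 2 pages worth of bytes into it.
 */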
/**
 * amdgpu_ttm_copy_mem_to_mem - Helper function for copy
 *
 * The function copies @size bytes from {src->mem + src->offset} to
 * {dst->mem + dst->offset}. src->bo and dst->bo could be same BO for a
 * move and different for a BO to BO copy.
 *
 * @f: Returns the last fence if multiple jobs are submitted.
 */
int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
			       struct amdgpu_copy_mem *src,
			       struct amdgpu_copy_mem *dst,
			       uint64_t size,
			       struct reservation_object *resv,
			       struct dma_fence **f)
{
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
	struct drm_mm_node *src_mm, *dst_mm;
	uint64_t src_node_start, dst_node_start, src_node_size,
		 dst_node_size, src_page_offset, dst_page_offset;
	struct dma_fence *fence = NULL;
	int r = 0;
	const uint64_t GTT_MAX_BYTES = (AMDGPU_GTT_MAX_TRANSFER_SIZE *
					AMDGPU_GPU_PAGE_SIZE);

	if (!adev->mman.buffer_funcs_enabled) {
		DRM_ERROR("Trying to move memory with ring turned off.\n");
		return -EINVAL;
	}

	src_mm = amdgpu_find_mm_node(src->mem, &src->offset);
	src_node_start = amdgpu_mm_node_addr(src->bo, src_mm, src->mem) +
			 src->offset;
	src_node_size = (src_mm->size << PAGE_SHIFT) - src->offset;
	src_page_offset = src_node_start & (PAGE_SIZE - 1);

	dst_mm = amdgpu_find_mm_node(dst->mem, &dst->offset);
	dst_node_start = amdgpu_mm_node_addr(dst->bo, dst_mm, dst->mem) +
			 dst->offset;
	dst_node_size = (dst_mm->size << PAGE_SHIFT) - dst->offset;
	dst_page_offset = dst_node_start & (PAGE_SIZE - 1);

	mutex_lock(&adev->mman.gtt_window_lock);

	while (size) {
		unsigned long cur_size;
		uint64_t from = src_node_start, to = dst_node_start;
		struct dma_fence *next;

		/* Copy size cannot exceed GTT_MAX_BYTES. So if src or dst
		 * begins at an offset, then adjust the size accordingly
		 */
		cur_size = min3(min(src_node_size, dst_node_size), size,
				GTT_MAX_BYTES);
		if (cur_size + src_page_offset > GTT_MAX_BYTES ||
		    cur_size + dst_page_offset > GTT_MAX_BYTES)
			cur_size -= max(src_page_offset, dst_page_offset);

		/* Map only what needs to be accessed. Map src to window 0 and
		 * dst to window 1
		 */
		if (src->mem->mem_type == TTM_PL_TT &&
		    !amdgpu_gtt_mgr_has_gart_addr(src->mem)) {
			r = amdgpu_map_buffer(src->bo, src->mem,
					      PFN_UP(cur_size + src_page_offset),
					      src_node_start, 0, ring,
					      &from);
			if (r)
				goto error;
			/* Adjust the offset because amdgpu_map_buffer returns
			 * start of mapped page
			 */
			from += src_page_offset;
		}

		if (dst->mem->mem_type == TTM_PL_TT &&
		    !amdgpu_gtt_mgr_has_gart_addr(dst->mem)) {
			r = amdgpu_map_buffer(dst->bo, dst->mem,
					      PFN_UP(cur_size + dst_page_offset),
					      dst_node_start, 1, ring,
					      &to);
			if (r)
				goto error;
			to += dst_page_offset;
		}

		r = amdgpu_copy_buffer(ring, from, to, cur_size,
				       resv, &next, false, true);
		if (r)
			goto error;

		dma_fence_put(fence);
		fence = next;

		size -= cur_size;
		if (!size)
			break;

		src_node_size -= cur_size;
		if (!src_node_size) {
			src_node_start = amdgpu_mm_node_addr(src->bo, ++src_mm,
							     src->mem);
			src_node_size = (src_mm->size << PAGE_SHIFT);
		} else {
			src_node_start += cur_size;
			src_page_offset = src_node_start & (PAGE_SIZE - 1);
		}
		dst_node_size -= cur_size;
		if (!dst_node_size) {
			dst_node_start = amdgpu_mm_node_addr(dst->bo, ++dst_mm,
							     dst->mem);
			dst_node_size = (dst_mm->size << PAGE_SHIFT);
		} else {
			dst_node_start += cur_size;
			dst_page_offset = dst_node_start & (PAGE_SIZE - 1);
		}
	}
error:
	mutex_unlock(&adev->mman.gtt_window_lock);
	if (f)
		*f = dma_fence_get(fence);
	dma_fence_put(fence);
	return r;
}
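
/*
 * Illustrative caller sketch (not part of the original file; the names sbo
 * and dbo are hypothetical amdgpu_bo pointers): a BO-to-BO copy fills two
 * amdgpu_copy_mem descriptors, hands them to amdgpu_ttm_copy_mem_to_mem()
 * and waits on the returned fence, much like amdgpu_move_blit() below:
 *
 *	struct amdgpu_copy_mem src = { .bo = &sbo->tbo, .mem = &sbo->tbo.mem, .offset = 0 };
 *	struct amdgpu_copy_mem dst = { .bo = &dbo->tbo, .mem = &dbo->tbo.mem, .offset = 0 };
 *	struct dma_fence *fence = NULL;
 *	int r;
 *
 *	r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst,
 *				       sbo->tbo.num_pages << PAGE_SHIFT,
 *				       sbo->tbo.resv, &fence);
 *	if (!r && fence) {
 *		dma_fence_wait(fence, false);
 *		dma_fence_put(fence);
 *	}
 */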
/**
 * amdgpu_move_blit - Copy an entire buffer to another buffer
 *
 * This is a helper called by amdgpu_bo_move() and
 * amdgpu_move_vram_ram() to help move buffers to and from VRAM.
 */
static int amdgpu_move_blit(struct ttm_buffer_object *bo,
			    bool evict, bool no_wait_gpu,
			    struct ttm_mem_reg *new_mem,
			    struct ttm_mem_reg *old_mem)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_copy_mem src, dst;
	struct dma_fence *fence = NULL;
	int r;

	src.bo = bo;
	dst.bo = bo;
	src.mem = old_mem;
	dst.mem = new_mem;
	src.offset = 0;
	dst.offset = 0;

	r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst,
				       new_mem->num_pages << PAGE_SHIFT,
				       bo->resv, &fence);
	if (r)
		goto error;

	r = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
	dma_fence_put(fence);
	return r;

error:
	if (fence)
		dma_fence_wait(fence, false);
	dma_fence_put(fence);
	return r;
}
/**
 * amdgpu_move_vram_ram - Copy VRAM buffer to RAM buffer
 *
 * Called by amdgpu_bo_move().
 */
static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict,
				struct ttm_operation_ctx *ctx,
				struct ttm_mem_reg *new_mem)
{
	struct amdgpu_device *adev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg tmp_mem;
	struct ttm_place placements;
	struct ttm_placement placement;
	int r;

	adev = amdgpu_ttm_adev(bo->bdev);

	/* create space/pages for new_mem in GTT space */
	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	placement.num_placement = 1;
	placement.placement = &placements;
	placement.num_busy_placement = 1;
	placement.busy_placement = &placements;
	placements.fpfn = 0;
	placements.lpfn = 0;
	placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
	r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
	if (unlikely(r))
		return r;

	/* set caching flags */
	r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
	if (unlikely(r))
		goto out_cleanup;

	/* Bind the memory to the GTT space */
	r = ttm_tt_bind(bo->ttm, &tmp_mem, ctx);
	if (unlikely(r))
		goto out_cleanup;

	/* blit VRAM to GTT */
	r = amdgpu_move_blit(bo, true, ctx->no_wait_gpu, &tmp_mem, old_mem);
	if (unlikely(r))
		goto out_cleanup;

	/* move BO (in tmp_mem) to new_mem */
	r = ttm_bo_move_ttm(bo, ctx, new_mem);
out_cleanup:
	ttm_bo_mem_put(bo, &tmp_mem);
	return r;
}
/**
 * amdgpu_move_ram_vram - Copy buffer from RAM to VRAM
 *
 * Called by amdgpu_bo_move().
 */
static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict,
				struct ttm_operation_ctx *ctx,
				struct ttm_mem_reg *new_mem)
{
	struct amdgpu_device *adev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg tmp_mem;
	struct ttm_placement placement;
	struct ttm_place placements;
	int r;

	adev = amdgpu_ttm_adev(bo->bdev);

	/* make space in GTT for old_mem buffer */
	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	placement.num_placement = 1;
	placement.placement = &placements;
	placement.num_busy_placement = 1;
	placement.busy_placement = &placements;
	placements.fpfn = 0;
	placements.lpfn = 0;
	placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
	r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
	if (unlikely(r))
		return r;

	/* move/bind old memory to GTT space */
	r = ttm_bo_move_ttm(bo, ctx, &tmp_mem);
	if (unlikely(r))
		goto out_cleanup;

	/* copy the buffer into VRAM with the blit engine */
	r = amdgpu_move_blit(bo, true, ctx->no_wait_gpu, new_mem, old_mem);
out_cleanup:
	ttm_bo_mem_put(bo, &tmp_mem);
	return r;
}
/**
 * amdgpu_bo_move - Move a buffer object to a new memory location
 *
 * Called by ttm_bo_handle_move_mem()
 */
static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
			  struct ttm_operation_ctx *ctx,
			  struct ttm_mem_reg *new_mem)
{
	struct amdgpu_device *adev;
	struct amdgpu_bo *abo;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int r;

	/* Can't move a pinned BO */
	abo = ttm_to_amdgpu_bo(bo);
	if (WARN_ON_ONCE(abo->pin_count > 0))
		return -EINVAL;

	adev = amdgpu_ttm_adev(bo->bdev);

	if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		amdgpu_move_null(bo, new_mem);
		return 0;
	}
	if ((old_mem->mem_type == TTM_PL_TT &&
	     new_mem->mem_type == TTM_PL_SYSTEM) ||
	    (old_mem->mem_type == TTM_PL_SYSTEM &&
	     new_mem->mem_type == TTM_PL_TT)) {
		/* bind is enough */
		amdgpu_move_null(bo, new_mem);
		return 0;
	}

	if (!adev->mman.buffer_funcs_enabled)
		goto memcpy;

	if (old_mem->mem_type == TTM_PL_VRAM &&
	    new_mem->mem_type == TTM_PL_SYSTEM) {
		r = amdgpu_move_vram_ram(bo, evict, ctx, new_mem);
	} else if (old_mem->mem_type == TTM_PL_SYSTEM &&
		   new_mem->mem_type == TTM_PL_VRAM) {
		r = amdgpu_move_ram_vram(bo, evict, ctx, new_mem);
	} else {
		r = amdgpu_move_blit(bo, evict, ctx->no_wait_gpu,
				     new_mem, old_mem);
	}

	if (r) {
memcpy:
		r = ttm_bo_move_memcpy(bo, ctx, new_mem);
		if (r)
			return r;
	}

	if (bo->type == ttm_bo_type_device &&
	    new_mem->mem_type == TTM_PL_VRAM &&
	    old_mem->mem_type != TTM_PL_VRAM) {
		/* amdgpu_bo_fault_reserve_notify will re-set this if the CPU
		 * accesses the BO after it's moved.
		 */
		abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
	}

	/* update statistics */
	atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &adev->num_bytes_moved);
	return 0;
}
/**
 * amdgpu_ttm_io_mem_reserve - Reserve a block of memory during a fault
 *
 * Called by ttm_mem_io_reserve() ultimately via ttm_bo_vm_fault()
 */
static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
	struct drm_mm_node *mm_node = mem->mm_node;

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* system memory */
		return 0;
	case TTM_PL_TT:
		break;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		/* check if it's visible */
		if ((mem->bus.offset + mem->bus.size) > adev->gmc.visible_vram_size)
			return -EINVAL;
		/* Only physically contiguous buffers apply. In a contiguous
		 * buffer, size of the first mm_node would match the number of
		 * pages in ttm_mem_reg.
		 */
		if (adev->mman.aper_base_kaddr &&
		    (mm_node->size == mem->num_pages))
			mem->bus.addr = (u8 *)adev->mman.aper_base_kaddr +
					mem->bus.offset;

		mem->bus.base = adev->gmc.aper_base;
		mem->bus.is_iomem = true;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
static void amdgpu_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}
static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
					   unsigned long page_offset)
{
	struct drm_mm_node *mm;
	unsigned long offset = (page_offset << PAGE_SHIFT);

	mm = amdgpu_find_mm_node(&bo->mem, &offset);
	return (bo->mem.bus.base >> PAGE_SHIFT) + mm->start +
		(offset >> PAGE_SHIFT);
}
/*
 * TTM backend functions.
 */
struct amdgpu_ttm_gup_task_list {
	struct list_head	list;
	struct task_struct	*task;
};

struct amdgpu_ttm_tt {
	struct ttm_dma_tt	ttm;
	u64			offset;
	uint64_t		userptr;
	struct task_struct	*usertask;
	uint32_t		userflags;
	spinlock_t		guptasklock;
	struct list_head	guptasks;
	atomic_t		mmu_invalidations;
	uint32_t		last_set_pages;
};
/**
 * amdgpu_ttm_tt_get_user_pages - Pin pages of memory pointed to
 * by a USERPTR pointer to memory
 *
 * Called by amdgpu_gem_userptr_ioctl() and amdgpu_cs_parser_bos().
 * This provides a wrapper around the get_user_pages() call to provide
 * device accessible pages that back user memory.
 */
int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	struct mm_struct *mm = gtt->usertask->mm;
	unsigned int flags = 0;
	unsigned pinned = 0;
	int r;

	if (!mm) /* Happens during process shutdown */
		return -ESRCH;

	if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY))
		flags |= FOLL_WRITE;

	down_read(&mm->mmap_sem);

	if (gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) {
		/* check that we only use anonymous memory
		   to prevent problems with writeback */
		unsigned long end = gtt->userptr + ttm->num_pages * PAGE_SIZE;
		struct vm_area_struct *vma;

		vma = find_vma(mm, gtt->userptr);
		if (!vma || vma->vm_file || vma->vm_end < end) {
			up_read(&mm->mmap_sem);
			return -EPERM;
		}
	}

	/* loop enough times using contiguous pages of memory */
	do {
		unsigned num_pages = ttm->num_pages - pinned;
		uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE;
		struct page **p = pages + pinned;
		struct amdgpu_ttm_gup_task_list guptask;

		guptask.task = current;
		spin_lock(&gtt->guptasklock);
		list_add(&guptask.list, &gtt->guptasks);
		spin_unlock(&gtt->guptasklock);

		if (mm == current->mm)
			r = get_user_pages(userptr, num_pages, flags, p, NULL);
		else
			r = get_user_pages_remote(gtt->usertask,
						  mm, userptr, num_pages,
						  flags, p, NULL, NULL);

		spin_lock(&gtt->guptasklock);
		list_del(&guptask.list);
		spin_unlock(&gtt->guptasklock);

		if (r < 0)
			goto release_pages;

		pinned += r;

	} while (pinned < ttm->num_pages);

	up_read(&mm->mmap_sem);
	return 0;

release_pages:
	release_pages(pages, pinned);
	up_read(&mm->mmap_sem);
	return r;
}
/**
 * amdgpu_ttm_tt_set_user_pages - Copy pages in, putting old pages
 * as necessary.
 *
 * Called by amdgpu_cs_list_validate(). This creates the page list
 * that backs user memory and will ultimately be mapped into the device
 * address space.
 */
void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	unsigned i;

	gtt->last_set_pages = atomic_read(&gtt->mmu_invalidations);
	for (i = 0; i < ttm->num_pages; ++i) {
		if (ttm->pages[i])
			put_page(ttm->pages[i]);

		ttm->pages[i] = pages ? pages[i] : NULL;
	}
}
/**
 * amdgpu_ttm_tt_mark_user_pages - Mark pages as dirty
 *
 * Called while unpinning userptr pages
 */
void amdgpu_ttm_tt_mark_user_pages(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	unsigned i;

	for (i = 0; i < ttm->num_pages; ++i) {
		struct page *page = ttm->pages[i];

		if (!page)
			continue;

		if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY))
			set_page_dirty(page);

		mark_page_accessed(page);
	}
}
/**
 * amdgpu_ttm_tt_pin_userptr - prepare the sg table with the user pages
 *
 * Called by amdgpu_ttm_backend_bind()
 */
static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	unsigned nents;
	int r;

	int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
	enum dma_data_direction direction = write ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

	/* Allocate an SG array and squash pages into it */
	r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
				      ttm->num_pages << PAGE_SHIFT,
				      GFP_KERNEL);
	if (r)
		goto release_sg;

	/* Map SG to device */
	r = -ENOMEM;
	nents = dma_map_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
	if (nents != ttm->sg->nents)
		goto release_sg;

	/* convert SG to linear array of pages and dma addresses */
	drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
					 gtt->ttm.dma_address, ttm->num_pages);

	return 0;

release_sg:
	kfree(ttm->sg);
	return r;
}
/**
 * amdgpu_ttm_tt_unpin_userptr - Unpin and unmap userptr pages
 */
static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
	enum dma_data_direction direction = write ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

	/* double check that we don't free the table twice */
	if (!ttm->sg->sgl)
		return;

	/* unmap the pages mapped to the device */
	dma_unmap_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction);

	/* mark the pages as dirty */
	amdgpu_ttm_tt_mark_user_pages(ttm);

	sg_free_table(ttm->sg);
}
int amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
			 struct ttm_buffer_object *tbo,
			 uint64_t flags)
{
	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(tbo);
	struct ttm_tt *ttm = tbo->ttm;
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	int r;

	if (abo->flags & AMDGPU_GEM_CREATE_MQD_GFX9) {
		uint64_t page_idx = 1;

		r = amdgpu_gart_bind(adev, gtt->offset, page_idx,
				     ttm->pages, gtt->ttm.dma_address, flags);
		if (r)
			goto gart_bind_fail;

		/* Patch mtype of the second part BO */
		flags &= ~AMDGPU_PTE_MTYPE_MASK;
		flags |= AMDGPU_PTE_MTYPE(AMDGPU_MTYPE_NC);

		r = amdgpu_gart_bind(adev,
				     gtt->offset + (page_idx << PAGE_SHIFT),
				     ttm->num_pages - page_idx,
				     &ttm->pages[page_idx],
				     &(gtt->ttm.dma_address[page_idx]), flags);
	} else {
		r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
				     ttm->pages, gtt->ttm.dma_address, flags);
	}

gart_bind_fail:
	if (r)
		DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
			  ttm->num_pages, gtt->offset);

	return r;
}
/**
 * amdgpu_ttm_backend_bind - Bind GTT memory
 *
 * Called by ttm_tt_bind() on behalf of ttm_bo_handle_move_mem().
 * This handles binding GTT memory to the device address space.
 */
static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
				   struct ttm_mem_reg *bo_mem)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
	struct amdgpu_ttm_tt *gtt = (void*)ttm;
	uint64_t flags;
	int r = 0;

	if (gtt->userptr) {
		r = amdgpu_ttm_tt_pin_userptr(ttm);
		if (r) {
			DRM_ERROR("failed to pin userptr\n");
			return r;
		}
	}
	if (!ttm->num_pages) {
		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
		     ttm->num_pages, bo_mem, ttm);
	}

	if (bo_mem->mem_type == AMDGPU_PL_GDS ||
	    bo_mem->mem_type == AMDGPU_PL_GWS ||
	    bo_mem->mem_type == AMDGPU_PL_OA)
		return -EINVAL;

	if (!amdgpu_gtt_mgr_has_gart_addr(bo_mem)) {
		gtt->offset = AMDGPU_BO_INVALID_OFFSET;
		return 0;
	}

	/* compute PTE flags relevant to this BO memory */
	flags = amdgpu_ttm_tt_pte_flags(adev, ttm, bo_mem);

	/* bind pages into GART page tables */
	gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
	r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
			     ttm->pages, gtt->ttm.dma_address, flags);

	if (r)
		DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
			  ttm->num_pages, gtt->offset);
	return r;
}
/**
 * amdgpu_ttm_alloc_gart - Allocate GART memory for buffer object
 */
int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_ttm_tt *gtt = (void*)bo->ttm;
	struct ttm_mem_reg tmp;
	struct ttm_placement placement;
	struct ttm_place placements;
	uint64_t flags;
	int r;

	if (bo->mem.mem_type != TTM_PL_TT ||
	    amdgpu_gtt_mgr_has_gart_addr(&bo->mem))
		return 0;

	/* allocate GTT space */
	tmp = bo->mem;
	tmp.mm_node = NULL;
	placement.num_placement = 1;
	placement.placement = &placements;
	placement.num_busy_placement = 1;
	placement.busy_placement = &placements;
	placements.fpfn = 0;
	placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
	placements.flags = (bo->mem.placement & ~TTM_PL_MASK_MEM) |
		TTM_PL_FLAG_TT;

	r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx);
	if (unlikely(r))
		return r;

	/* compute PTE flags for this buffer object */
	flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, &tmp);

	/* Bind pages */
	gtt->offset = (u64)tmp.start << PAGE_SHIFT;
	r = amdgpu_ttm_gart_bind(adev, bo, flags);
	if (unlikely(r)) {
		ttm_bo_mem_put(bo, &tmp);
		return r;
	}

	ttm_bo_mem_put(bo, &bo->mem);
	bo->mem = tmp;
	bo->offset = (bo->mem.start << PAGE_SHIFT) +
		bo->bdev->man[bo->mem.mem_type].gpu_offset;

	return 0;
}
/**
 * amdgpu_ttm_recover_gart - Rebind GTT pages
 *
 * Called by amdgpu_gtt_mgr_recover() from amdgpu_device_reset() to
 * rebind GTT pages during a GPU reset.
 */
int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
	uint64_t flags;
	int r;

	if (!tbo->ttm)
		return 0;

	flags = amdgpu_ttm_tt_pte_flags(adev, tbo->ttm, &tbo->mem);
	r = amdgpu_ttm_gart_bind(adev, tbo, flags);

	return r;
}
/**
 * amdgpu_ttm_backend_unbind - Unbind GTT mapped pages
 *
 * Called by ttm_tt_unbind() on behalf of ttm_bo_move_ttm() and
 * ttm_tt_destroy().
 */
static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	int r;

	/* if the pages have userptr pinning then clear that first */
	if (gtt->userptr)
		amdgpu_ttm_tt_unpin_userptr(ttm);

	if (gtt->offset == AMDGPU_BO_INVALID_OFFSET)
		return 0;

	/* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
	r = amdgpu_gart_unbind(adev, gtt->offset, ttm->num_pages);
	if (r)
		DRM_ERROR("failed to unbind %lu pages at 0x%08llX\n",
			  gtt->ttm.ttm.num_pages, gtt->offset);
	return r;
}
static void amdgpu_ttm_backend_destroy(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	if (gtt->usertask)
		put_task_struct(gtt->usertask);

	ttm_dma_tt_fini(&gtt->ttm);
	kfree(gtt);
}

static struct ttm_backend_func amdgpu_backend_func = {
	.bind = &amdgpu_ttm_backend_bind,
	.unbind = &amdgpu_ttm_backend_unbind,
	.destroy = &amdgpu_ttm_backend_destroy,
};
/**
 * amdgpu_ttm_tt_create - Create a ttm_tt object for a given BO
 *
 * @bo: The buffer object to create a GTT ttm_tt object around
 *
 * Called by ttm_tt_create().
 */
static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
					   uint32_t page_flags)
{
	struct amdgpu_device *adev;
	struct amdgpu_ttm_tt *gtt;

	adev = amdgpu_ttm_adev(bo->bdev);

	gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL);
	if (gtt == NULL)
		return NULL;

	gtt->ttm.ttm.func = &amdgpu_backend_func;

	/* allocate space for the uninitialized page entries */
	if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags)) {
		kfree(gtt);
		return NULL;
	}
	return &gtt->ttm.ttm;
}
/**
 * amdgpu_ttm_tt_populate - Map GTT pages visible to the device
 *
 * Map the pages of a ttm_tt object to an address space visible
 * to the underlying device.
 */
static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm,
				  struct ttm_operation_ctx *ctx)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	/* user pages are bound by amdgpu_ttm_tt_pin_userptr() */
	if (gtt && gtt->userptr) {
		ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
		if (!ttm->sg)
			return -ENOMEM;

		ttm->page_flags |= TTM_PAGE_FLAG_SG;
		ttm->state = tt_unbound;
		return 0;
	}

	if (slave && ttm->sg) {
		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
						 gtt->ttm.dma_address,
						 ttm->num_pages);
		ttm->state = tt_unbound;
		return 0;
	}

#ifdef CONFIG_SWIOTLB
	if (adev->need_swiotlb && swiotlb_nr_tbl()) {
		return ttm_dma_populate(&gtt->ttm, adev->dev, ctx);
	}
#endif

	/* fall back to generic helper to populate the page array
	 * and map them to the device */
	return ttm_populate_and_map_pages(adev->dev, &gtt->ttm, ctx);
}
/**
 * amdgpu_ttm_tt_unpopulate - unmap GTT pages and unpopulate page arrays
 *
 * Unmaps pages of a ttm_tt object from the device address space and
 * unpopulates the page array backing it.
 */
static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	struct amdgpu_device *adev;
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (gtt && gtt->userptr) {
		amdgpu_ttm_tt_set_user_pages(ttm, NULL);
		kfree(ttm->sg);
		ttm->page_flags &= ~TTM_PAGE_FLAG_SG;
		return;
	}

	if (slave)
		return;

	adev = amdgpu_ttm_adev(ttm->bdev);

#ifdef CONFIG_SWIOTLB
	if (adev->need_swiotlb && swiotlb_nr_tbl()) {
		ttm_dma_unpopulate(&gtt->ttm, adev->dev);
		return;
	}
#endif

	/* fall back to generic helper to unmap and unpopulate array */
	ttm_unmap_and_unpopulate_pages(adev->dev, &gtt->ttm);
}
/**
 * amdgpu_ttm_tt_set_userptr - Initialize userptr GTT ttm_tt
 * for the current task
 *
 * @ttm: The ttm_tt object to bind this userptr object to
 * @addr: The address in the current tasks VM space to use
 * @flags: Requirements of userptr object.
 *
 * Called by amdgpu_gem_userptr_ioctl() to bind userptr pages
 * to current task
 */
int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
			      uint32_t flags)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	if (gtt == NULL)
		return -EINVAL;

	gtt->userptr = addr;
	gtt->userflags = flags;

	if (gtt->usertask)
		put_task_struct(gtt->usertask);
	gtt->usertask = current->group_leader;
	get_task_struct(gtt->usertask);

	spin_lock_init(&gtt->guptasklock);
	INIT_LIST_HEAD(&gtt->guptasks);
	atomic_set(&gtt->mmu_invalidations, 0);
	gtt->last_set_pages = 0;

	return 0;
}
/**
 * amdgpu_ttm_tt_get_usermm - Return memory manager for ttm_tt object
 */
struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	if (gtt == NULL)
		return NULL;

	if (gtt->usertask == NULL)
		return NULL;

	return gtt->usertask->mm;
}
/**
 * amdgpu_ttm_tt_affect_userptr - Determine if a ttm_tt object lays
 * inside an address range for the
 * current task.
 */
bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
				  unsigned long end)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	struct amdgpu_ttm_gup_task_list *entry;
	unsigned long size;

	if (gtt == NULL || !gtt->userptr)
		return false;

	/* Return false if no part of the ttm_tt object lies within
	 * the range
	 */
	size = (unsigned long)gtt->ttm.ttm.num_pages * PAGE_SIZE;
	if (gtt->userptr > end || gtt->userptr + size <= start)
		return false;

	/* Search the lists of tasks that hold this mapping and see
	 * if current is one of them.  If it is return false.
	 */
	spin_lock(&gtt->guptasklock);
	list_for_each_entry(entry, &gtt->guptasks, list) {
		if (entry->task == current) {
			spin_unlock(&gtt->guptasklock);
			return false;
		}
	}
	spin_unlock(&gtt->guptasklock);

	atomic_inc(&gtt->mmu_invalidations);

	return true;
}
/**
 * amdgpu_ttm_tt_userptr_invalidated - Has the ttm_tt object been
 * invalidated?
 */
bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
				       int *last_invalidated)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	int prev_invalidated = *last_invalidated;

	*last_invalidated = atomic_read(&gtt->mmu_invalidations);
	return prev_invalidated != *last_invalidated;
}
/**
 * amdgpu_ttm_tt_userptr_needs_pages - Have the pages backing this
 * ttm_tt object been invalidated
 * since the last time they've
 * been set?
 */
bool amdgpu_ttm_tt_userptr_needs_pages(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	if (gtt == NULL || !gtt->userptr)
		return false;

	return atomic_read(&gtt->mmu_invalidations) != gtt->last_set_pages;
}
/**
 * amdgpu_ttm_tt_is_readonly - Is the ttm_tt object read only?
 */
bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	if (gtt == NULL)
		return false;

	return !!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
}
/**
 * amdgpu_ttm_tt_pte_flags - Compute PTE flags for ttm_tt object
 *
 * @ttm: The ttm_tt object to compute the flags for
 * @mem: The memory registry backing this ttm_tt object
 */
uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
				 struct ttm_mem_reg *mem)
{
	uint64_t flags = 0;

	if (mem && mem->mem_type != TTM_PL_SYSTEM)
		flags |= AMDGPU_PTE_VALID;

	if (mem && mem->mem_type == TTM_PL_TT) {
		flags |= AMDGPU_PTE_SYSTEM;

		if (ttm->caching_state == tt_cached)
			flags |= AMDGPU_PTE_SNOOPED;
	}

	flags |= adev->gart.gart_pte_flags;
	flags |= AMDGPU_PTE_READABLE;

	if (!amdgpu_ttm_tt_is_readonly(ttm))
		flags |= AMDGPU_PTE_WRITEABLE;

	return flags;
}
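
/*
 * Illustrative example (not from the original file): for a writeable,
 * CPU-cached GTT mapping the function above returns
 * AMDGPU_PTE_VALID | AMDGPU_PTE_SYSTEM | AMDGPU_PTE_SNOOPED |
 * AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE, plus whatever ASIC specific
 * bits are preset in adev->gart.gart_pte_flags.
 */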
/**
 * amdgpu_ttm_bo_eviction_valuable - Check to see if we can evict
 * a buffer object.
 *
 * Return true if eviction is sensible. Called by
 * ttm_mem_evict_first() on behalf of ttm_bo_mem_force_space()
 * which tries to evict buffer objects until it can find space
 * for a new object and by ttm_bo_force_list_clean() which is
 * used to clean out a memory space.
 */
static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
					    const struct ttm_place *place)
{
	unsigned long num_pages = bo->mem.num_pages;
	struct drm_mm_node *node = bo->mem.mm_node;
	struct reservation_object_list *flist;
	struct dma_fence *f;
	int i;

	/* If bo is a KFD BO, check if the bo belongs to the current process.
	 * If true, then return false as any KFD process needs all its BOs to
	 * be resident to run successfully
	 */
	flist = reservation_object_get_list(bo->resv);
	if (flist) {
		for (i = 0; i < flist->shared_count; ++i) {
			f = rcu_dereference_protected(flist->shared[i],
				reservation_object_held(bo->resv));
			if (amdkfd_fence_check_mm(f, current->mm))
				return false;
		}
	}

	switch (bo->mem.mem_type) {
	case TTM_PL_TT:
		return true;

	case TTM_PL_VRAM:
		/* Check each drm MM node individually */
		while (num_pages) {
			if (place->fpfn < (node->start + node->size) &&
			    !(place->lpfn && place->lpfn <= node->start))
				return true;

			num_pages -= node->size;
			++node;
		}
		return false;

	default:
		break;
	}

	return ttm_bo_eviction_valuable(bo, place);
}
/**
 * amdgpu_ttm_access_memory - Read or Write memory that backs a
 * buffer object.
 *
 * @bo:  The buffer object to read/write
 * @offset:  Offset into buffer object
 * @buf:  Secondary buffer to write/read from
 * @len: Length in bytes of access
 * @write:  true if writing
 *
 * This is used to access VRAM that backs a buffer object via MMIO
 * access for debugging purposes.
 */
static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
				    unsigned long offset,
				    void *buf, int len, int write)
{
	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
	struct drm_mm_node *nodes;
	uint32_t value = 0;
	int ret = 0;
	uint64_t pos;
	unsigned long flags;

	if (bo->mem.mem_type != TTM_PL_VRAM)
		return -EIO;

	nodes = amdgpu_find_mm_node(&abo->tbo.mem, &offset);
	pos = (nodes->start << PAGE_SHIFT) + offset;

	while (len && pos < adev->gmc.mc_vram_size) {
		uint64_t aligned_pos = pos & ~(uint64_t)3;
		uint32_t bytes = 4 - (pos & 3);
		uint32_t shift = (pos & 3) * 8;
		uint32_t mask = 0xffffffff << shift;

		if (len < bytes) {
			mask &= 0xffffffff >> (bytes - len) * 8;
			bytes = len;
		}

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)aligned_pos) | 0x80000000);
		WREG32_NO_KIQ(mmMM_INDEX_HI, aligned_pos >> 31);
		if (!write || mask != 0xffffffff)
			value = RREG32_NO_KIQ(mmMM_DATA);
		if (write) {
			value &= ~mask;
			value |= (*(uint32_t *)buf << shift) & mask;
			WREG32_NO_KIQ(mmMM_DATA, value);
		}
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
		if (!write) {
			value = (value & mask) >> shift;
			memcpy(buf, &value, bytes);
		}

		ret += bytes;
		buf = (uint8_t *)buf + bytes;
		pos += bytes;
		len -= bytes;
		if (pos >= (nodes->start + nodes->size) << PAGE_SHIFT) {
			++nodes;
			pos = (nodes->start << PAGE_SHIFT);
		}
	}

	return ret;
}
static struct ttm_bo_driver amdgpu_bo_driver = {
	.ttm_tt_create = &amdgpu_ttm_tt_create,
	.ttm_tt_populate = &amdgpu_ttm_tt_populate,
	.ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
	.invalidate_caches = &amdgpu_invalidate_caches,
	.init_mem_type = &amdgpu_init_mem_type,
	.eviction_valuable = amdgpu_ttm_bo_eviction_valuable,
	.evict_flags = &amdgpu_evict_flags,
	.move = &amdgpu_bo_move,
	.verify_access = &amdgpu_verify_access,
	.move_notify = &amdgpu_bo_move_notify,
	.fault_reserve_notify = &amdgpu_bo_fault_reserve_notify,
	.io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
	.io_mem_free = &amdgpu_ttm_io_mem_free,
	.io_mem_pfn = amdgpu_ttm_io_mem_pfn,
	.access_memory = &amdgpu_ttm_access_memory
};
/*
 * Firmware Reservation functions
 */
/**
 * amdgpu_ttm_fw_reserve_vram_fini - free fw reserved vram
 *
 * @adev: amdgpu_device pointer
 *
 * free fw reserved vram if it has been reserved.
 */
static void amdgpu_ttm_fw_reserve_vram_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->fw_vram_usage.reserved_bo,
		NULL, &adev->fw_vram_usage.va);
}
/**
 * amdgpu_ttm_fw_reserve_vram_init - create bo vram reservation from fw
 *
 * @adev: amdgpu_device pointer
 *
 * create bo vram reservation from fw.
 */
static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
{
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_bo_param bp;
	int r = 0;
	int i;
	u64 vram_size = adev->gmc.visible_vram_size;
	u64 offset = adev->fw_vram_usage.start_offset;
	u64 size = adev->fw_vram_usage.size;
	struct amdgpu_bo *bo;

	memset(&bp, 0, sizeof(bp));
	bp.size = adev->fw_vram_usage.size;
	bp.byte_align = PAGE_SIZE;
	bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
	bp.flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
		AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
	bp.type = ttm_bo_type_kernel;
	adev->fw_vram_usage.va = NULL;
	adev->fw_vram_usage.reserved_bo = NULL;

	if (adev->fw_vram_usage.size > 0 &&
		adev->fw_vram_usage.size <= vram_size) {

		r = amdgpu_bo_create(adev, &bp,
				     &adev->fw_vram_usage.reserved_bo);
		if (r)
			goto error_create;

		r = amdgpu_bo_reserve(adev->fw_vram_usage.reserved_bo, false);
		if (r)
			goto error_reserve;

		/* remove the original mem node and create a new one at the
		 * request position
		 */
		bo = adev->fw_vram_usage.reserved_bo;
		offset = ALIGN(offset, PAGE_SIZE);
		for (i = 0; i < bo->placement.num_placement; ++i) {
			bo->placements[i].fpfn = offset >> PAGE_SHIFT;
			bo->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
		}

		ttm_bo_mem_put(&bo->tbo, &bo->tbo.mem);
		r = ttm_bo_mem_space(&bo->tbo, &bo->placement,
				     &bo->tbo.mem, &ctx);
		if (r)
			goto error_pin;

		r = amdgpu_bo_pin_restricted(adev->fw_vram_usage.reserved_bo,
			AMDGPU_GEM_DOMAIN_VRAM,
			adev->fw_vram_usage.start_offset,
			(adev->fw_vram_usage.start_offset +
			adev->fw_vram_usage.size), NULL);
		if (r)
			goto error_pin;

		r = amdgpu_bo_kmap(adev->fw_vram_usage.reserved_bo,
			&adev->fw_vram_usage.va);
		if (r)
			goto error_kmap;

		amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
	}
	return r;

error_kmap:
	amdgpu_bo_unpin(adev->fw_vram_usage.reserved_bo);
error_pin:
	amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
error_reserve:
	amdgpu_bo_unref(&adev->fw_vram_usage.reserved_bo);
error_create:
	adev->fw_vram_usage.va = NULL;
	adev->fw_vram_usage.reserved_bo = NULL;
	return r;
}
/**
 * amdgpu_ttm_init - Init the memory management (ttm) as well as
 * various gtt/vram related fields.
 *
 * This initializes all of the memory space pools that the TTM layer
 * will need such as the GTT space (system memory mapped to the device),
 * VRAM (on-board memory), and on-chip memories (GDS, GWS, OA) which
 * can be mapped per VMID.
 */
int amdgpu_ttm_init(struct amdgpu_device *adev)
{
	uint64_t gtt_size;
	int r;
	u64 vis_vram_limit;

	/* initialize global references for vram/gtt */
	r = amdgpu_ttm_global_init(adev);
	if (r)
		return r;

	/* No others user of address space so set it to 0 */
	r = ttm_bo_device_init(&adev->mman.bdev,
			       adev->mman.bo_global_ref.ref.object,
			       &amdgpu_bo_driver,
			       adev->ddev->anon_inode->i_mapping,
			       DRM_FILE_PAGE_OFFSET,
			       adev->need_dma32);
	if (r) {
		DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
		return r;
	}
	adev->mman.initialized = true;

	/* We opt to avoid OOM on system pages allocations */
	adev->mman.bdev.no_retry = true;

	/* Initialize VRAM pool with all of VRAM divided into pages */
	r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_VRAM,
			   adev->gmc.real_vram_size >> PAGE_SHIFT);
	if (r) {
		DRM_ERROR("Failed initializing VRAM heap.\n");
		return r;
	}

	/* Reduce size of CPU-visible VRAM if requested */
	vis_vram_limit = (u64)amdgpu_vis_vram_limit * 1024 * 1024;
	if (amdgpu_vis_vram_limit > 0 &&
	    vis_vram_limit <= adev->gmc.visible_vram_size)
		adev->gmc.visible_vram_size = vis_vram_limit;

	/* Change the size here instead of the init above so only lpfn is affected */
	amdgpu_ttm_set_buffer_funcs_status(adev, false);

	adev->mman.aper_base_kaddr = ioremap_wc(adev->gmc.aper_base,
						adev->gmc.visible_vram_size);

	/*
	 *The reserved vram for firmware must be pinned to the specified
	 *place on the VRAM, so reserve it early.
	 */
	r = amdgpu_ttm_fw_reserve_vram_init(adev);
	if (r)
		return r;

	/* allocate memory as required for VGA
	 * This is used for VGA emulation and pre-OS scanout buffers to
	 * avoid display artifacts while transitioning between pre-OS
	 * and driver.
	 */
	if (adev->gmc.stolen_size) {
		r = amdgpu_bo_create_kernel(adev, adev->gmc.stolen_size, PAGE_SIZE,
					    AMDGPU_GEM_DOMAIN_VRAM,
					    &adev->stolen_vga_memory,
					    NULL, NULL);
		if (r)
			return r;
	}
	DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
		 (unsigned) (adev->gmc.real_vram_size / (1024 * 1024)));

	/* Compute GTT size, either based on 3/4th the size of RAM size
	 * or whatever the user passed on module init */
	if (amdgpu_gtt_size == -1) {
		struct sysinfo si;

		si_meminfo(&si);
		gtt_size = min(max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
			       adev->gmc.mc_vram_size),
			       ((uint64_t)si.totalram * si.mem_unit * 3/4));
	} else
		gtt_size = (uint64_t)amdgpu_gtt_size << 20;
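
	/*
	 * Illustrative arithmetic (not from the original file): assuming
	 * AMDGPU_DEFAULT_GTT_SIZE_MB is 3072, a machine with 16 GiB of RAM
	 * and 4 GiB of VRAM gets min(max(3 GiB, 4 GiB), 12 GiB) = 4 GiB of
	 * GTT by default, while passing amdgpu_gtt_size on the module
	 * command line forces that many MiB instead.
	 */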
	/* Initialize GTT memory pool */
	r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_TT, gtt_size >> PAGE_SHIFT);
	if (r) {
		DRM_ERROR("Failed initializing GTT heap.\n");
		return r;
	}
	DRM_INFO("amdgpu: %uM of GTT memory ready.\n",
		 (unsigned)(gtt_size / (1024 * 1024)));

	/* Initialize various on-chip memory pools */
	adev->gds.mem.total_size = adev->gds.mem.total_size << AMDGPU_GDS_SHIFT;
	adev->gds.mem.gfx_partition_size = adev->gds.mem.gfx_partition_size << AMDGPU_GDS_SHIFT;
	adev->gds.mem.cs_partition_size = adev->gds.mem.cs_partition_size << AMDGPU_GDS_SHIFT;
	adev->gds.gws.total_size = adev->gds.gws.total_size << AMDGPU_GWS_SHIFT;
	adev->gds.gws.gfx_partition_size = adev->gds.gws.gfx_partition_size << AMDGPU_GWS_SHIFT;
	adev->gds.gws.cs_partition_size = adev->gds.gws.cs_partition_size << AMDGPU_GWS_SHIFT;
	adev->gds.oa.total_size = adev->gds.oa.total_size << AMDGPU_OA_SHIFT;
	adev->gds.oa.gfx_partition_size = adev->gds.oa.gfx_partition_size << AMDGPU_OA_SHIFT;
	adev->gds.oa.cs_partition_size = adev->gds.oa.cs_partition_size << AMDGPU_OA_SHIFT;

	/* GDS Memory */
	if (adev->gds.mem.total_size) {
		r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GDS,
				   adev->gds.mem.total_size >> PAGE_SHIFT);
		if (r) {
			DRM_ERROR("Failed initializing GDS heap.\n");
			return r;
		}
	}

	/* GWS */
	if (adev->gds.gws.total_size) {
		r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GWS,
				   adev->gds.gws.total_size >> PAGE_SHIFT);
		if (r) {
			DRM_ERROR("Failed initializing gws heap.\n");
			return r;
		}
	}

	/* OA */
	if (adev->gds.oa.total_size) {
		r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_OA,
				   adev->gds.oa.total_size >> PAGE_SHIFT);
		if (r) {
			DRM_ERROR("Failed initializing oa heap.\n");
			return r;
		}
	}

	/* Register debugfs entries for amdgpu_ttm */
	r = amdgpu_ttm_debugfs_init(adev);
	if (r) {
		DRM_ERROR("Failed to init debugfs\n");
		return r;
	}
	return 0;
}
/**
 * amdgpu_ttm_late_init - Handle any late initialization for
 * amdgpu_ttm
 */
void amdgpu_ttm_late_init(struct amdgpu_device *adev)
{
	/* return the VGA stolen memory (if any) back to VRAM */
	amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL);
}
/**
 * amdgpu_ttm_fini - De-initialize the TTM memory pools
 */
void amdgpu_ttm_fini(struct amdgpu_device *adev)
{
	if (!adev->mman.initialized)
		return;

	amdgpu_ttm_debugfs_fini(adev);
	amdgpu_ttm_fw_reserve_vram_fini(adev);
	if (adev->mman.aper_base_kaddr)
		iounmap(adev->mman.aper_base_kaddr);
	adev->mman.aper_base_kaddr = NULL;

	ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_VRAM);
	ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_TT);
	if (adev->gds.mem.total_size)
		ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GDS);
	if (adev->gds.gws.total_size)
		ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GWS);
	if (adev->gds.oa.total_size)
		ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_OA);
	ttm_bo_device_release(&adev->mman.bdev);
	amdgpu_ttm_global_fini(adev);
	adev->mman.initialized = false;
	DRM_INFO("amdgpu: ttm finalized\n");
}
/**
 * amdgpu_ttm_set_buffer_funcs_status - enable/disable use of buffer functions
 *
 * @adev: amdgpu_device pointer
 * @enable: true when we can use buffer functions.
 *
 * Enable/disable use of buffer functions during suspend/resume. This should
 * only be called at bootup or when userspace isn't running.
 */
void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
{
	struct ttm_mem_type_manager *man = &adev->mman.bdev.man[TTM_PL_VRAM];
	uint64_t size;

	if (!adev->mman.initialized || adev->in_gpu_reset)
		return;

	/* this just adjusts TTM size idea, which sets lpfn to the correct value */
	if (enable)
		size = adev->gmc.real_vram_size;
	else
		size = adev->gmc.visible_vram_size;
	man->size = size >> PAGE_SHIFT;
	adev->mman.buffer_funcs_enabled = enable;
}
int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv;
	struct amdgpu_device *adev;

	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
		return -EINVAL;

	file_priv = filp->private_data;
	adev = file_priv->minor->dev->dev_private;
	if (adev == NULL)
		return -EINVAL;

	return ttm_bo_mmap(filp, vma, &adev->mman.bdev);
}
static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
			     struct ttm_mem_reg *mem, unsigned num_pages,
			     uint64_t offset, unsigned window,
			     struct amdgpu_ring *ring,
			     uint64_t *addr)
{
	struct amdgpu_ttm_tt *gtt = (void *)bo->ttm;
	struct amdgpu_device *adev = ring->adev;
	struct ttm_tt *ttm = bo->ttm;
	struct amdgpu_job *job;
	unsigned num_dw, num_bytes;
	dma_addr_t *dma_address;
	struct dma_fence *fence;
	uint64_t src_addr, dst_addr;
	uint64_t flags;
	int r;

	BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
	       AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);

	*addr = adev->gmc.gart_start;
	*addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
		AMDGPU_GPU_PAGE_SIZE;

	num_dw = adev->mman.buffer_funcs->copy_num_dw;
	while (num_dw & 0x7)
		num_dw++;

	num_bytes = num_pages * 8;

	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes, &job);
	if (r)
		return r;

	src_addr = num_dw * 4;
	src_addr += job->ibs[0].gpu_addr;

	dst_addr = adev->gart.table_addr;
	dst_addr += window * AMDGPU_GTT_MAX_TRANSFER_SIZE * 8;
	amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
				dst_addr, num_bytes);

	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
	WARN_ON(job->ibs[0].length_dw > num_dw);

	dma_address = &gtt->ttm.dma_address[offset >> PAGE_SHIFT];
	flags = amdgpu_ttm_tt_pte_flags(adev, ttm, mem);
	r = amdgpu_gart_map(adev, 0, num_pages, dma_address, flags,
			    &job->ibs[0].ptr[num_dw]);
	if (r)
		goto error_free;

	r = amdgpu_job_submit(job, ring, &adev->mman.entity,
			      AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
	if (r)
		goto error_free;

	dma_fence_put(fence);

	return r;

error_free:
	amdgpu_job_free(job);
	return r;
}
int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
		       uint64_t dst_offset, uint32_t byte_count,
		       struct reservation_object *resv,
		       struct dma_fence **fence, bool direct_submit,
		       bool vm_needs_flush)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_job *job;

	uint32_t max_bytes;
	unsigned num_loops, num_dw;
	unsigned i;
	int r;

	if (direct_submit && !ring->ready) {
		DRM_ERROR("Trying to move memory with ring turned off.\n");
		return -EINVAL;
	}

	max_bytes = adev->mman.buffer_funcs->copy_max_bytes;
	num_loops = DIV_ROUND_UP(byte_count, max_bytes);
	num_dw = num_loops * adev->mman.buffer_funcs->copy_num_dw;

	/* for IB padding */
	while (num_dw & 0x7)
		num_dw++;

	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
	if (r)
		return r;

	job->vm_needs_flush = vm_needs_flush;
	if (resv) {
		r = amdgpu_sync_resv(adev, &job->sync, resv,
				     AMDGPU_FENCE_OWNER_UNDEFINED,
				     false);
		if (r) {
			DRM_ERROR("sync failed (%d).\n", r);
			goto error_free;
		}
	}

	for (i = 0; i < num_loops; i++) {
		uint32_t cur_size_in_bytes = min(byte_count, max_bytes);

		amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_offset,
					dst_offset, cur_size_in_bytes);

		src_offset += cur_size_in_bytes;
		dst_offset += cur_size_in_bytes;
		byte_count -= cur_size_in_bytes;
	}

	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
	WARN_ON(job->ibs[0].length_dw > num_dw);
	if (direct_submit) {
		r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs,
				       NULL, fence);
		job->fence = dma_fence_get(*fence);
		if (r)
			DRM_ERROR("Error scheduling IBs (%d)\n", r);
		amdgpu_job_free(job);
	} else {
		r = amdgpu_job_submit(job, ring, &adev->mman.entity,
				      AMDGPU_FENCE_OWNER_UNDEFINED, fence);
		if (r)
			goto error_free;
	}

	return r;

error_free:
	amdgpu_job_free(job);
	return r;
}
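
/*
 * Illustrative arithmetic (not from the original file): with copy_max_bytes
 * of 4 MiB and a byte_count of 10 MiB, num_loops above is
 * DIV_ROUND_UP(10 MiB, 4 MiB) = 3, so the IB carries three copy packets of
 * 4 MiB, 4 MiB and 2 MiB, and num_dw is 3 * copy_num_dw rounded up to a
 * multiple of 8 for padding.
 */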
int amdgpu_fill_buffer(struct amdgpu_bo *bo,
		       uint32_t src_data,
		       struct reservation_object *resv,
		       struct dma_fence **fence)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	uint32_t max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;

	struct drm_mm_node *mm_node;
	unsigned long num_pages;
	unsigned int num_loops, num_dw;

	struct amdgpu_job *job;
	int r;

	if (!adev->mman.buffer_funcs_enabled) {
		DRM_ERROR("Trying to clear memory with ring turned off.\n");
		return -EINVAL;
	}

	if (bo->tbo.mem.mem_type == TTM_PL_TT) {
		r = amdgpu_ttm_alloc_gart(&bo->tbo);
		if (r)
			return r;
	}

	num_pages = bo->tbo.num_pages;
	mm_node = bo->tbo.mem.mm_node;
	num_loops = 0;
	while (num_pages) {
		uint32_t byte_count = mm_node->size << PAGE_SHIFT;

		num_loops += DIV_ROUND_UP(byte_count, max_bytes);
		num_pages -= mm_node->size;
		++mm_node;
	}
	num_dw = num_loops * adev->mman.buffer_funcs->fill_num_dw;

	/* for IB padding */
	num_dw += 64;

	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
	if (r)
		return r;

	if (resv) {
		r = amdgpu_sync_resv(adev, &job->sync, resv,
				     AMDGPU_FENCE_OWNER_UNDEFINED, false);
		if (r) {
			DRM_ERROR("sync failed (%d).\n", r);
			goto error_free;
		}
	}

	num_pages = bo->tbo.num_pages;
	mm_node = bo->tbo.mem.mm_node;

	while (num_pages) {
		uint32_t byte_count = mm_node->size << PAGE_SHIFT;
		uint64_t dst_addr;

		dst_addr = amdgpu_mm_node_addr(&bo->tbo, mm_node, &bo->tbo.mem);
		while (byte_count) {
			uint32_t cur_size_in_bytes = min(byte_count, max_bytes);

			amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data,
						dst_addr, cur_size_in_bytes);

			dst_addr += cur_size_in_bytes;
			byte_count -= cur_size_in_bytes;
		}

		num_pages -= mm_node->size;
		++mm_node;
	}

	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
	WARN_ON(job->ibs[0].length_dw > num_dw);
	r = amdgpu_job_submit(job, ring, &adev->mman.entity,
			      AMDGPU_FENCE_OWNER_UNDEFINED, fence);
	if (r)
		goto error_free;

	return 0;

error_free:
	amdgpu_job_free(job);
	return r;
}
#if defined(CONFIG_DEBUG_FS)

static int amdgpu_mm_dump_table(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	unsigned ttm_pl = *(int *)node->info_ent->data;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct ttm_mem_type_manager *man = &adev->mman.bdev.man[ttm_pl];
	struct drm_printer p = drm_seq_file_printer(m);

	man->func->debug(man, &p);
	return 0;
}

static int ttm_pl_vram = TTM_PL_VRAM;
static int ttm_pl_tt = TTM_PL_TT;

static const struct drm_info_list amdgpu_ttm_debugfs_list[] = {
	{"amdgpu_vram_mm", amdgpu_mm_dump_table, 0, &ttm_pl_vram},
	{"amdgpu_gtt_mm", amdgpu_mm_dump_table, 0, &ttm_pl_tt},
	{"ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL},
#ifdef CONFIG_SWIOTLB
	{"ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL}
#endif
};
/**
 * amdgpu_ttm_vram_read - Linear read access to VRAM
 *
 * Accesses VRAM via MMIO for debugging purposes.
 */
static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
				    size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	if (*pos >= adev->gmc.mc_vram_size)
		return -ENXIO;

	while (size) {
		unsigned long flags;
		uint32_t value;

		if (*pos >= adev->gmc.mc_vram_size)
			return result;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000);
		WREG32_NO_KIQ(mmMM_INDEX_HI, *pos >> 31);
		value = RREG32_NO_KIQ(mmMM_DATA);
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);

		r = put_user(value, (uint32_t *)buf);
		if (r)
			return r;

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}
/**
 * amdgpu_ttm_vram_write - Linear write access to VRAM
 *
 * Accesses VRAM via MMIO for debugging purposes.
 */
static ssize_t amdgpu_ttm_vram_write(struct file *f, const char __user *buf,
				     size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	if (*pos >= adev->gmc.mc_vram_size)
		return -ENXIO;

	while (size) {
		unsigned long flags;
		uint32_t value;

		if (*pos >= adev->gmc.mc_vram_size)
			return result;

		r = get_user(value, (uint32_t *)buf);
		if (r)
			return r;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000);
		WREG32_NO_KIQ(mmMM_INDEX_HI, *pos >> 31);
		WREG32_NO_KIQ(mmMM_DATA, value);
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}
static const struct file_operations amdgpu_ttm_vram_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_ttm_vram_read,
	.write = amdgpu_ttm_vram_write,
	.llseek = default_llseek,
};
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS

/**
 * amdgpu_ttm_gtt_read - Linear read access to GTT memory
 */
static ssize_t amdgpu_ttm_gtt_read(struct file *f, char __user *buf,
				   size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	while (size) {
		loff_t p = *pos / PAGE_SIZE;
		unsigned off = *pos & ~PAGE_MASK;
		size_t cur_size = min_t(size_t, size, PAGE_SIZE - off);
		struct page *page;
		void *ptr;

		if (p >= adev->gart.num_cpu_pages)
			return result;

		page = adev->gart.pages[p];
		if (page) {
			ptr = kmap(page);
			ptr += off;

			r = copy_to_user(buf, ptr, cur_size);
			kunmap(adev->gart.pages[p]);
		} else
			r = clear_user(buf, cur_size);

		if (r)
			return -EFAULT;

		result += cur_size;
		buf += cur_size;
		*pos += cur_size;
		size -= cur_size;
	}

	return result;
}

static const struct file_operations amdgpu_ttm_gtt_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_ttm_gtt_read,
	.llseek = default_llseek
};

#endif
/**
 * amdgpu_iomem_read - Virtual read access to GPU mapped memory
 *
 * This function is used to read memory that has been mapped to the
 * GPU and the known addresses are not physical addresses but instead
 * bus addresses (e.g., what you'd put in an IB or ring buffer).
 */
static ssize_t amdgpu_iomem_read(struct file *f, char __user *buf,
				 size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	struct iommu_domain *dom;
	ssize_t result = 0;
	int r;

	/* retrieve the IOMMU domain if any for this device */
	dom = iommu_get_domain_for_dev(adev->dev);

	while (size) {
		phys_addr_t addr = *pos & PAGE_MASK;
		loff_t off = *pos & ~PAGE_MASK;
		size_t bytes = PAGE_SIZE - off;
		unsigned long pfn;
		struct page *p;
		void *ptr;

		bytes = bytes < size ? bytes : size;

		/* Translate the bus address to a physical address.  If
		 * the domain is NULL it means there is no IOMMU active
		 * and the address translation is the identity
		 */
		addr = dom ? iommu_iova_to_phys(dom, addr) : addr;

		pfn = addr >> PAGE_SHIFT;
		if (!pfn_valid(pfn))
			return -EPERM;

		p = pfn_to_page(pfn);
		if (p->mapping != adev->mman.bdev.dev_mapping)
			return -EPERM;

		ptr = kmap(p);
		r = copy_to_user(buf, ptr + off, bytes);
		kunmap(p);
		if (r)
			return -EFAULT;

		size -= bytes;
		*pos += bytes;
		buf += bytes;
		result += bytes;
	}

	return result;
}
/**
 * amdgpu_iomem_write - Virtual write access to GPU mapped memory
 *
 * This function is used to write memory that has been mapped to the
 * GPU and the known addresses are not physical addresses but instead
 * bus addresses (e.g., what you'd put in an IB or ring buffer).
 */
static ssize_t amdgpu_iomem_write(struct file *f, const char __user *buf,
				  size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	struct iommu_domain *dom;
	ssize_t result = 0;
	int r;

	dom = iommu_get_domain_for_dev(adev->dev);

	while (size) {
		phys_addr_t addr = *pos & PAGE_MASK;
		loff_t off = *pos & ~PAGE_MASK;
		size_t bytes = PAGE_SIZE - off;
		unsigned long pfn;
		struct page *p;
		void *ptr;

		bytes = bytes < size ? bytes : size;

		addr = dom ? iommu_iova_to_phys(dom, addr) : addr;

		pfn = addr >> PAGE_SHIFT;
		if (!pfn_valid(pfn))
			return -EPERM;

		p = pfn_to_page(pfn);
		if (p->mapping != adev->mman.bdev.dev_mapping)
			return -EPERM;

		ptr = kmap(p);
		r = copy_from_user(ptr + off, buf, bytes);
		kunmap(p);
		if (r)
			return -EFAULT;

		size -= bytes;
		*pos += bytes;
		buf += bytes;
		result += bytes;
	}

	return result;
}

static const struct file_operations amdgpu_ttm_iomem_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_iomem_read,
	.write = amdgpu_iomem_write,
	.llseek = default_llseek
};
static const struct {
	char *name;
	const struct file_operations *fops;
	int domain;
} ttm_debugfs_entries[] = {
	{ "amdgpu_vram", &amdgpu_ttm_vram_fops, TTM_PL_VRAM },
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
	{ "amdgpu_gtt", &amdgpu_ttm_gtt_fops, TTM_PL_TT },
#endif
	{ "amdgpu_iomem", &amdgpu_ttm_iomem_fops, TTM_PL_SYSTEM },
};
static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned count;

	struct drm_minor *minor = adev->ddev->primary;
	struct dentry *ent, *root = minor->debugfs_root;

	for (count = 0; count < ARRAY_SIZE(ttm_debugfs_entries); count++) {
		ent = debugfs_create_file(
				ttm_debugfs_entries[count].name,
				S_IFREG | S_IRUGO, root,
				adev,
				ttm_debugfs_entries[count].fops);
		if (IS_ERR(ent))
			return PTR_ERR(ent);
		if (ttm_debugfs_entries[count].domain == TTM_PL_VRAM)
			i_size_write(ent->d_inode, adev->gmc.mc_vram_size);
		else if (ttm_debugfs_entries[count].domain == TTM_PL_TT)
			i_size_write(ent->d_inode, adev->gmc.gart_size);
		adev->mman.debugfs_entries[count] = ent;
	}

	count = ARRAY_SIZE(amdgpu_ttm_debugfs_list);

#ifdef CONFIG_SWIOTLB
	if (!(adev->need_swiotlb && swiotlb_nr_tbl()))
		--count;
#endif

	return amdgpu_debugfs_add_files(adev, amdgpu_ttm_debugfs_list, count);
#else
	return 0;
#endif
}
static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(ttm_debugfs_entries); i++)
		debugfs_remove(adev->mman.debugfs_entries[i]);
#endif
}