/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */

#include <linux/dma-fence-array.h>
#include <linux/interval_tree_generic.h>
#include <linux/idr.h>
#include <linux/dma-buf.h>

#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_gmc.h"
#include "amdgpu_xgmi.h"
#include "amdgpu_dma_buf.h"
/**
 * DOC: GPUVM
 *
 * GPUVM is similar to the legacy GART on older ASICs, however rather than
 * there being a single global GART table for the entire GPU, there are
 * multiple VM page tables active at any given time.  The VM page tables can
 * contain a mix of VRAM pages and system memory pages, and system memory
 * pages can be mapped as snooped (cached system pages) or unsnooped
 * (uncached system pages).
 *
 * Each VM has an ID associated with it and there is a page table associated
 * with each VMID.  When executing a command buffer, the kernel tells the
 * ring what VMID to use for that command buffer.  VMIDs are allocated
 * dynamically as commands are submitted.  The userspace drivers maintain
 * their own address space and the kernel sets up their page tables
 * accordingly when they submit their command buffers and a VMID is assigned.
 *
 * Cayman/Trinity support up to 8 active VMs at any given time; SI supports 16.
 */
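/*
 * Translation itself is done by walking a multi-level page table: the root
 * page directory points at lower level page directories, which in turn point
 * at the page table blocks holding the actual PTEs.  The helpers below
 * (amdgpu_vm_level_shift(), amdgpu_vm_num_entries(), amdgpu_vm_entries_mask())
 * describe how a GPU page frame number is split into the index for each level.
 */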
#define START(node) ((node)->start)
#define LAST(node) ((node)->last)

INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last,
		     START, LAST, static, amdgpu_vm_it)
/**
 * struct amdgpu_prt_cb - Helper to disable partial resident texture feature from a fence callback
 */
struct amdgpu_prt_cb {

	/**
	 * @adev: amdgpu device
	 */
	struct amdgpu_device *adev;

	/**
	 * @cb: callback
	 */
	struct dma_fence_cb cb;
};
/*
 * vm eviction_lock can be taken in MMU notifiers. Make sure no reclaim-FS
 * happens while holding this lock anywhere to prevent deadlocks when
 * an MMU notifier runs in reclaim-FS context.
 */
static inline void amdgpu_vm_eviction_lock(struct amdgpu_vm *vm)
{
	mutex_lock(&vm->eviction_lock);
	vm->saved_flags = memalloc_nofs_save();
}

static inline int amdgpu_vm_eviction_trylock(struct amdgpu_vm *vm)
{
	if (mutex_trylock(&vm->eviction_lock)) {
		vm->saved_flags = memalloc_nofs_save();
		return 1;
	}
	return 0;
}

static inline void amdgpu_vm_eviction_unlock(struct amdgpu_vm *vm)
{
	memalloc_nofs_restore(vm->saved_flags);
	mutex_unlock(&vm->eviction_lock);
}
114 * amdgpu_vm_level_shift - return the addr shift for each level
116 * @adev: amdgpu_device pointer
120 * The number of bits the pfn needs to be right shifted for a level.
122 static unsigned amdgpu_vm_level_shift(struct amdgpu_device
*adev
,
129 return 9 * (AMDGPU_VM_PDB0
- level
) +
130 adev
->vm_manager
.block_size
;
139 * amdgpu_vm_num_entries - return the number of entries in a PD/PT
141 * @adev: amdgpu_device pointer
145 * The number of entries in a page directory or page table.
147 static unsigned amdgpu_vm_num_entries(struct amdgpu_device
*adev
,
150 unsigned shift
= amdgpu_vm_level_shift(adev
,
151 adev
->vm_manager
.root_level
);
153 if (level
== adev
->vm_manager
.root_level
)
154 /* For the root directory */
155 return round_up(adev
->vm_manager
.max_pfn
, 1ULL << shift
)
157 else if (level
!= AMDGPU_VM_PTB
)
158 /* Everything in between */
161 /* For the page tables on the leaves */
162 return AMDGPU_VM_PTE_COUNT(adev
);
166 * amdgpu_vm_num_ats_entries - return the number of ATS entries in the root PD
168 * @adev: amdgpu_device pointer
 * The number of entries in the root page directory which need the ATS setting.
173 static unsigned amdgpu_vm_num_ats_entries(struct amdgpu_device
*adev
)
177 shift
= amdgpu_vm_level_shift(adev
, adev
->vm_manager
.root_level
);
178 return AMDGPU_GMC_HOLE_START
>> (shift
+ AMDGPU_GPU_PAGE_SHIFT
);
182 * amdgpu_vm_entries_mask - the mask to get the entry number of a PD/PT
184 * @adev: amdgpu_device pointer
188 * The mask to extract the entry number of a PD/PT from an address.
190 static uint32_t amdgpu_vm_entries_mask(struct amdgpu_device
*adev
,
193 if (level
<= adev
->vm_manager
.root_level
)
195 else if (level
!= AMDGPU_VM_PTB
)
198 return AMDGPU_VM_PTE_COUNT(adev
) - 1;
202 * amdgpu_vm_bo_size - returns the size of the BOs in bytes
204 * @adev: amdgpu_device pointer
208 * The size of the BO for a page directory or page table in bytes.
210 static unsigned amdgpu_vm_bo_size(struct amdgpu_device
*adev
, unsigned level
)
212 return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_entries(adev
, level
) * 8);
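	/* Each PD/PT entry is an 8 byte descriptor, hence the factor of 8 above. */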
216 * amdgpu_vm_bo_evicted - vm_bo is evicted
218 * @vm_bo: vm_bo which is evicted
220 * State for PDs/PTs and per VM BOs which are not at the location they should
223 static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base
*vm_bo
)
225 struct amdgpu_vm
*vm
= vm_bo
->vm
;
226 struct amdgpu_bo
*bo
= vm_bo
->bo
;
229 if (bo
->tbo
.type
== ttm_bo_type_kernel
)
230 list_move(&vm_bo
->vm_status
, &vm
->evicted
);
232 list_move_tail(&vm_bo
->vm_status
, &vm
->evicted
);
235 * amdgpu_vm_bo_moved - vm_bo is moved
237 * @vm_bo: vm_bo which is moved
239 * State for per VM BOs which are moved, but that change is not yet reflected
240 * in the page tables.
242 static void amdgpu_vm_bo_moved(struct amdgpu_vm_bo_base
*vm_bo
)
244 list_move(&vm_bo
->vm_status
, &vm_bo
->vm
->moved
);
248 * amdgpu_vm_bo_idle - vm_bo is idle
250 * @vm_bo: vm_bo which is now idle
252 * State for PDs/PTs and per VM BOs which have gone through the state machine
255 static void amdgpu_vm_bo_idle(struct amdgpu_vm_bo_base
*vm_bo
)
257 list_move(&vm_bo
->vm_status
, &vm_bo
->vm
->idle
);
258 vm_bo
->moved
= false;
262 * amdgpu_vm_bo_invalidated - vm_bo is invalidated
264 * @vm_bo: vm_bo which is now invalidated
 * State for normal BOs which are invalidated and whose change is not yet reflected
269 static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base
*vm_bo
)
271 spin_lock(&vm_bo
->vm
->invalidated_lock
);
272 list_move(&vm_bo
->vm_status
, &vm_bo
->vm
->invalidated
);
273 spin_unlock(&vm_bo
->vm
->invalidated_lock
);
 * amdgpu_vm_bo_relocated - vm_bo is relocated
279 * @vm_bo: vm_bo which is relocated
 * State for PDs/PTs which need to update their parent PD.
282 * For the root PD, just move to idle state.
284 static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base
*vm_bo
)
286 if (vm_bo
->bo
->parent
)
287 list_move(&vm_bo
->vm_status
, &vm_bo
->vm
->relocated
);
289 amdgpu_vm_bo_idle(vm_bo
);
293 * amdgpu_vm_bo_done - vm_bo is done
295 * @vm_bo: vm_bo which is now done
297 * State for normal BOs which are invalidated and that change has been updated
300 static void amdgpu_vm_bo_done(struct amdgpu_vm_bo_base
*vm_bo
)
302 spin_lock(&vm_bo
->vm
->invalidated_lock
);
303 list_move(&vm_bo
->vm_status
, &vm_bo
->vm
->done
);
304 spin_unlock(&vm_bo
->vm
->invalidated_lock
);
308 * amdgpu_vm_bo_base_init - Adds bo to the list of bos associated with the vm
310 * @base: base structure for tracking BO usage in a VM
311 * @vm: vm to which bo is to be added
312 * @bo: amdgpu buffer object
314 * Initialize a bo_va_base structure and add it to the appropriate lists
317 static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base
*base
,
318 struct amdgpu_vm
*vm
,
319 struct amdgpu_bo
*bo
)
324 INIT_LIST_HEAD(&base
->vm_status
);
328 base
->next
= bo
->vm_bo
;
331 if (bo
->tbo
.base
.resv
!= vm
->root
.base
.bo
->tbo
.base
.resv
)
334 vm
->bulk_moveable
= false;
335 if (bo
->tbo
.type
== ttm_bo_type_kernel
&& bo
->parent
)
336 amdgpu_vm_bo_relocated(base
);
338 amdgpu_vm_bo_idle(base
);
340 if (bo
->preferred_domains
&
341 amdgpu_mem_type_to_domain(bo
->tbo
.mem
.mem_type
))
345 * we checked all the prerequisites, but it looks like this per vm bo
346 * is currently evicted. add the bo to the evicted list to make sure it
347 * is validated on next vm use to avoid fault.
349 amdgpu_vm_bo_evicted(base
);
353 * amdgpu_vm_pt_parent - get the parent page directory
355 * @pt: child page table
357 * Helper to get the parent entry for the child page table. NULL if we are at
358 * the root page directory.
360 static struct amdgpu_vm_pt
*amdgpu_vm_pt_parent(struct amdgpu_vm_pt
*pt
)
362 struct amdgpu_bo
*parent
= pt
->base
.bo
->parent
;
367 return container_of(parent
->vm_bo
, struct amdgpu_vm_pt
, base
);
371 * amdgpu_vm_pt_cursor - state for for_each_amdgpu_vm_pt
373 struct amdgpu_vm_pt_cursor
{
375 struct amdgpu_vm_pt
*parent
;
376 struct amdgpu_vm_pt
*entry
;
381 * amdgpu_vm_pt_start - start PD/PT walk
383 * @adev: amdgpu_device pointer
384 * @vm: amdgpu_vm structure
385 * @start: start address of the walk
386 * @cursor: state to initialize
 * Initialize an amdgpu_vm_pt_cursor to start a walk.
390 static void amdgpu_vm_pt_start(struct amdgpu_device
*adev
,
391 struct amdgpu_vm
*vm
, uint64_t start
,
392 struct amdgpu_vm_pt_cursor
*cursor
)
395 cursor
->parent
= NULL
;
396 cursor
->entry
= &vm
->root
;
397 cursor
->level
= adev
->vm_manager
.root_level
;
401 * amdgpu_vm_pt_descendant - go to child node
403 * @adev: amdgpu_device pointer
404 * @cursor: current state
406 * Walk to the child node of the current node.
408 * True if the walk was possible, false otherwise.
410 static bool amdgpu_vm_pt_descendant(struct amdgpu_device
*adev
,
411 struct amdgpu_vm_pt_cursor
*cursor
)
413 unsigned mask
, shift
, idx
;
415 if (!cursor
->entry
->entries
)
418 BUG_ON(!cursor
->entry
->base
.bo
);
419 mask
= amdgpu_vm_entries_mask(adev
, cursor
->level
);
420 shift
= amdgpu_vm_level_shift(adev
, cursor
->level
);
423 idx
= (cursor
->pfn
>> shift
) & mask
;
424 cursor
->parent
= cursor
->entry
;
425 cursor
->entry
= &cursor
->entry
->entries
[idx
];
430 * amdgpu_vm_pt_sibling - go to sibling node
432 * @adev: amdgpu_device pointer
433 * @cursor: current state
435 * Walk to the sibling node of the current node.
437 * True if the walk was possible, false otherwise.
439 static bool amdgpu_vm_pt_sibling(struct amdgpu_device
*adev
,
440 struct amdgpu_vm_pt_cursor
*cursor
)
442 unsigned shift
, num_entries
;
444 /* Root doesn't have a sibling */
448 /* Go to our parents and see if we got a sibling */
449 shift
= amdgpu_vm_level_shift(adev
, cursor
->level
- 1);
450 num_entries
= amdgpu_vm_num_entries(adev
, cursor
->level
- 1);
452 if (cursor
->entry
== &cursor
->parent
->entries
[num_entries
- 1])
455 cursor
->pfn
+= 1ULL << shift
;
456 cursor
->pfn
&= ~((1ULL << shift
) - 1);
462 * amdgpu_vm_pt_ancestor - go to parent node
464 * @cursor: current state
466 * Walk to the parent node of the current node.
468 * True if the walk was possible, false otherwise.
470 static bool amdgpu_vm_pt_ancestor(struct amdgpu_vm_pt_cursor
*cursor
)
476 cursor
->entry
= cursor
->parent
;
477 cursor
->parent
= amdgpu_vm_pt_parent(cursor
->parent
);
 * amdgpu_vm_pt_next - get next PD/PT in hierarchy
484 * @adev: amdgpu_device pointer
485 * @cursor: current state
487 * Walk the PD/PT tree to the next node.
489 static void amdgpu_vm_pt_next(struct amdgpu_device
*adev
,
490 struct amdgpu_vm_pt_cursor
*cursor
)
492 /* First try a newborn child */
493 if (amdgpu_vm_pt_descendant(adev
, cursor
))
	/* If that didn't work, try to find a sibling */
497 while (!amdgpu_vm_pt_sibling(adev
, cursor
)) {
498 /* No sibling, go to our parents and grandparents */
499 if (!amdgpu_vm_pt_ancestor(cursor
)) {
 * amdgpu_vm_pt_first_dfs - start a depth-first search
509 * @adev: amdgpu_device structure
510 * @vm: amdgpu_vm structure
511 * @start: optional cursor to start with
512 * @cursor: state to initialize
 * Starts a depth-first traversal of the PD/PT tree.
516 static void amdgpu_vm_pt_first_dfs(struct amdgpu_device
*adev
,
517 struct amdgpu_vm
*vm
,
518 struct amdgpu_vm_pt_cursor
*start
,
519 struct amdgpu_vm_pt_cursor
*cursor
)
524 amdgpu_vm_pt_start(adev
, vm
, 0, cursor
);
525 while (amdgpu_vm_pt_descendant(adev
, cursor
));
 * amdgpu_vm_pt_continue_dfs - check if the depth-first search should continue
531 * @start: starting point for the search
532 * @entry: current entry
535 * True when the search should continue, false otherwise.
537 static bool amdgpu_vm_pt_continue_dfs(struct amdgpu_vm_pt_cursor
*start
,
538 struct amdgpu_vm_pt
*entry
)
540 return entry
&& (!start
|| entry
!= start
->entry
);
 * amdgpu_vm_pt_next_dfs - get the next node for a depth-first search
546 * @adev: amdgpu_device structure
547 * @cursor: current state
 * Move the cursor to the next node in a depth-first search.
551 static void amdgpu_vm_pt_next_dfs(struct amdgpu_device
*adev
,
552 struct amdgpu_vm_pt_cursor
*cursor
)
558 cursor
->entry
= NULL
;
559 else if (amdgpu_vm_pt_sibling(adev
, cursor
))
560 while (amdgpu_vm_pt_descendant(adev
, cursor
));
562 amdgpu_vm_pt_ancestor(cursor
);
/**
 * for_each_amdgpu_vm_pt_dfs_safe - safe depth-first search of all PDs/PTs
 */
#define for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry)		\
	for (amdgpu_vm_pt_first_dfs((adev), (vm), (start), &(cursor)),		\
	     (entry) = (cursor).entry, amdgpu_vm_pt_next_dfs((adev), &(cursor));\
	     amdgpu_vm_pt_continue_dfs((start), (entry));			\
	     (entry) = (cursor).entry, amdgpu_vm_pt_next_dfs((adev), &(cursor)))
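/*
 * Typical use, as in amdgpu_vm_free_pts() below:
 *
 *	struct amdgpu_vm_pt_cursor cursor;
 *	struct amdgpu_vm_pt *entry;
 *
 *	for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry)
 *		amdgpu_vm_free_table(entry);
 *
 * The cursor already points at the next node when the loop body runs, so the
 * body may safely free the current entry.
 */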
575 * amdgpu_vm_get_pd_bo - add the VM PD to a validation list
577 * @vm: vm providing the BOs
578 * @validated: head of validation list
579 * @entry: entry to add
581 * Add the page directory to the list of BOs to
582 * validate for command submission.
584 void amdgpu_vm_get_pd_bo(struct amdgpu_vm
*vm
,
585 struct list_head
*validated
,
586 struct amdgpu_bo_list_entry
*entry
)
589 entry
->tv
.bo
= &vm
->root
.base
.bo
->tbo
;
590 /* Two for VM updates, one for TTM and one for the CS job */
591 entry
->tv
.num_shared
= 4;
592 entry
->user_pages
= NULL
;
593 list_add(&entry
->tv
.head
, validated
);
597 * amdgpu_vm_del_from_lru_notify - update bulk_moveable flag
599 * @bo: BO which was removed from the LRU
601 * Make sure the bulk_moveable flag is updated when a BO is removed from the
604 void amdgpu_vm_del_from_lru_notify(struct ttm_buffer_object
*bo
)
606 struct amdgpu_bo
*abo
;
607 struct amdgpu_vm_bo_base
*bo_base
;
609 if (!amdgpu_bo_is_amdgpu_bo(bo
))
615 abo
= ttm_to_amdgpu_bo(bo
);
618 for (bo_base
= abo
->vm_bo
; bo_base
; bo_base
= bo_base
->next
) {
619 struct amdgpu_vm
*vm
= bo_base
->vm
;
621 if (abo
->tbo
.base
.resv
== vm
->root
.base
.bo
->tbo
.base
.resv
)
622 vm
->bulk_moveable
= false;
627 * amdgpu_vm_move_to_lru_tail - move all BOs to the end of LRU
629 * @adev: amdgpu device pointer
630 * @vm: vm providing the BOs
632 * Move all BOs to the end of LRU and remember their positions to put them
635 void amdgpu_vm_move_to_lru_tail(struct amdgpu_device
*adev
,
636 struct amdgpu_vm
*vm
)
638 struct amdgpu_vm_bo_base
*bo_base
;
640 if (vm
->bulk_moveable
) {
641 spin_lock(&ttm_bo_glob
.lru_lock
);
642 ttm_bo_bulk_move_lru_tail(&vm
->lru_bulk_move
);
643 spin_unlock(&ttm_bo_glob
.lru_lock
);
647 memset(&vm
->lru_bulk_move
, 0, sizeof(vm
->lru_bulk_move
));
649 spin_lock(&ttm_bo_glob
.lru_lock
);
650 list_for_each_entry(bo_base
, &vm
->idle
, vm_status
) {
651 struct amdgpu_bo
*bo
= bo_base
->bo
;
656 ttm_bo_move_to_lru_tail(&bo
->tbo
, &vm
->lru_bulk_move
);
658 ttm_bo_move_to_lru_tail(&bo
->shadow
->tbo
,
661 spin_unlock(&ttm_bo_glob
.lru_lock
);
663 vm
->bulk_moveable
= true;
667 * amdgpu_vm_validate_pt_bos - validate the page table BOs
669 * @adev: amdgpu device pointer
670 * @vm: vm providing the BOs
671 * @validate: callback to do the validation
672 * @param: parameter for the validation callback
 * Validate the page table BOs on command submission if necessary.
679 int amdgpu_vm_validate_pt_bos(struct amdgpu_device
*adev
, struct amdgpu_vm
*vm
,
680 int (*validate
)(void *p
, struct amdgpu_bo
*bo
),
683 struct amdgpu_vm_bo_base
*bo_base
, *tmp
;
686 vm
->bulk_moveable
&= list_empty(&vm
->evicted
);
688 list_for_each_entry_safe(bo_base
, tmp
, &vm
->evicted
, vm_status
) {
689 struct amdgpu_bo
*bo
= bo_base
->bo
;
691 r
= validate(param
, bo
);
695 if (bo
->tbo
.type
!= ttm_bo_type_kernel
) {
696 amdgpu_vm_bo_moved(bo_base
);
698 vm
->update_funcs
->map_table(bo
);
699 amdgpu_vm_bo_relocated(bo_base
);
703 amdgpu_vm_eviction_lock(vm
);
704 vm
->evicting
= false;
705 amdgpu_vm_eviction_unlock(vm
);
711 * amdgpu_vm_ready - check VM is ready for updates
715 * Check if all VM PDs/PTs are ready for updates
718 * True if eviction list is empty.
720 bool amdgpu_vm_ready(struct amdgpu_vm
*vm
)
722 return list_empty(&vm
->evicted
);
726 * amdgpu_vm_clear_bo - initially clear the PDs/PTs
728 * @adev: amdgpu_device pointer
729 * @vm: VM to clear BO from
731 * @immediate: use an immediate update
733 * Root PD needs to be reserved when calling this.
736 * 0 on success, errno otherwise.
738 static int amdgpu_vm_clear_bo(struct amdgpu_device
*adev
,
739 struct amdgpu_vm
*vm
,
740 struct amdgpu_bo
*bo
,
743 struct ttm_operation_ctx ctx
= { true, false };
744 unsigned level
= adev
->vm_manager
.root_level
;
745 struct amdgpu_vm_update_params params
;
746 struct amdgpu_bo
*ancestor
= bo
;
747 unsigned entries
, ats_entries
;
751 /* Figure out our place in the hierarchy */
752 if (ancestor
->parent
) {
754 while (ancestor
->parent
->parent
) {
756 ancestor
= ancestor
->parent
;
760 entries
= amdgpu_bo_size(bo
) / 8;
761 if (!vm
->pte_support_ats
) {
764 } else if (!bo
->parent
) {
765 ats_entries
= amdgpu_vm_num_ats_entries(adev
);
766 ats_entries
= min(ats_entries
, entries
);
767 entries
-= ats_entries
;
770 struct amdgpu_vm_pt
*pt
;
772 pt
= container_of(ancestor
->vm_bo
, struct amdgpu_vm_pt
, base
);
773 ats_entries
= amdgpu_vm_num_ats_entries(adev
);
774 if ((pt
- vm
->root
.entries
) >= ats_entries
) {
777 ats_entries
= entries
;
782 r
= ttm_bo_validate(&bo
->tbo
, &bo
->placement
, &ctx
);
787 r
= ttm_bo_validate(&bo
->shadow
->tbo
, &bo
->shadow
->placement
,
793 r
= vm
->update_funcs
->map_table(bo
);
797 memset(¶ms
, 0, sizeof(params
));
800 params
.immediate
= immediate
;
802 r
= vm
->update_funcs
->prepare(¶ms
, NULL
, AMDGPU_SYNC_EXPLICIT
);
808 uint64_t value
= 0, flags
;
810 flags
= AMDGPU_PTE_DEFAULT_ATC
;
811 if (level
!= AMDGPU_VM_PTB
) {
812 /* Handle leaf PDEs as PTEs */
813 flags
|= AMDGPU_PDE_PTE
;
814 amdgpu_gmc_get_vm_pde(adev
, level
, &value
, &flags
);
817 r
= vm
->update_funcs
->update(¶ms
, bo
, addr
, 0, ats_entries
,
822 addr
+= ats_entries
* 8;
826 uint64_t value
= 0, flags
= 0;
828 if (adev
->asic_type
>= CHIP_VEGA10
) {
829 if (level
!= AMDGPU_VM_PTB
) {
830 /* Handle leaf PDEs as PTEs */
831 flags
|= AMDGPU_PDE_PTE
;
832 amdgpu_gmc_get_vm_pde(adev
, level
,
835 /* Workaround for fault priority problem on GMC9 */
836 flags
= AMDGPU_PTE_EXECUTABLE
;
840 r
= vm
->update_funcs
->update(¶ms
, bo
, addr
, 0, entries
,
846 return vm
->update_funcs
->commit(¶ms
, NULL
);
850 * amdgpu_vm_bo_param - fill in parameters for PD/PT allocation
852 * @adev: amdgpu_device pointer
854 * @level: the page table level
 * @immediate: use an immediate update
856 * @bp: resulting BO allocation parameters
858 static void amdgpu_vm_bo_param(struct amdgpu_device
*adev
, struct amdgpu_vm
*vm
,
859 int level
, bool immediate
,
860 struct amdgpu_bo_param
*bp
)
862 memset(bp
, 0, sizeof(*bp
));
864 bp
->size
= amdgpu_vm_bo_size(adev
, level
);
865 bp
->byte_align
= AMDGPU_GPU_PAGE_SIZE
;
866 bp
->domain
= AMDGPU_GEM_DOMAIN_VRAM
;
867 bp
->domain
= amdgpu_bo_get_preferred_pin_domain(adev
, bp
->domain
);
868 bp
->flags
= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS
|
869 AMDGPU_GEM_CREATE_CPU_GTT_USWC
;
870 if (vm
->use_cpu_for_update
)
871 bp
->flags
|= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED
;
872 else if (!vm
->root
.base
.bo
|| vm
->root
.base
.bo
->shadow
)
873 bp
->flags
|= AMDGPU_GEM_CREATE_SHADOW
;
874 bp
->type
= ttm_bo_type_kernel
;
875 bp
->no_wait_gpu
= immediate
;
876 if (vm
->root
.base
.bo
)
877 bp
->resv
= vm
->root
.base
.bo
->tbo
.base
.resv
;
881 * amdgpu_vm_alloc_pts - Allocate a specific page table
883 * @adev: amdgpu_device pointer
884 * @vm: VM to allocate page tables for
885 * @cursor: Which page table to allocate
886 * @immediate: use an immediate update
888 * Make sure a specific page table or directory is allocated.
891 * 1 if page table needed to be allocated, 0 if page table was already
892 * allocated, negative errno if an error occurred.
894 static int amdgpu_vm_alloc_pts(struct amdgpu_device
*adev
,
895 struct amdgpu_vm
*vm
,
896 struct amdgpu_vm_pt_cursor
*cursor
,
899 struct amdgpu_vm_pt
*entry
= cursor
->entry
;
900 struct amdgpu_bo_param bp
;
901 struct amdgpu_bo
*pt
;
904 if (cursor
->level
< AMDGPU_VM_PTB
&& !entry
->entries
) {
905 unsigned num_entries
;
907 num_entries
= amdgpu_vm_num_entries(adev
, cursor
->level
);
908 entry
->entries
= kvmalloc_array(num_entries
,
909 sizeof(*entry
->entries
),
910 GFP_KERNEL
| __GFP_ZERO
);
918 amdgpu_vm_bo_param(adev
, vm
, cursor
->level
, immediate
, &bp
);
920 r
= amdgpu_bo_create(adev
, &bp
, &pt
);
924 /* Keep a reference to the root directory to avoid
925 * freeing them up in the wrong order.
927 pt
->parent
= amdgpu_bo_ref(cursor
->parent
->base
.bo
);
928 amdgpu_vm_bo_base_init(&entry
->base
, vm
, pt
);
930 r
= amdgpu_vm_clear_bo(adev
, vm
, pt
, immediate
);
937 amdgpu_bo_unref(&pt
->shadow
);
938 amdgpu_bo_unref(&pt
);
 * amdgpu_vm_free_table - free one PD/PT
945 * @entry: PDE to free
947 static void amdgpu_vm_free_table(struct amdgpu_vm_pt
*entry
)
949 if (entry
->base
.bo
) {
950 entry
->base
.bo
->vm_bo
= NULL
;
951 list_del(&entry
->base
.vm_status
);
952 amdgpu_bo_unref(&entry
->base
.bo
->shadow
);
953 amdgpu_bo_unref(&entry
->base
.bo
);
955 kvfree(entry
->entries
);
956 entry
->entries
= NULL
;
960 * amdgpu_vm_free_pts - free PD/PT levels
962 * @adev: amdgpu device structure
963 * @vm: amdgpu vm structure
964 * @start: optional cursor where to start freeing PDs/PTs
966 * Free the page directory or page table level and all sub levels.
968 static void amdgpu_vm_free_pts(struct amdgpu_device
*adev
,
969 struct amdgpu_vm
*vm
,
970 struct amdgpu_vm_pt_cursor
*start
)
972 struct amdgpu_vm_pt_cursor cursor
;
973 struct amdgpu_vm_pt
*entry
;
975 vm
->bulk_moveable
= false;
977 for_each_amdgpu_vm_pt_dfs_safe(adev
, vm
, start
, cursor
, entry
)
978 amdgpu_vm_free_table(entry
);
981 amdgpu_vm_free_table(start
->entry
);
985 * amdgpu_vm_check_compute_bug - check whether asic has compute vm bug
987 * @adev: amdgpu_device pointer
989 void amdgpu_vm_check_compute_bug(struct amdgpu_device
*adev
)
991 const struct amdgpu_ip_block
*ip_block
;
992 bool has_compute_vm_bug
;
993 struct amdgpu_ring
*ring
;
996 has_compute_vm_bug
= false;
998 ip_block
= amdgpu_device_ip_get_ip_block(adev
, AMD_IP_BLOCK_TYPE_GFX
);
	/* Compute has a VM bug for GFX version < 7.
	 * Compute has a VM bug for GFX 8 MEC firmware version < 673.
	 */
1002 if (ip_block
->version
->major
<= 7)
1003 has_compute_vm_bug
= true;
1004 else if (ip_block
->version
->major
== 8)
1005 if (adev
->gfx
.mec_fw_version
< 673)
1006 has_compute_vm_bug
= true;
1009 for (i
= 0; i
< adev
->num_rings
; i
++) {
1010 ring
= adev
->rings
[i
];
1011 if (ring
->funcs
->type
== AMDGPU_RING_TYPE_COMPUTE
)
1012 /* only compute rings */
1013 ring
->has_compute_vm_bug
= has_compute_vm_bug
;
1015 ring
->has_compute_vm_bug
= false;
1020 * amdgpu_vm_need_pipeline_sync - Check if pipe sync is needed for job.
1022 * @ring: ring on which the job will be submitted
1023 * @job: job to submit
1026 * True if sync is needed.
1028 bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring
*ring
,
1029 struct amdgpu_job
*job
)
1031 struct amdgpu_device
*adev
= ring
->adev
;
1032 unsigned vmhub
= ring
->funcs
->vmhub
;
1033 struct amdgpu_vmid_mgr
*id_mgr
= &adev
->vm_manager
.id_mgr
[vmhub
];
1034 struct amdgpu_vmid
*id
;
1035 bool gds_switch_needed
;
1036 bool vm_flush_needed
= job
->vm_needs_flush
|| ring
->has_compute_vm_bug
;
1040 id
= &id_mgr
->ids
[job
->vmid
];
1041 gds_switch_needed
= ring
->funcs
->emit_gds_switch
&& (
1042 id
->gds_base
!= job
->gds_base
||
1043 id
->gds_size
!= job
->gds_size
||
1044 id
->gws_base
!= job
->gws_base
||
1045 id
->gws_size
!= job
->gws_size
||
1046 id
->oa_base
!= job
->oa_base
||
1047 id
->oa_size
!= job
->oa_size
);
1049 if (amdgpu_vmid_had_gpu_reset(adev
, id
))
1052 return vm_flush_needed
|| gds_switch_needed
;
1056 * amdgpu_vm_flush - hardware flush the vm
1058 * @ring: ring to use for flush
1060 * @need_pipe_sync: is pipe sync needed
1062 * Emit a VM flush when it is necessary.
1065 * 0 on success, errno otherwise.
1067 int amdgpu_vm_flush(struct amdgpu_ring
*ring
, struct amdgpu_job
*job
,
1068 bool need_pipe_sync
)
1070 struct amdgpu_device
*adev
= ring
->adev
;
1071 unsigned vmhub
= ring
->funcs
->vmhub
;
1072 struct amdgpu_vmid_mgr
*id_mgr
= &adev
->vm_manager
.id_mgr
[vmhub
];
1073 struct amdgpu_vmid
*id
= &id_mgr
->ids
[job
->vmid
];
1074 bool gds_switch_needed
= ring
->funcs
->emit_gds_switch
&& (
1075 id
->gds_base
!= job
->gds_base
||
1076 id
->gds_size
!= job
->gds_size
||
1077 id
->gws_base
!= job
->gws_base
||
1078 id
->gws_size
!= job
->gws_size
||
1079 id
->oa_base
!= job
->oa_base
||
1080 id
->oa_size
!= job
->oa_size
);
1081 bool vm_flush_needed
= job
->vm_needs_flush
;
1082 struct dma_fence
*fence
= NULL
;
1083 bool pasid_mapping_needed
= false;
1084 unsigned patch_offset
= 0;
1085 bool update_spm_vmid_needed
= (job
->vm
&& (job
->vm
->reserved_vmid
[vmhub
] != NULL
));
1088 if (update_spm_vmid_needed
&& adev
->gfx
.rlc
.funcs
->update_spm_vmid
)
1089 adev
->gfx
.rlc
.funcs
->update_spm_vmid(adev
, job
->vmid
);
1091 if (amdgpu_vmid_had_gpu_reset(adev
, id
)) {
1092 gds_switch_needed
= true;
1093 vm_flush_needed
= true;
1094 pasid_mapping_needed
= true;
1097 mutex_lock(&id_mgr
->lock
);
1098 if (id
->pasid
!= job
->pasid
|| !id
->pasid_mapping
||
1099 !dma_fence_is_signaled(id
->pasid_mapping
))
1100 pasid_mapping_needed
= true;
1101 mutex_unlock(&id_mgr
->lock
);
1103 gds_switch_needed
&= !!ring
->funcs
->emit_gds_switch
;
1104 vm_flush_needed
&= !!ring
->funcs
->emit_vm_flush
&&
1105 job
->vm_pd_addr
!= AMDGPU_BO_INVALID_OFFSET
;
1106 pasid_mapping_needed
&= adev
->gmc
.gmc_funcs
->emit_pasid_mapping
&&
1107 ring
->funcs
->emit_wreg
;
1109 if (!vm_flush_needed
&& !gds_switch_needed
&& !need_pipe_sync
)
1112 if (ring
->funcs
->init_cond_exec
)
1113 patch_offset
= amdgpu_ring_init_cond_exec(ring
);
1116 amdgpu_ring_emit_pipeline_sync(ring
);
1118 if (vm_flush_needed
) {
1119 trace_amdgpu_vm_flush(ring
, job
->vmid
, job
->vm_pd_addr
);
1120 amdgpu_ring_emit_vm_flush(ring
, job
->vmid
, job
->vm_pd_addr
);
1123 if (pasid_mapping_needed
)
1124 amdgpu_gmc_emit_pasid_mapping(ring
, job
->vmid
, job
->pasid
);
1126 if (vm_flush_needed
|| pasid_mapping_needed
) {
1127 r
= amdgpu_fence_emit(ring
, &fence
, 0);
1132 if (vm_flush_needed
) {
1133 mutex_lock(&id_mgr
->lock
);
1134 dma_fence_put(id
->last_flush
);
1135 id
->last_flush
= dma_fence_get(fence
);
1136 id
->current_gpu_reset_count
=
1137 atomic_read(&adev
->gpu_reset_counter
);
1138 mutex_unlock(&id_mgr
->lock
);
1141 if (pasid_mapping_needed
) {
1142 mutex_lock(&id_mgr
->lock
);
1143 id
->pasid
= job
->pasid
;
1144 dma_fence_put(id
->pasid_mapping
);
1145 id
->pasid_mapping
= dma_fence_get(fence
);
1146 mutex_unlock(&id_mgr
->lock
);
1148 dma_fence_put(fence
);
1150 if (ring
->funcs
->emit_gds_switch
&& gds_switch_needed
) {
1151 id
->gds_base
= job
->gds_base
;
1152 id
->gds_size
= job
->gds_size
;
1153 id
->gws_base
= job
->gws_base
;
1154 id
->gws_size
= job
->gws_size
;
1155 id
->oa_base
= job
->oa_base
;
1156 id
->oa_size
= job
->oa_size
;
1157 amdgpu_ring_emit_gds_switch(ring
, job
->vmid
, job
->gds_base
,
1158 job
->gds_size
, job
->gws_base
,
1159 job
->gws_size
, job
->oa_base
,
1163 if (ring
->funcs
->patch_cond_exec
)
1164 amdgpu_ring_patch_cond_exec(ring
, patch_offset
);
1166 /* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */
1167 if (ring
->funcs
->emit_switch_buffer
) {
1168 amdgpu_ring_emit_switch_buffer(ring
);
1169 amdgpu_ring_emit_switch_buffer(ring
);
1175 * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
1178 * @bo: requested buffer object
1180 * Find @bo inside the requested vm.
1181 * Search inside the @bos vm list for the requested vm
1182 * Returns the found bo_va or NULL if none is found
1184 * Object has to be reserved!
1187 * Found bo_va or NULL.
1189 struct amdgpu_bo_va
*amdgpu_vm_bo_find(struct amdgpu_vm
*vm
,
1190 struct amdgpu_bo
*bo
)
1192 struct amdgpu_vm_bo_base
*base
;
1194 for (base
= bo
->vm_bo
; base
; base
= base
->next
) {
1198 return container_of(base
, struct amdgpu_bo_va
, base
);
/**
 * amdgpu_vm_map_gart - Resolve gart mapping of addr
 *
 * @pages_addr: optional DMA address to use for lookup
 * @addr: the unmapped addr
 *
 * Look up the physical address of the page that the pte resolves
 * to.
 *
 * Returns:
 * The pointer for the page table entry.
 */
uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
{
	uint64_t result;

	/* page table offset */
	result = pages_addr[addr >> PAGE_SHIFT];

	/* in case cpu page size != gpu page size */
	result |= addr & (~PAGE_MASK);

	result &= 0xFFFFFFFFFFFFF000ULL;

	return result;
}
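/*
 * Example: with 64 KiB CPU pages and 4 KiB GPU pages, amdgpu_vm_map_gart()
 * turns addr 0x12345 into pages_addr[0x1] | 0x2000, i.e. the DMA address of
 * the CPU page plus the offset of the 4 KiB GPU page inside it; the final
 * mask keeps the result 4 KiB aligned.
 */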
1231 * amdgpu_vm_update_pde - update a single level in the hierarchy
1233 * @params: parameters for the update
1235 * @entry: entry to update
1237 * Makes sure the requested entry in parent is up to date.
1239 static int amdgpu_vm_update_pde(struct amdgpu_vm_update_params
*params
,
1240 struct amdgpu_vm
*vm
,
1241 struct amdgpu_vm_pt
*entry
)
1243 struct amdgpu_vm_pt
*parent
= amdgpu_vm_pt_parent(entry
);
1244 struct amdgpu_bo
*bo
= parent
->base
.bo
, *pbo
;
1245 uint64_t pde
, pt
, flags
;
1248 for (level
= 0, pbo
= bo
->parent
; pbo
; ++level
)
1251 level
+= params
->adev
->vm_manager
.root_level
;
1252 amdgpu_gmc_get_pde_for_bo(entry
->base
.bo
, level
, &pt
, &flags
);
1253 pde
= (entry
- parent
->entries
) * 8;
1254 return vm
->update_funcs
->update(params
, bo
, pde
, pt
, 1, 0, flags
);
1258 * amdgpu_vm_invalidate_pds - mark all PDs as invalid
1260 * @adev: amdgpu_device pointer
1263 * Mark all PD level as invalid after an error.
1265 static void amdgpu_vm_invalidate_pds(struct amdgpu_device
*adev
,
1266 struct amdgpu_vm
*vm
)
1268 struct amdgpu_vm_pt_cursor cursor
;
1269 struct amdgpu_vm_pt
*entry
;
1271 for_each_amdgpu_vm_pt_dfs_safe(adev
, vm
, NULL
, cursor
, entry
)
1272 if (entry
->base
.bo
&& !entry
->base
.moved
)
1273 amdgpu_vm_bo_relocated(&entry
->base
);
1277 * amdgpu_vm_update_pdes - make sure that all directories are valid
1279 * @adev: amdgpu_device pointer
1281 * @immediate: submit immediately to the paging queue
1283 * Makes sure all directories are up to date.
1286 * 0 for success, error for failure.
1288 int amdgpu_vm_update_pdes(struct amdgpu_device
*adev
,
1289 struct amdgpu_vm
*vm
, bool immediate
)
1291 struct amdgpu_vm_update_params params
;
1294 if (list_empty(&vm
->relocated
))
1297 memset(¶ms
, 0, sizeof(params
));
1300 params
.immediate
= immediate
;
1302 r
= vm
->update_funcs
->prepare(¶ms
, NULL
, AMDGPU_SYNC_EXPLICIT
);
1306 while (!list_empty(&vm
->relocated
)) {
1307 struct amdgpu_vm_pt
*entry
;
1309 entry
= list_first_entry(&vm
->relocated
, struct amdgpu_vm_pt
,
1311 amdgpu_vm_bo_idle(&entry
->base
);
1313 r
= amdgpu_vm_update_pde(¶ms
, vm
, entry
);
1318 r
= vm
->update_funcs
->commit(¶ms
, &vm
->last_update
);
1324 amdgpu_vm_invalidate_pds(adev
, vm
);
1329 * amdgpu_vm_update_flags - figure out flags for PTE updates
1331 * Make sure to set the right flags for the PTEs at the desired level.
1333 static void amdgpu_vm_update_flags(struct amdgpu_vm_update_params
*params
,
1334 struct amdgpu_bo
*bo
, unsigned level
,
1335 uint64_t pe
, uint64_t addr
,
1336 unsigned count
, uint32_t incr
,
1340 if (level
!= AMDGPU_VM_PTB
) {
1341 flags
|= AMDGPU_PDE_PTE
;
1342 amdgpu_gmc_get_vm_pde(params
->adev
, level
, &addr
, &flags
);
1344 } else if (params
->adev
->asic_type
>= CHIP_VEGA10
&&
1345 !(flags
& AMDGPU_PTE_VALID
) &&
1346 !(flags
& AMDGPU_PTE_PRT
)) {
1348 /* Workaround for fault priority problem on GMC9 */
1349 flags
|= AMDGPU_PTE_EXECUTABLE
;
1352 params
->vm
->update_funcs
->update(params
, bo
, pe
, addr
, count
, incr
,
1357 * amdgpu_vm_fragment - get fragment for PTEs
1359 * @params: see amdgpu_vm_update_params definition
1360 * @start: first PTE to handle
1361 * @end: last PTE to handle
1362 * @flags: hw mapping flags
1363 * @frag: resulting fragment size
1364 * @frag_end: end of this fragment
1366 * Returns the first possible fragment for the start and end address.
1368 static void amdgpu_vm_fragment(struct amdgpu_vm_update_params
*params
,
1369 uint64_t start
, uint64_t end
, uint64_t flags
,
1370 unsigned int *frag
, uint64_t *frag_end
)
1373 * The MC L1 TLB supports variable sized pages, based on a fragment
1374 * field in the PTE. When this field is set to a non-zero value, page
1375 * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
1376 * flags are considered valid for all PTEs within the fragment range
1377 * and corresponding mappings are assumed to be physically contiguous.
1379 * The L1 TLB can store a single PTE for the whole fragment,
1380 * significantly increasing the space available for translation
1381 * caching. This leads to large improvements in throughput when the
1382 * TLB is under pressure.
1384 * The L2 TLB distributes small and large fragments into two
1385 * asymmetric partitions. The large fragment cache is significantly
1386 * larger. Thus, we try to use large fragments wherever possible.
1387 * Userspace can support this by aligning virtual base address and
1388 * allocation size to the fragment size.
1390 * Starting with Vega10 the fragment size only controls the L1. The L2
	 * is now directly fed with small/huge/giant pages from the walker.
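	 *
	 * Example: a fragment value of 9 marks a 1 << (12 + 9) = 2 MiB
	 * fragment, so a 2 MiB aligned, physically contiguous mapping can be
	 * cached by the L1 TLB with a single PTE instead of 512.
	 */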
1395 if (params
->adev
->asic_type
< CHIP_VEGA10
)
1396 max_frag
= params
->adev
->vm_manager
.fragment_size
;
	/* system pages are not contiguous */
1401 if (params
->pages_addr
) {
1407 /* This intentionally wraps around if no bit is set */
1408 *frag
= min((unsigned)ffs(start
) - 1, (unsigned)fls64(end
- start
) - 1);
1409 if (*frag
>= max_frag
) {
1411 *frag_end
= end
& ~((1ULL << max_frag
) - 1);
1413 *frag_end
= start
+ (1 << *frag
);
1418 * amdgpu_vm_update_ptes - make sure that page tables are valid
1420 * @params: see amdgpu_vm_update_params definition
1421 * @start: start of GPU address range
1422 * @end: end of GPU address range
1423 * @dst: destination address to map to, the next dst inside the function
1424 * @flags: mapping flags
1426 * Update the page tables in the range @start - @end.
1429 * 0 for success, -EINVAL for failure.
1431 static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params
*params
,
1432 uint64_t start
, uint64_t end
,
1433 uint64_t dst
, uint64_t flags
)
1435 struct amdgpu_device
*adev
= params
->adev
;
1436 struct amdgpu_vm_pt_cursor cursor
;
1437 uint64_t frag_start
= start
, frag_end
;
1441 /* figure out the initial fragment */
1442 amdgpu_vm_fragment(params
, frag_start
, end
, flags
, &frag
, &frag_end
);
1444 /* walk over the address space and update the PTs */
1445 amdgpu_vm_pt_start(adev
, params
->vm
, start
, &cursor
);
1446 while (cursor
.pfn
< end
) {
1447 unsigned shift
, parent_shift
, mask
;
1448 uint64_t incr
, entry_end
, pe_start
;
1449 struct amdgpu_bo
*pt
;
1451 if (!params
->unlocked
) {
1452 /* make sure that the page tables covering the
1453 * address range are actually allocated
1455 r
= amdgpu_vm_alloc_pts(params
->adev
, params
->vm
,
1456 &cursor
, params
->immediate
);
1461 shift
= amdgpu_vm_level_shift(adev
, cursor
.level
);
1462 parent_shift
= amdgpu_vm_level_shift(adev
, cursor
.level
- 1);
1463 if (params
->unlocked
) {
1464 /* Unlocked updates are only allowed on the leaves */
1465 if (amdgpu_vm_pt_descendant(adev
, &cursor
))
1467 } else if (adev
->asic_type
< CHIP_VEGA10
&&
1468 (flags
& AMDGPU_PTE_VALID
)) {
1469 /* No huge page support before GMC v9 */
1470 if (cursor
.level
!= AMDGPU_VM_PTB
) {
1471 if (!amdgpu_vm_pt_descendant(adev
, &cursor
))
1475 } else if (frag
< shift
) {
1476 /* We can't use this level when the fragment size is
1477 * smaller than the address shift. Go to the next
1478 * child entry and try again.
1480 if (amdgpu_vm_pt_descendant(adev
, &cursor
))
1482 } else if (frag
>= parent_shift
) {
1483 /* If the fragment size is even larger than the parent
1484 * shift we should go up one level and check it again.
1486 if (!amdgpu_vm_pt_ancestor(&cursor
))
1491 pt
= cursor
.entry
->base
.bo
;
1493 /* We need all PDs and PTs for mapping something, */
1494 if (flags
& AMDGPU_PTE_VALID
)
1497 /* but unmapping something can happen at a higher
1500 if (!amdgpu_vm_pt_ancestor(&cursor
))
1503 pt
= cursor
.entry
->base
.bo
;
1504 shift
= parent_shift
;
1505 frag_end
= max(frag_end
, ALIGN(frag_start
+ 1,
1509 /* Looks good so far, calculate parameters for the update */
1510 incr
= (uint64_t)AMDGPU_GPU_PAGE_SIZE
<< shift
;
1511 mask
= amdgpu_vm_entries_mask(adev
, cursor
.level
);
1512 pe_start
= ((cursor
.pfn
>> shift
) & mask
) * 8;
1513 entry_end
= ((uint64_t)mask
+ 1) << shift
;
1514 entry_end
+= cursor
.pfn
& ~(entry_end
- 1);
1515 entry_end
= min(entry_end
, end
);
1518 struct amdgpu_vm
*vm
= params
->vm
;
1519 uint64_t upd_end
= min(entry_end
, frag_end
);
1520 unsigned nptes
= (upd_end
- frag_start
) >> shift
;
1521 uint64_t upd_flags
= flags
| AMDGPU_PTE_FRAG(frag
);
1523 /* This can happen when we set higher level PDs to
1524 * silent to stop fault floods.
1526 nptes
= max(nptes
, 1u);
1528 trace_amdgpu_vm_update_ptes(params
, frag_start
, upd_end
,
1529 nptes
, dst
, incr
, upd_flags
,
1531 vm
->immediate
.fence_context
);
1532 amdgpu_vm_update_flags(params
, pt
, cursor
.level
,
1533 pe_start
, dst
, nptes
, incr
,
1536 pe_start
+= nptes
* 8;
1537 dst
+= nptes
* incr
;
1539 frag_start
= upd_end
;
1540 if (frag_start
>= frag_end
) {
1541 /* figure out the next fragment */
1542 amdgpu_vm_fragment(params
, frag_start
, end
,
1543 flags
, &frag
, &frag_end
);
1547 } while (frag_start
< entry_end
);
1549 if (amdgpu_vm_pt_descendant(adev
, &cursor
)) {
1550 /* Free all child entries.
1551 * Update the tables with the flags and addresses and free up subsequent
1552 * tables in the case of huge pages or freed up areas.
1553 * This is the maximum you can free, because all other page tables are not
1554 * completely covered by the range and so potentially still in use.
1556 while (cursor
.pfn
< frag_start
) {
1557 amdgpu_vm_free_pts(adev
, params
->vm
, &cursor
);
1558 amdgpu_vm_pt_next(adev
, &cursor
);
1561 } else if (frag
>= shift
) {
1562 /* or just move on to the next on the same level. */
1563 amdgpu_vm_pt_next(adev
, &cursor
);
1571 * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
1573 * @adev: amdgpu_device pointer of the VM
1574 * @bo_adev: amdgpu_device pointer of the mapped BO
1576 * @immediate: immediate submission in a page fault
1577 * @unlocked: unlocked invalidation during MM callback
1578 * @resv: fences we need to sync to
1579 * @start: start of mapped range
1580 * @last: last mapped entry
1581 * @flags: flags for the entries
1582 * @offset: offset into nodes and pages_addr
1583 * @nodes: array of drm_mm_nodes with the MC addresses
1584 * @pages_addr: DMA addresses to use for mapping
1585 * @fence: optional resulting fence
1587 * Fill in the page table entries between @start and @last.
1590 * 0 for success, -EINVAL for failure.
1592 static int amdgpu_vm_bo_update_mapping(struct amdgpu_device
*adev
,
1593 struct amdgpu_device
*bo_adev
,
1594 struct amdgpu_vm
*vm
, bool immediate
,
1595 bool unlocked
, struct dma_resv
*resv
,
1596 uint64_t start
, uint64_t last
,
1597 uint64_t flags
, uint64_t offset
,
1598 struct drm_mm_node
*nodes
,
1599 dma_addr_t
*pages_addr
,
1600 struct dma_fence
**fence
)
1602 struct amdgpu_vm_update_params params
;
1603 enum amdgpu_sync_mode sync_mode
;
1607 memset(¶ms
, 0, sizeof(params
));
1610 params
.immediate
= immediate
;
1611 params
.pages_addr
= pages_addr
;
1612 params
.unlocked
= unlocked
;
1614 /* Implicitly sync to command submissions in the same VM before
1615 * unmapping. Sync to moving fences before mapping.
1617 if (!(flags
& AMDGPU_PTE_VALID
))
1618 sync_mode
= AMDGPU_SYNC_EQ_OWNER
;
1620 sync_mode
= AMDGPU_SYNC_EXPLICIT
;
1622 pfn
= offset
>> PAGE_SHIFT
;
1624 while (pfn
>= nodes
->size
) {
1630 amdgpu_vm_eviction_lock(vm
);
1636 if (!unlocked
&& !dma_fence_is_signaled(vm
->last_unlocked
)) {
1637 struct dma_fence
*tmp
= dma_fence_get_stub();
1639 amdgpu_bo_fence(vm
->root
.base
.bo
, vm
->last_unlocked
, true);
1640 swap(vm
->last_unlocked
, tmp
);
1644 r
= vm
->update_funcs
->prepare(¶ms
, resv
, sync_mode
);
1649 uint64_t tmp
, num_entries
, addr
;
1652 num_entries
= last
- start
+ 1;
1654 addr
= nodes
->start
<< PAGE_SHIFT
;
1655 num_entries
= min((nodes
->size
- pfn
) *
1656 AMDGPU_GPU_PAGES_IN_CPU_PAGE
, num_entries
);
1662 bool contiguous
= true;
1664 if (num_entries
> AMDGPU_GPU_PAGES_IN_CPU_PAGE
) {
1667 contiguous
= pages_addr
[pfn
+ 1] ==
1668 pages_addr
[pfn
] + PAGE_SIZE
;
1671 AMDGPU_GPU_PAGES_IN_CPU_PAGE
;
1672 for (count
= 2; count
< tmp
; ++count
) {
1673 uint64_t idx
= pfn
+ count
;
1675 if (contiguous
!= (pages_addr
[idx
] ==
1676 pages_addr
[idx
- 1] + PAGE_SIZE
))
1679 num_entries
= count
*
1680 AMDGPU_GPU_PAGES_IN_CPU_PAGE
;
1684 addr
= pfn
<< PAGE_SHIFT
;
1685 params
.pages_addr
= pages_addr
;
1687 addr
= pages_addr
[pfn
];
1688 params
.pages_addr
= NULL
;
1691 } else if (flags
& (AMDGPU_PTE_VALID
| AMDGPU_PTE_PRT
)) {
1692 addr
+= bo_adev
->vm_manager
.vram_base_offset
;
1693 addr
+= pfn
<< PAGE_SHIFT
;
1696 tmp
= start
+ num_entries
;
1697 r
= amdgpu_vm_update_ptes(¶ms
, start
, tmp
, addr
, flags
);
1701 pfn
+= num_entries
/ AMDGPU_GPU_PAGES_IN_CPU_PAGE
;
1702 if (nodes
&& nodes
->size
== pfn
) {
1708 } while (unlikely(start
!= last
+ 1));
1710 r
= vm
->update_funcs
->commit(¶ms
, fence
);
1713 amdgpu_vm_eviction_unlock(vm
);
1718 * amdgpu_vm_bo_update - update all BO mappings in the vm page table
1720 * @adev: amdgpu_device pointer
1721 * @bo_va: requested BO and VM object
1722 * @clear: if true clear the entries
1724 * Fill in the page table entries for @bo_va.
1727 * 0 for success, -EINVAL for failure.
1729 int amdgpu_vm_bo_update(struct amdgpu_device
*adev
, struct amdgpu_bo_va
*bo_va
,
1732 struct amdgpu_bo
*bo
= bo_va
->base
.bo
;
1733 struct amdgpu_vm
*vm
= bo_va
->base
.vm
;
1734 struct amdgpu_bo_va_mapping
*mapping
;
1735 dma_addr_t
*pages_addr
= NULL
;
1736 struct ttm_resource
*mem
;
1737 struct drm_mm_node
*nodes
;
1738 struct dma_fence
**last_update
;
1739 struct dma_resv
*resv
;
1741 struct amdgpu_device
*bo_adev
= adev
;
1747 resv
= vm
->root
.base
.bo
->tbo
.base
.resv
;
1749 struct drm_gem_object
*obj
= &bo
->tbo
.base
;
1751 resv
= bo
->tbo
.base
.resv
;
1752 if (obj
->import_attach
&& bo_va
->is_xgmi
) {
1753 struct dma_buf
*dma_buf
= obj
->import_attach
->dmabuf
;
1754 struct drm_gem_object
*gobj
= dma_buf
->priv
;
1755 struct amdgpu_bo
*abo
= gem_to_amdgpu_bo(gobj
);
1757 if (abo
->tbo
.mem
.mem_type
== TTM_PL_VRAM
)
1758 bo
= gem_to_amdgpu_bo(gobj
);
1761 nodes
= mem
->mm_node
;
1762 if (mem
->mem_type
== TTM_PL_TT
)
1763 pages_addr
= bo
->tbo
.ttm
->dma_address
;
1767 flags
= amdgpu_ttm_tt_pte_flags(adev
, bo
->tbo
.ttm
, mem
);
1769 if (amdgpu_bo_encrypted(bo
))
1770 flags
|= AMDGPU_PTE_TMZ
;
1772 bo_adev
= amdgpu_ttm_adev(bo
->tbo
.bdev
);
1777 if (clear
|| (bo
&& bo
->tbo
.base
.resv
==
1778 vm
->root
.base
.bo
->tbo
.base
.resv
))
1779 last_update
= &vm
->last_update
;
1781 last_update
= &bo_va
->last_pt_update
;
1783 if (!clear
&& bo_va
->base
.moved
) {
1784 bo_va
->base
.moved
= false;
1785 list_splice_init(&bo_va
->valids
, &bo_va
->invalids
);
1787 } else if (bo_va
->cleared
!= clear
) {
1788 list_splice_init(&bo_va
->valids
, &bo_va
->invalids
);
1791 list_for_each_entry(mapping
, &bo_va
->invalids
, list
) {
1792 uint64_t update_flags
= flags
;
		/* Normally bo_va->flags only contains the READABLE and
		 * WRITEABLE bits here, but just in case we filter the
		 * flags in the first place.
		 */
1797 if (!(mapping
->flags
& AMDGPU_PTE_READABLE
))
1798 update_flags
&= ~AMDGPU_PTE_READABLE
;
1799 if (!(mapping
->flags
& AMDGPU_PTE_WRITEABLE
))
1800 update_flags
&= ~AMDGPU_PTE_WRITEABLE
;
1802 /* Apply ASIC specific mapping flags */
1803 amdgpu_gmc_get_vm_pte(adev
, mapping
, &update_flags
);
1805 trace_amdgpu_vm_bo_update(mapping
);
1807 r
= amdgpu_vm_bo_update_mapping(adev
, bo_adev
, vm
, false, false,
1808 resv
, mapping
->start
,
1809 mapping
->last
, update_flags
,
1810 mapping
->offset
, nodes
,
1811 pages_addr
, last_update
);
1816 /* If the BO is not in its preferred location add it back to
1817 * the evicted list so that it gets validated again on the
1818 * next command submission.
1820 if (bo
&& bo
->tbo
.base
.resv
== vm
->root
.base
.bo
->tbo
.base
.resv
) {
1821 uint32_t mem_type
= bo
->tbo
.mem
.mem_type
;
1823 if (!(bo
->preferred_domains
&
1824 amdgpu_mem_type_to_domain(mem_type
)))
1825 amdgpu_vm_bo_evicted(&bo_va
->base
);
1827 amdgpu_vm_bo_idle(&bo_va
->base
);
1829 amdgpu_vm_bo_done(&bo_va
->base
);
1832 list_splice_init(&bo_va
->invalids
, &bo_va
->valids
);
1833 bo_va
->cleared
= clear
;
1835 if (trace_amdgpu_vm_bo_mapping_enabled()) {
1836 list_for_each_entry(mapping
, &bo_va
->valids
, list
)
1837 trace_amdgpu_vm_bo_mapping(mapping
);
1844 * amdgpu_vm_update_prt_state - update the global PRT state
1846 * @adev: amdgpu_device pointer
1848 static void amdgpu_vm_update_prt_state(struct amdgpu_device
*adev
)
1850 unsigned long flags
;
1853 spin_lock_irqsave(&adev
->vm_manager
.prt_lock
, flags
);
1854 enable
= !!atomic_read(&adev
->vm_manager
.num_prt_users
);
1855 adev
->gmc
.gmc_funcs
->set_prt(adev
, enable
);
1856 spin_unlock_irqrestore(&adev
->vm_manager
.prt_lock
, flags
);
1860 * amdgpu_vm_prt_get - add a PRT user
1862 * @adev: amdgpu_device pointer
1864 static void amdgpu_vm_prt_get(struct amdgpu_device
*adev
)
1866 if (!adev
->gmc
.gmc_funcs
->set_prt
)
1869 if (atomic_inc_return(&adev
->vm_manager
.num_prt_users
) == 1)
1870 amdgpu_vm_update_prt_state(adev
);
1874 * amdgpu_vm_prt_put - drop a PRT user
1876 * @adev: amdgpu_device pointer
1878 static void amdgpu_vm_prt_put(struct amdgpu_device
*adev
)
1880 if (atomic_dec_return(&adev
->vm_manager
.num_prt_users
) == 0)
1881 amdgpu_vm_update_prt_state(adev
);
1885 * amdgpu_vm_prt_cb - callback for updating the PRT status
1887 * @fence: fence for the callback
1888 * @_cb: the callback function
1890 static void amdgpu_vm_prt_cb(struct dma_fence
*fence
, struct dma_fence_cb
*_cb
)
1892 struct amdgpu_prt_cb
*cb
= container_of(_cb
, struct amdgpu_prt_cb
, cb
);
1894 amdgpu_vm_prt_put(cb
->adev
);
1899 * amdgpu_vm_add_prt_cb - add callback for updating the PRT status
1901 * @adev: amdgpu_device pointer
1902 * @fence: fence for the callback
1904 static void amdgpu_vm_add_prt_cb(struct amdgpu_device
*adev
,
1905 struct dma_fence
*fence
)
1907 struct amdgpu_prt_cb
*cb
;
1909 if (!adev
->gmc
.gmc_funcs
->set_prt
)
1912 cb
= kmalloc(sizeof(struct amdgpu_prt_cb
), GFP_KERNEL
);
1914 /* Last resort when we are OOM */
1916 dma_fence_wait(fence
, false);
1918 amdgpu_vm_prt_put(adev
);
1921 if (!fence
|| dma_fence_add_callback(fence
, &cb
->cb
,
1923 amdgpu_vm_prt_cb(fence
, &cb
->cb
);
1928 * amdgpu_vm_free_mapping - free a mapping
1930 * @adev: amdgpu_device pointer
1932 * @mapping: mapping to be freed
1933 * @fence: fence of the unmap operation
1935 * Free a mapping and make sure we decrease the PRT usage count if applicable.
1937 static void amdgpu_vm_free_mapping(struct amdgpu_device
*adev
,
1938 struct amdgpu_vm
*vm
,
1939 struct amdgpu_bo_va_mapping
*mapping
,
1940 struct dma_fence
*fence
)
1942 if (mapping
->flags
& AMDGPU_PTE_PRT
)
1943 amdgpu_vm_add_prt_cb(adev
, fence
);
1948 * amdgpu_vm_prt_fini - finish all prt mappings
1950 * @adev: amdgpu_device pointer
1953 * Register a cleanup callback to disable PRT support after VM dies.
1955 static void amdgpu_vm_prt_fini(struct amdgpu_device
*adev
, struct amdgpu_vm
*vm
)
1957 struct dma_resv
*resv
= vm
->root
.base
.bo
->tbo
.base
.resv
;
1958 struct dma_fence
*excl
, **shared
;
1959 unsigned i
, shared_count
;
1962 r
= dma_resv_get_fences_rcu(resv
, &excl
,
1963 &shared_count
, &shared
);
1965 /* Not enough memory to grab the fence list, as last resort
1966 * block for all the fences to complete.
1968 dma_resv_wait_timeout_rcu(resv
, true, false,
1969 MAX_SCHEDULE_TIMEOUT
);
1973 /* Add a callback for each fence in the reservation object */
1974 amdgpu_vm_prt_get(adev
);
1975 amdgpu_vm_add_prt_cb(adev
, excl
);
1977 for (i
= 0; i
< shared_count
; ++i
) {
1978 amdgpu_vm_prt_get(adev
);
1979 amdgpu_vm_add_prt_cb(adev
, shared
[i
]);
1986 * amdgpu_vm_clear_freed - clear freed BOs in the PT
1988 * @adev: amdgpu_device pointer
1990 * @fence: optional resulting fence (unchanged if no work needed to be done
1991 * or if an error occurred)
1993 * Make sure all freed BOs are cleared in the PT.
1994 * PTs have to be reserved and mutex must be locked!
2000 int amdgpu_vm_clear_freed(struct amdgpu_device
*adev
,
2001 struct amdgpu_vm
*vm
,
2002 struct dma_fence
**fence
)
2004 struct dma_resv
*resv
= vm
->root
.base
.bo
->tbo
.base
.resv
;
2005 struct amdgpu_bo_va_mapping
*mapping
;
2006 uint64_t init_pte_value
= 0;
2007 struct dma_fence
*f
= NULL
;
2010 while (!list_empty(&vm
->freed
)) {
2011 mapping
= list_first_entry(&vm
->freed
,
2012 struct amdgpu_bo_va_mapping
, list
);
2013 list_del(&mapping
->list
);
2015 if (vm
->pte_support_ats
&&
2016 mapping
->start
< AMDGPU_GMC_HOLE_START
)
2017 init_pte_value
= AMDGPU_PTE_DEFAULT_ATC
;
2019 r
= amdgpu_vm_bo_update_mapping(adev
, adev
, vm
, false, false,
2020 resv
, mapping
->start
,
2021 mapping
->last
, init_pte_value
,
2023 amdgpu_vm_free_mapping(adev
, vm
, mapping
, f
);
2031 dma_fence_put(*fence
);
2042 * amdgpu_vm_handle_moved - handle moved BOs in the PT
2044 * @adev: amdgpu_device pointer
2047 * Make sure all BOs which are moved are updated in the PTs.
2052 * PTs have to be reserved!
2054 int amdgpu_vm_handle_moved(struct amdgpu_device
*adev
,
2055 struct amdgpu_vm
*vm
)
2057 struct amdgpu_bo_va
*bo_va
, *tmp
;
2058 struct dma_resv
*resv
;
2062 list_for_each_entry_safe(bo_va
, tmp
, &vm
->moved
, base
.vm_status
) {
		/* Per VM BOs never need to be cleared in the page tables */
2064 r
= amdgpu_vm_bo_update(adev
, bo_va
, false);
2069 spin_lock(&vm
->invalidated_lock
);
2070 while (!list_empty(&vm
->invalidated
)) {
2071 bo_va
= list_first_entry(&vm
->invalidated
, struct amdgpu_bo_va
,
2073 resv
= bo_va
->base
.bo
->tbo
.base
.resv
;
2074 spin_unlock(&vm
->invalidated_lock
);
2076 /* Try to reserve the BO to avoid clearing its ptes */
2077 if (!amdgpu_vm_debug
&& dma_resv_trylock(resv
))
2079 /* Somebody else is using the BO right now */
2083 r
= amdgpu_vm_bo_update(adev
, bo_va
, clear
);
2088 dma_resv_unlock(resv
);
2089 spin_lock(&vm
->invalidated_lock
);
2091 spin_unlock(&vm
->invalidated_lock
);
2097 * amdgpu_vm_bo_add - add a bo to a specific vm
2099 * @adev: amdgpu_device pointer
2101 * @bo: amdgpu buffer object
2103 * Add @bo into the requested vm.
2104 * Add @bo to the list of bos associated with the vm
2107 * Newly added bo_va or NULL for failure
2109 * Object has to be reserved!
2111 struct amdgpu_bo_va
*amdgpu_vm_bo_add(struct amdgpu_device
*adev
,
2112 struct amdgpu_vm
*vm
,
2113 struct amdgpu_bo
*bo
)
2115 struct amdgpu_bo_va
*bo_va
;
2117 bo_va
= kzalloc(sizeof(struct amdgpu_bo_va
), GFP_KERNEL
);
2118 if (bo_va
== NULL
) {
2121 amdgpu_vm_bo_base_init(&bo_va
->base
, vm
, bo
);
2123 bo_va
->ref_count
= 1;
2124 INIT_LIST_HEAD(&bo_va
->valids
);
2125 INIT_LIST_HEAD(&bo_va
->invalids
);
2130 if (amdgpu_dmabuf_is_xgmi_accessible(adev
, bo
)) {
2131 bo_va
->is_xgmi
= true;
2132 /* Power up XGMI if it can be potentially used */
2133 amdgpu_xgmi_set_pstate(adev
, AMDGPU_XGMI_PSTATE_MAX_VEGA20
);
2141 * amdgpu_vm_bo_insert_map - insert a new mapping
2143 * @adev: amdgpu_device pointer
2144 * @bo_va: bo_va to store the address
2145 * @mapping: the mapping to insert
2147 * Insert a new mapping into all structures.
2149 static void amdgpu_vm_bo_insert_map(struct amdgpu_device
*adev
,
2150 struct amdgpu_bo_va
*bo_va
,
2151 struct amdgpu_bo_va_mapping
*mapping
)
2153 struct amdgpu_vm
*vm
= bo_va
->base
.vm
;
2154 struct amdgpu_bo
*bo
= bo_va
->base
.bo
;
2156 mapping
->bo_va
= bo_va
;
2157 list_add(&mapping
->list
, &bo_va
->invalids
);
2158 amdgpu_vm_it_insert(mapping
, &vm
->va
);
2160 if (mapping
->flags
& AMDGPU_PTE_PRT
)
2161 amdgpu_vm_prt_get(adev
);
2163 if (bo
&& bo
->tbo
.base
.resv
== vm
->root
.base
.bo
->tbo
.base
.resv
&&
2164 !bo_va
->base
.moved
) {
2165 list_move(&bo_va
->base
.vm_status
, &vm
->moved
);
2167 trace_amdgpu_vm_bo_map(bo_va
, mapping
);
2171 * amdgpu_vm_bo_map - map bo inside a vm
2173 * @adev: amdgpu_device pointer
2174 * @bo_va: bo_va to store the address
2175 * @saddr: where to map the BO
2176 * @offset: requested offset in the BO
2177 * @size: BO size in bytes
2178 * @flags: attributes of pages (read/write/valid/etc.)
 * Add a mapping of the BO at the specified addr into the VM.
2183 * 0 for success, error for failure.
2185 * Object has to be reserved and unreserved outside!
2187 int amdgpu_vm_bo_map(struct amdgpu_device
*adev
,
2188 struct amdgpu_bo_va
*bo_va
,
2189 uint64_t saddr
, uint64_t offset
,
2190 uint64_t size
, uint64_t flags
)
2192 struct amdgpu_bo_va_mapping
*mapping
, *tmp
;
2193 struct amdgpu_bo
*bo
= bo_va
->base
.bo
;
2194 struct amdgpu_vm
*vm
= bo_va
->base
.vm
;
2197 /* validate the parameters */
2198 if (saddr
& ~PAGE_MASK
|| offset
& ~PAGE_MASK
||
2199 size
== 0 || size
& ~PAGE_MASK
)
2202 /* make sure object fit at this offset */
2203 eaddr
= saddr
+ size
- 1;
2204 if (saddr
>= eaddr
||
2205 (bo
&& offset
+ size
> amdgpu_bo_size(bo
)) ||
2206 (eaddr
>= adev
->vm_manager
.max_pfn
<< AMDGPU_GPU_PAGE_SHIFT
))
2209 saddr
/= AMDGPU_GPU_PAGE_SIZE
;
2210 eaddr
/= AMDGPU_GPU_PAGE_SIZE
;
2212 tmp
= amdgpu_vm_it_iter_first(&vm
->va
, saddr
, eaddr
);
2214 /* bo and tmp overlap, invalid addr */
2215 dev_err(adev
->dev
, "bo %p va 0x%010Lx-0x%010Lx conflict with "
2216 "0x%010Lx-0x%010Lx\n", bo
, saddr
, eaddr
,
2217 tmp
->start
, tmp
->last
+ 1);
2221 mapping
= kmalloc(sizeof(*mapping
), GFP_KERNEL
);
2225 mapping
->start
= saddr
;
2226 mapping
->last
= eaddr
;
2227 mapping
->offset
= offset
;
2228 mapping
->flags
= flags
;
2230 amdgpu_vm_bo_insert_map(adev
, bo_va
, mapping
);
2236 * amdgpu_vm_bo_replace_map - map bo inside a vm, replacing existing mappings
2238 * @adev: amdgpu_device pointer
2239 * @bo_va: bo_va to store the address
2240 * @saddr: where to map the BO
2241 * @offset: requested offset in the BO
2242 * @size: BO size in bytes
2243 * @flags: attributes of pages (read/write/valid/etc.)
 * Add a mapping of the BO at the specified addr into the VM. Replace existing
2246 * mappings as we do so.
2249 * 0 for success, error for failure.
2251 * Object has to be reserved and unreserved outside!
2253 int amdgpu_vm_bo_replace_map(struct amdgpu_device
*adev
,
2254 struct amdgpu_bo_va
*bo_va
,
2255 uint64_t saddr
, uint64_t offset
,
2256 uint64_t size
, uint64_t flags
)
2258 struct amdgpu_bo_va_mapping
*mapping
;
2259 struct amdgpu_bo
*bo
= bo_va
->base
.bo
;
2263 /* validate the parameters */
2264 if (saddr
& ~PAGE_MASK
|| offset
& ~PAGE_MASK
||
2265 size
== 0 || size
& ~PAGE_MASK
)
2268 /* make sure object fit at this offset */
2269 eaddr
= saddr
+ size
- 1;
2270 if (saddr
>= eaddr
||
2271 (bo
&& offset
+ size
> amdgpu_bo_size(bo
)) ||
2272 (eaddr
>= adev
->vm_manager
.max_pfn
<< AMDGPU_GPU_PAGE_SHIFT
))
2275 /* Allocate all the needed memory */
2276 mapping
= kmalloc(sizeof(*mapping
), GFP_KERNEL
);
2280 r
= amdgpu_vm_bo_clear_mappings(adev
, bo_va
->base
.vm
, saddr
, size
);
2286 saddr
/= AMDGPU_GPU_PAGE_SIZE
;
2287 eaddr
/= AMDGPU_GPU_PAGE_SIZE
;
2289 mapping
->start
= saddr
;
2290 mapping
->last
= eaddr
;
2291 mapping
->offset
= offset
;
2292 mapping
->flags
= flags
;
2294 amdgpu_vm_bo_insert_map(adev
, bo_va
, mapping
);

/**
 * amdgpu_vm_bo_unmap - remove bo mapping from vm
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to remove the address from
 * @saddr: where the BO is mapped
 *
 * Remove a mapping of the BO at the specified addr from the VM.
 *
 * Returns:
 * 0 for success, error for failure.
 *
 * Object has to be reserved and unreserved outside!
 */
int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
		       struct amdgpu_bo_va *bo_va,
		       uint64_t saddr)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_vm *vm = bo_va->base.vm;
	bool valid = true;

	saddr /= AMDGPU_GPU_PAGE_SIZE;

	list_for_each_entry(mapping, &bo_va->valids, list) {
		if (mapping->start == saddr)
			break;
	}

	if (&mapping->list == &bo_va->valids) {
		valid = false;

		list_for_each_entry(mapping, &bo_va->invalids, list) {
			if (mapping->start == saddr)
				break;
		}

		if (&mapping->list == &bo_va->invalids)
			return -ENOENT;
	}

	list_del(&mapping->list);
	amdgpu_vm_it_remove(mapping, &vm->va);
	mapping->bo_va = NULL;
	trace_amdgpu_vm_bo_unmap(bo_va, mapping);

	if (valid)
		list_add(&mapping->list, &vm->freed);
	else
		amdgpu_vm_free_mapping(adev, vm, mapping,
				       bo_va->last_pt_update);

	return 0;
}

/**
 * amdgpu_vm_bo_clear_mappings - remove all mappings in a specific range
 *
 * @adev: amdgpu_device pointer
 * @vm: VM structure to use
 * @saddr: start of the range
 * @size: size of the range
 *
 * Remove all mappings in a range, split them as appropriate.
 *
 * Returns:
 * 0 for success, error for failure.
 */
int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
				struct amdgpu_vm *vm,
				uint64_t saddr, uint64_t size)
{
	struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
	LIST_HEAD(removed);
	uint64_t eaddr;

	eaddr = saddr + size - 1;
	saddr /= AMDGPU_GPU_PAGE_SIZE;
	eaddr /= AMDGPU_GPU_PAGE_SIZE;

	/* Allocate all the needed memory */
	before = kzalloc(sizeof(*before), GFP_KERNEL);
	if (!before)
		return -ENOMEM;
	INIT_LIST_HEAD(&before->list);

	after = kzalloc(sizeof(*after), GFP_KERNEL);
	if (!after) {
		kfree(before);
		return -ENOMEM;
	}
	INIT_LIST_HEAD(&after->list);

	/* Now gather all removed mappings */
	tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
	while (tmp) {
		/* Remember mapping split at the start */
		if (tmp->start < saddr) {
			before->start = tmp->start;
			before->last = saddr - 1;
			before->offset = tmp->offset;
			before->flags = tmp->flags;
			before->bo_va = tmp->bo_va;
			list_add(&before->list, &tmp->bo_va->invalids);
		}

		/* Remember mapping split at the end */
		if (tmp->last > eaddr) {
			after->start = eaddr + 1;
			after->last = tmp->last;
			after->offset = tmp->offset;
			after->offset += (after->start - tmp->start) << PAGE_SHIFT;
			after->flags = tmp->flags;
			after->bo_va = tmp->bo_va;
			list_add(&after->list, &tmp->bo_va->invalids);
		}

		list_del(&tmp->list);
		list_add(&tmp->list, &removed);

		tmp = amdgpu_vm_it_iter_next(tmp, saddr, eaddr);
	}

	/* And free them up */
	list_for_each_entry_safe(tmp, next, &removed, list) {
		amdgpu_vm_it_remove(tmp, &vm->va);
		list_del(&tmp->list);

		if (tmp->start < saddr)
			tmp->start = saddr;
		if (tmp->last > eaddr)
			tmp->last = eaddr;

		tmp->bo_va = NULL;
		list_add(&tmp->list, &vm->freed);
		trace_amdgpu_vm_bo_unmap(NULL, tmp);
	}

	/* Insert partial mapping before the range */
	if (!list_empty(&before->list)) {
		amdgpu_vm_it_insert(before, &vm->va);
		if (before->flags & AMDGPU_PTE_PRT)
			amdgpu_vm_prt_get(adev);
	} else {
		kfree(before);
	}

	/* Insert partial mapping after the range */
	if (!list_empty(&after->list)) {
		amdgpu_vm_it_insert(after, &vm->va);
		if (after->flags & AMDGPU_PTE_PRT)
			amdgpu_vm_prt_get(adev);
	} else {
		kfree(after);
	}

	return 0;
}
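
/*
 * Illustrative sketch (not part of the driver): the split performed above,
 * expressed on plain numbers.  A mapping covering GPU pages [start, last]
 * that overlaps a cleared range [saddr, eaddr] leaves at most two remainder
 * pieces.  The struct and function below are hypothetical and exist only to
 * show the arithmetic.
 */
struct vm_range_example {
	uint64_t start;
	uint64_t last;
};

static void __maybe_unused amdgpu_vm_clear_split_example(void)
{
	struct vm_range_example mapping = { .start = 0x100, .last = 0x2ff };
	uint64_t saddr = 0x180, eaddr = 0x1ff;	/* range being cleared */
	struct vm_range_example before, after;

	/* piece kept in front of the cleared range: [0x100, 0x17f] */
	before.start = mapping.start;
	before.last = saddr - 1;

	/* piece kept behind the cleared range: [0x200, 0x2ff] */
	after.start = eaddr + 1;
	after.last = mapping.last;
}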

/**
 * amdgpu_vm_bo_lookup_mapping - find mapping by address
 *
 * @vm: the requested VM
 * @addr: the address
 *
 * Find a mapping by its address.
 *
 * Returns:
 * The amdgpu_bo_va_mapping matching for addr or NULL
 */
struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
							 uint64_t addr)
{
	return amdgpu_vm_it_iter_first(&vm->va, addr, addr);
}
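
/*
 * Illustrative sketch (not part of the driver): resolving a GPU virtual
 * address back to the BO that backs it.  The interval tree works on GPU
 * page numbers, so the byte address is converted first; the helper name is
 * an assumption for illustration only.
 */
static struct amdgpu_bo *__maybe_unused
amdgpu_vm_addr_to_bo_example(struct amdgpu_vm *vm, uint64_t addr)
{
	struct amdgpu_bo_va_mapping *mapping;

	mapping = amdgpu_vm_bo_lookup_mapping(vm, addr / AMDGPU_GPU_PAGE_SIZE);
	if (!mapping || !mapping->bo_va || !mapping->bo_va->base.bo)
		return NULL;

	return mapping->bo_va->base.bo;
}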

/**
 * amdgpu_vm_bo_trace_cs - trace all reserved mappings
 *
 * @vm: the requested vm
 * @ticket: CS ticket
 *
 * Trace all mappings of BOs reserved during a command submission.
 */
void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket)
{
	struct amdgpu_bo_va_mapping *mapping;

	if (!trace_amdgpu_vm_bo_cs_enabled())
		return;

	for (mapping = amdgpu_vm_it_iter_first(&vm->va, 0, U64_MAX); mapping;
	     mapping = amdgpu_vm_it_iter_next(mapping, 0, U64_MAX)) {
		if (mapping->bo_va && mapping->bo_va->base.bo) {
			struct amdgpu_bo *bo;

			bo = mapping->bo_va->base.bo;
			if (dma_resv_locking_ctx(bo->tbo.base.resv) !=
			    ticket)
				continue;
		}

		trace_amdgpu_vm_bo_cs(mapping);
	}
}

/**
 * amdgpu_vm_bo_rmv - remove a bo from a specific vm
 *
 * @adev: amdgpu_device pointer
 * @bo_va: requested bo_va
 *
 * Remove @bo_va->bo from the requested vm.
 *
 * Object has to be reserved!
 */
void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
		      struct amdgpu_bo_va *bo_va)
{
	struct amdgpu_bo_va_mapping *mapping, *next;
	struct amdgpu_bo *bo = bo_va->base.bo;
	struct amdgpu_vm *vm = bo_va->base.vm;
	struct amdgpu_vm_bo_base **base;

	if (bo) {
		if (bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv)
			vm->bulk_moveable = false;

		for (base = &bo_va->base.bo->vm_bo; *base;
		     base = &(*base)->next) {
			if (*base != &bo_va->base)
				continue;

			*base = bo_va->base.next;
			break;
		}
	}

	spin_lock(&vm->invalidated_lock);
	list_del(&bo_va->base.vm_status);
	spin_unlock(&vm->invalidated_lock);

	list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
		list_del(&mapping->list);
		amdgpu_vm_it_remove(mapping, &vm->va);
		mapping->bo_va = NULL;
		trace_amdgpu_vm_bo_unmap(bo_va, mapping);
		list_add(&mapping->list, &vm->freed);
	}
	list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
		list_del(&mapping->list);
		amdgpu_vm_it_remove(mapping, &vm->va);
		amdgpu_vm_free_mapping(adev, vm, mapping,
				       bo_va->last_pt_update);
	}

	dma_fence_put(bo_va->last_pt_update);

	if (bo && bo_va->is_xgmi)
		amdgpu_xgmi_set_pstate(adev, AMDGPU_XGMI_PSTATE_MIN);

	kfree(bo_va);
}

/**
 * amdgpu_vm_evictable - check if we can evict a VM
 *
 * @bo: A page table of the VM.
 *
 * Check if it is possible to evict a VM.
 */
bool amdgpu_vm_evictable(struct amdgpu_bo *bo)
{
	struct amdgpu_vm_bo_base *bo_base = bo->vm_bo;

	/* Page tables of a destroyed VM can go away immediately */
	if (!bo_base || !bo_base->vm)
		return true;

	/* Don't evict VM page tables while they are busy */
	if (!dma_resv_test_signaled_rcu(bo->tbo.base.resv, true))
		return false;

	/* Try to block ongoing updates */
	if (!amdgpu_vm_eviction_trylock(bo_base->vm))
		return false;

	/* Don't evict VM page tables while they are updated */
	if (!dma_fence_is_signaled(bo_base->vm->last_unlocked)) {
		amdgpu_vm_eviction_unlock(bo_base->vm);
		return false;
	}

	bo_base->vm->evicting = true;
	amdgpu_vm_eviction_unlock(bo_base->vm);
	return true;
}

/**
 * amdgpu_vm_bo_invalidate - mark the bo as invalid
 *
 * @adev: amdgpu_device pointer
 * @bo: amdgpu buffer object
 * @evicted: is the BO evicted
 *
 * Mark @bo as invalid.
 */
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
			     struct amdgpu_bo *bo, bool evicted)
{
	struct amdgpu_vm_bo_base *bo_base;

	/* shadow bo doesn't have bo base, its validation needs its parent */
	if (bo->parent && bo->parent->shadow == bo)
		bo = bo->parent;

	for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
		struct amdgpu_vm *vm = bo_base->vm;

		if (evicted && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv) {
			amdgpu_vm_bo_evicted(bo_base);
			continue;
		}

		if (bo_base->moved)
			continue;
		bo_base->moved = true;

		if (bo->tbo.type == ttm_bo_type_kernel)
			amdgpu_vm_bo_relocated(bo_base);
		else if (bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv)
			amdgpu_vm_bo_moved(bo_base);
		else
			amdgpu_vm_bo_invalidated(bo_base);
	}
}

/**
 * amdgpu_vm_get_block_size - calculate VM page table size as power of two
 *
 * @vm_size: VM size in GB
 *
 * Returns:
 * VM page table as power of two
 */
static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
{
	/* Total bits covered by PD + PTs */
	unsigned bits = ilog2(vm_size) + 18;

	/* Make sure the PD is 4K in size up to 8GB address space.
	   Above that split equal between PD and PTs */
	if (vm_size <= 8)
		return (bits - 9);
	else
		return ((bits + 3) / 2);
}
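
/*
 * Illustrative sketch (not part of the driver): the arithmetic above, worked
 * for two hypothetical sizes.  Only the helper just defined is used; the
 * function name below is made up for the example.
 */
static uint32_t __maybe_unused amdgpu_vm_block_size_example(void)
{
	/* 8 GB:  ilog2(8) + 18 = 21 bits, PD kept at 4K -> 21 - 9 = 12 */
	uint32_t small = amdgpu_vm_get_block_size(8);

	/* 256 GB: ilog2(256) + 18 = 26 bits, split -> (26 + 3) / 2 = 14 */
	uint32_t large = amdgpu_vm_get_block_size(256);

	return small + large;
}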

/**
 * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size
 *
 * @adev: amdgpu_device pointer
 * @min_vm_size: the minimum vm size in GB if it's set auto
 * @fragment_size_default: Default PTE fragment size
 * @max_level: max VMPT level
 * @max_bits: max address space size in bits
 */
void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
			   uint32_t fragment_size_default, unsigned max_level,
			   unsigned max_bits)
{
	unsigned int max_size = 1 << (max_bits - 30);
	unsigned int vm_size;
	uint64_t tmp;

	/* adjust vm size first */
	if (amdgpu_vm_size != -1) {
		vm_size = amdgpu_vm_size;
		if (vm_size > max_size) {
			dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n",
				 amdgpu_vm_size, max_size);
			vm_size = max_size;
		}
	} else {
		struct sysinfo si;
		unsigned int phys_ram_gb;

		/* Optimal VM size depends on the amount of physical
		 * RAM available. Underlying requirements and
		 * assumptions:
		 *
		 *  - Need to map system memory and VRAM from all GPUs
		 *     - VRAM from other GPUs not known here
		 *     - Assume VRAM <= system memory
		 *  - On GFX8 and older, VM space can be segmented for
		 *    different MTYPEs
		 *  - Need to allow room for fragmentation, guard pages etc.
		 *
		 * This adds up to a rough guess of system memory x3.
		 * Round up to power of two to maximize the available
		 * VM size with the given page table size.
		 */
		si_meminfo(&si);
		phys_ram_gb = ((uint64_t)si.totalram * si.mem_unit +
			       (1 << 30) - 1) >> 30;
		vm_size = roundup_pow_of_two(
			min(max(phys_ram_gb * 3, min_vm_size), max_size));
	}

	adev->vm_manager.max_pfn = (uint64_t)vm_size << 18;

	tmp = roundup_pow_of_two(adev->vm_manager.max_pfn);
	if (amdgpu_vm_block_size != -1)
		tmp >>= amdgpu_vm_block_size - 9;
	tmp = DIV_ROUND_UP(fls64(tmp) - 1, 9) - 1;
	adev->vm_manager.num_level = min(max_level, (unsigned)tmp);
	switch (adev->vm_manager.num_level) {
	case 3:
		adev->vm_manager.root_level = AMDGPU_VM_PDB2;
		break;
	case 2:
		adev->vm_manager.root_level = AMDGPU_VM_PDB1;
		break;
	case 1:
		adev->vm_manager.root_level = AMDGPU_VM_PDB0;
		break;
	default:
		dev_err(adev->dev, "VMPT only supports 2~4+1 levels\n");
	}
	/* block size depends on vm size and hw setup*/
	if (amdgpu_vm_block_size != -1)
		adev->vm_manager.block_size =
			min((unsigned)amdgpu_vm_block_size, max_bits
			    - AMDGPU_GPU_PAGE_SHIFT
			    - 9 * adev->vm_manager.num_level);
	else if (adev->vm_manager.num_level > 1)
		adev->vm_manager.block_size = 9;
	else
		adev->vm_manager.block_size = amdgpu_vm_get_block_size(tmp);

	if (amdgpu_vm_fragment_size == -1)
		adev->vm_manager.fragment_size = fragment_size_default;
	else
		adev->vm_manager.fragment_size = amdgpu_vm_fragment_size;

	DRM_INFO("vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n",
		 vm_size, adev->vm_manager.num_level + 1,
		 adev->vm_manager.block_size,
		 adev->vm_manager.fragment_size);
}
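
/*
 * Illustrative worked example (not part of the driver): for a hypothetical
 * 256 GB VM the code above sets max_pfn = 256 << 18 = 2^26 GPU pages.
 * roundup_pow_of_two() leaves that unchanged, fls64(2^26) - 1 = 26 bits of
 * page frame number remain to translate, and DIV_ROUND_UP(26, 9) - 1 = 2
 * selects two directory levels above the page tables (reported as
 * "3 levels"); with more than one level the block size defaults to 9 bits.
 */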

/**
 * amdgpu_vm_wait_idle - wait for the VM to become idle
 *
 * @vm: VM object to wait for
 * @timeout: timeout to wait for VM to become idle
 */
long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
{
	timeout = dma_resv_wait_timeout_rcu(vm->root.base.bo->tbo.base.resv,
					    true, true, timeout);
	if (timeout <= 0)
		return timeout;

	return dma_fence_wait_timeout(vm->last_unlocked, true, timeout);
}

/**
 * amdgpu_vm_init - initialize a vm instance
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @vm_context: Indicates if it is a GFX or Compute context
 * @pasid: Process address space identifier
 *
 * Init @vm fields.
 *
 * Returns:
 * 0 for success, error for failure.
 */
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		   int vm_context, u32 pasid)
{
	struct amdgpu_bo_param bp;
	struct amdgpu_bo *root;
	int r, i;

	vm->va = RB_ROOT_CACHED;
	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
		vm->reserved_vmid[i] = NULL;
	INIT_LIST_HEAD(&vm->evicted);
	INIT_LIST_HEAD(&vm->relocated);
	INIT_LIST_HEAD(&vm->moved);
	INIT_LIST_HEAD(&vm->idle);
	INIT_LIST_HEAD(&vm->invalidated);
	spin_lock_init(&vm->invalidated_lock);
	INIT_LIST_HEAD(&vm->freed);
	INIT_LIST_HEAD(&vm->done);

	/* create scheduler entities for page table updates */
	r = drm_sched_entity_init(&vm->immediate, DRM_SCHED_PRIORITY_NORMAL,
				  adev->vm_manager.vm_pte_scheds,
				  adev->vm_manager.vm_pte_num_scheds, NULL);
	if (r)
		return r;

	r = drm_sched_entity_init(&vm->delayed, DRM_SCHED_PRIORITY_NORMAL,
				  adev->vm_manager.vm_pte_scheds,
				  adev->vm_manager.vm_pte_num_scheds, NULL);
	if (r)
		goto error_free_immediate;

	vm->pte_support_ats = false;
	vm->is_compute_context = false;

	if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE) {
		vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
					    AMDGPU_VM_USE_CPU_FOR_COMPUTE);

		if (adev->asic_type == CHIP_RAVEN)
			vm->pte_support_ats = true;
	} else {
		vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
					    AMDGPU_VM_USE_CPU_FOR_GFX);
	}
	DRM_DEBUG_DRIVER("VM update mode is %s\n",
			 vm->use_cpu_for_update ? "CPU" : "SDMA");
	WARN_ONCE((vm->use_cpu_for_update &&
		   !amdgpu_gmc_vram_full_visible(&adev->gmc)),
		  "CPU update of VM recommended only for large BAR system\n");

	if (vm->use_cpu_for_update)
		vm->update_funcs = &amdgpu_vm_cpu_funcs;
	else
		vm->update_funcs = &amdgpu_vm_sdma_funcs;
	vm->last_update = NULL;
	vm->last_unlocked = dma_fence_get_stub();

	mutex_init(&vm->eviction_lock);
	vm->evicting = false;

	amdgpu_vm_bo_param(adev, vm, adev->vm_manager.root_level, false, &bp);
	if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE)
		bp.flags &= ~AMDGPU_GEM_CREATE_SHADOW;
	r = amdgpu_bo_create(adev, &bp, &root);
	if (r)
		goto error_free_delayed;

	r = amdgpu_bo_reserve(root, true);
	if (r)
		goto error_free_root;

	r = dma_resv_reserve_shared(root->tbo.base.resv, 1);
	if (r)
		goto error_unreserve;

	amdgpu_vm_bo_base_init(&vm->root.base, vm, root);

	r = amdgpu_vm_clear_bo(adev, vm, root, false);
	if (r)
		goto error_unreserve;

	amdgpu_bo_unreserve(vm->root.base.bo);

	if (pasid) {
		unsigned long flags;

		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
		r = idr_alloc(&adev->vm_manager.pasid_idr, vm, pasid, pasid + 1,
			      GFP_ATOMIC);
		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
		if (r < 0)
			goto error_free_root;

		vm->pasid = pasid;
	}

	INIT_KFIFO(vm->faults);

	return 0;

error_unreserve:
	amdgpu_bo_unreserve(vm->root.base.bo);

error_free_root:
	amdgpu_bo_unref(&vm->root.base.bo->shadow);
	amdgpu_bo_unref(&vm->root.base.bo);
	vm->root.base.bo = NULL;

error_free_delayed:
	dma_fence_put(vm->last_unlocked);
	drm_sched_entity_destroy(&vm->delayed);

error_free_immediate:
	drm_sched_entity_destroy(&vm->immediate);

	return r;
}

/**
 * amdgpu_vm_check_clean_reserved - check if a VM is clean
 *
 * @adev: amdgpu_device pointer
 * @vm: the VM to check
 *
 * Check all entries of the root PD; if any subsequent PDs are allocated,
 * page tables are already being created and filled, so the VM is not clean.
 *
 * Returns:
 * 0 if this VM is clean
 */
static int amdgpu_vm_check_clean_reserved(struct amdgpu_device *adev,
					  struct amdgpu_vm *vm)
{
	enum amdgpu_vm_level root = adev->vm_manager.root_level;
	unsigned int entries = amdgpu_vm_num_entries(adev, root);
	unsigned int i = 0;

	if (!(vm->root.entries))
		return 0;

	for (i = 0; i < entries; i++) {
		if (vm->root.entries[i].base.bo)
			return -EINVAL;
	}

	return 0;
}

/**
 * amdgpu_vm_make_compute - Turn a GFX VM into a compute VM
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @pasid: pasid to use
 *
 * This only works on GFX VMs that don't have any BOs added and no
 * page tables allocated yet.
 *
 * Changes the following VM parameters:
 * - use_cpu_for_update
 * - pte_supports_ats
 * - pasid (old PASID is released, because compute manages its own PASIDs)
 *
 * Reinitializes the page directory to reflect the changed ATS
 * setting.
 *
 * Returns:
 * 0 for success, -errno for errors.
 */
int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			   u32 pasid)
{
	bool pte_support_ats = (adev->asic_type == CHIP_RAVEN);
	int r;

	r = amdgpu_bo_reserve(vm->root.base.bo, true);
	if (r)
		return r;

	/* Sanity checks */
	r = amdgpu_vm_check_clean_reserved(adev, vm);
	if (r)
		goto unreserve_bo;

	if (pasid) {
		unsigned long flags;

		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
		r = idr_alloc(&adev->vm_manager.pasid_idr, vm, pasid, pasid + 1,
			      GFP_ATOMIC);
		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);

		if (r == -ENOSPC)
			goto unreserve_bo;
		r = 0;
	}

	/* Check if PD needs to be reinitialized and do it before
	 * changing any other state, in case it fails.
	 */
	if (pte_support_ats != vm->pte_support_ats) {
		vm->pte_support_ats = pte_support_ats;
		r = amdgpu_vm_clear_bo(adev, vm, vm->root.base.bo, false);
		if (r)
			goto free_idr;
	}

	/* Update VM state */
	vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
				    AMDGPU_VM_USE_CPU_FOR_COMPUTE);
	DRM_DEBUG_DRIVER("VM update mode is %s\n",
			 vm->use_cpu_for_update ? "CPU" : "SDMA");
	WARN_ONCE((vm->use_cpu_for_update &&
		   !amdgpu_gmc_vram_full_visible(&adev->gmc)),
		  "CPU update of VM recommended only for large BAR system\n");

	if (vm->use_cpu_for_update) {
		/* Sync with last SDMA update/clear before switching to CPU */
		r = amdgpu_bo_sync_wait(vm->root.base.bo,
					AMDGPU_FENCE_OWNER_UNDEFINED, true);
		if (r)
			goto free_idr;

		vm->update_funcs = &amdgpu_vm_cpu_funcs;
	} else {
		vm->update_funcs = &amdgpu_vm_sdma_funcs;
	}
	dma_fence_put(vm->last_update);
	vm->last_update = NULL;
	vm->is_compute_context = true;

	if (vm->pasid) {
		unsigned long flags;

		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
		idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);

		/* Free the original amdgpu allocated pasid
		 * Will be replaced with kfd allocated pasid
		 */
		amdgpu_pasid_free(vm->pasid);
		vm->pasid = 0;
	}

	/* Free the shadow bo for compute VM */
	amdgpu_bo_unref(&vm->root.base.bo->shadow);

	if (pasid)
		vm->pasid = pasid;

	goto unreserve_bo;

free_idr:
	if (pasid) {
		unsigned long flags;

		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
		idr_remove(&adev->vm_manager.pasid_idr, pasid);
		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
	}
unreserve_bo:
	amdgpu_bo_unreserve(vm->root.base.bo);
	return r;
}

/**
 * amdgpu_vm_release_compute - release a compute vm
 * @adev: amdgpu_device pointer
 * @vm: a vm turned into compute vm by calling amdgpu_vm_make_compute
 *
 * This is the counterpart of amdgpu_vm_make_compute. It decouples the compute
 * pasid from the vm. Compute should stop using the vm after this call.
 */
void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	if (vm->pasid) {
		unsigned long flags;

		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
		idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
	}
	vm->pasid = 0;
	vm->is_compute_context = false;
}
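
/*
 * Illustrative sketch (not part of the driver): the life cycle the two
 * helpers above are designed for.  The function name and the KFD provided
 * pasid are assumptions for illustration only; error handling is reduced to
 * the minimum.
 */
static int __maybe_unused
amdgpu_vm_compute_lifecycle_example(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm, u32 kfd_pasid)
{
	int r;

	/* Fresh GFX VM, no BOs mapped and no page tables allocated yet */
	r = amdgpu_vm_init(adev, vm, AMDGPU_VM_CONTEXT_GFX, 0);
	if (r)
		return r;

	/* Hand the VM over to compute with a KFD managed PASID */
	r = amdgpu_vm_make_compute(adev, vm, kfd_pasid);
	if (r)
		goto fini;

	/* ... compute work ... */

	/* Decouple the PASID again before tearing the VM down */
	amdgpu_vm_release_compute(adev, vm);
fini:
	amdgpu_vm_fini(adev, vm);
	return r;
}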

/**
 * amdgpu_vm_fini - tear down a vm instance
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Unbind the VM and remove all bos from the vm bo list.
 */
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	struct amdgpu_bo_va_mapping *mapping, *tmp;
	bool prt_fini_needed = !!adev->gmc.gmc_funcs->set_prt;
	struct amdgpu_bo *root;
	int i;

	amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm);

	root = amdgpu_bo_ref(vm->root.base.bo);
	amdgpu_bo_reserve(root, true);
	if (vm->pasid) {
		unsigned long flags;

		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
		idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
		vm->pasid = 0;
	}

	dma_fence_wait(vm->last_unlocked, false);
	dma_fence_put(vm->last_unlocked);

	list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
		if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
			amdgpu_vm_prt_fini(adev, vm);
			prt_fini_needed = false;
		}

		list_del(&mapping->list);
		amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
	}

	amdgpu_vm_free_pts(adev, vm, NULL);
	amdgpu_bo_unreserve(root);
	amdgpu_bo_unref(&root);
	WARN_ON(vm->root.base.bo);

	drm_sched_entity_destroy(&vm->immediate);
	drm_sched_entity_destroy(&vm->delayed);

	if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
		dev_err(adev->dev, "still active bo inside vm\n");
	}
	rbtree_postorder_for_each_entry_safe(mapping, tmp,
					     &vm->va.rb_root, rb) {
		/* Don't remove the mapping here, we don't want to trigger a
		 * rebalance and the tree is about to be destroyed anyway.
		 */
		list_del(&mapping->list);
		kfree(mapping);
	}

	dma_fence_put(vm->last_update);
	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
		amdgpu_vmid_free_reserved(adev, vm, i);
}

/**
 * amdgpu_vm_manager_init - init the VM manager
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the VM manager structures.
 */
void amdgpu_vm_manager_init(struct amdgpu_device *adev)
{
	unsigned i;

	amdgpu_vmid_mgr_init(adev);

	adev->vm_manager.fence_context =
		dma_fence_context_alloc(AMDGPU_MAX_RINGS);
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		adev->vm_manager.seqno[i] = 0;

	spin_lock_init(&adev->vm_manager.prt_lock);
	atomic_set(&adev->vm_manager.num_prt_users, 0);

	/* If not overridden by the user, by default, only in large BAR systems
	 * Compute VM tables will be updated by CPU
	 */
#ifdef CONFIG_X86_64
	if (amdgpu_vm_update_mode == -1) {
		if (amdgpu_gmc_vram_full_visible(&adev->gmc))
			adev->vm_manager.vm_update_mode =
				AMDGPU_VM_USE_CPU_FOR_COMPUTE;
		else
			adev->vm_manager.vm_update_mode = 0;
	} else
		adev->vm_manager.vm_update_mode = amdgpu_vm_update_mode;
#else
	adev->vm_manager.vm_update_mode = 0;
#endif

	idr_init(&adev->vm_manager.pasid_idr);
	spin_lock_init(&adev->vm_manager.pasid_lock);
}

/**
 * amdgpu_vm_manager_fini - cleanup VM manager
 *
 * @adev: amdgpu_device pointer
 *
 * Cleanup the VM manager and free resources.
 */
void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
{
	WARN_ON(!idr_is_empty(&adev->vm_manager.pasid_idr));
	idr_destroy(&adev->vm_manager.pasid_idr);

	amdgpu_vmid_mgr_fini(adev);
}

/**
 * amdgpu_vm_ioctl - Manages VMID reservation for vm hubs.
 *
 * @dev: drm device pointer
 * @data: drm_amdgpu_vm
 * @filp: drm file pointer
 *
 * Returns:
 * 0 for success, -errno for errors.
 */
int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	union drm_amdgpu_vm *args = data;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	long timeout = msecs_to_jiffies(2000);
	int r;

	switch (args->in.op) {
	case AMDGPU_VM_OP_RESERVE_VMID:
		/* We only have requirement to reserve vmid from gfxhub */
		r = amdgpu_vmid_alloc_reserved(adev, &fpriv->vm,
					       AMDGPU_GFXHUB_0);
		if (r)
			return r;
		break;
	case AMDGPU_VM_OP_UNRESERVE_VMID:
		if (amdgpu_sriov_runtime(adev))
			timeout = 8 * timeout;

		/* Wait vm idle to make sure the vmid set in SPM_VMID is
		 * not referenced anymore.
		 */
		r = amdgpu_bo_reserve(fpriv->vm.root.base.bo, true);
		if (r)
			return r;

		r = amdgpu_vm_wait_idle(&fpriv->vm, timeout);
		if (r < 0)
			return r;

		amdgpu_bo_unreserve(fpriv->vm.root.base.bo);
		amdgpu_vmid_free_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB_0);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
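
/*
 * Illustrative sketch (not part of the driver): how userspace reaches the
 * ioctl above through libdrm.  Kept under "#if 0" because it is userspace
 * code; the function name and include paths are assumptions and error
 * handling is elided.
 */
#if 0
#include <amdgpu_drm.h>		/* union drm_amdgpu_vm, AMDGPU_VM_OP_* */
#include <xf86drm.h>		/* drmCommandWriteRead() */

static int reserve_gfxhub_vmid_example(int fd)
{
	union drm_amdgpu_vm args = {};

	args.in.op = AMDGPU_VM_OP_RESERVE_VMID;
	args.in.flags = 0;

	return drmCommandWriteRead(fd, DRM_AMDGPU_VM, &args, sizeof(args));
}
#endif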

/**
 * amdgpu_vm_get_task_info - Extracts task info for a PASID.
 *
 * @adev: drm device pointer
 * @pasid: PASID identifier for VM
 * @task_info: task_info to fill.
 */
void amdgpu_vm_get_task_info(struct amdgpu_device *adev, u32 pasid,
			     struct amdgpu_task_info *task_info)
{
	struct amdgpu_vm *vm;
	unsigned long flags;

	spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);

	vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
	if (vm)
		*task_info = vm->task_info;

	spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
}

/**
 * amdgpu_vm_set_task_info - Sets VMs task info.
 *
 * @vm: vm for which to set the info
 */
void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
{
	if (vm->task_info.pid)
		return;

	vm->task_info.pid = current->pid;
	get_task_comm(vm->task_info.task_name, current);

	if (current->group_leader->mm != current->mm)
		return;

	vm->task_info.tgid = current->group_leader->pid;
	get_task_comm(vm->task_info.process_name, current->group_leader);
}

/**
 * amdgpu_vm_handle_fault - graceful handling of VM faults.
 * @adev: amdgpu device pointer
 * @pasid: PASID of the VM
 * @addr: Address of the fault
 *
 * Try to gracefully handle a VM fault. Return true if the fault was handled
 * and shouldn't be reported any more.
 */
bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
			    uint64_t addr)
{
	struct amdgpu_bo *root;
	uint64_t value, flags;
	struct amdgpu_vm *vm;
	long r;

	spin_lock(&adev->vm_manager.pasid_lock);
	vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
	if (vm)
		root = amdgpu_bo_ref(vm->root.base.bo);
	else
		root = NULL;
	spin_unlock(&adev->vm_manager.pasid_lock);

	if (!root)
		return false;

	r = amdgpu_bo_reserve(root, true);
	if (r)
		goto error_unref;

	/* Double check that the VM still exists */
	spin_lock(&adev->vm_manager.pasid_lock);
	vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
	if (vm && vm->root.base.bo != root)
		vm = NULL;
	spin_unlock(&adev->vm_manager.pasid_lock);
	if (!vm)
		goto error_unlock;

	addr /= AMDGPU_GPU_PAGE_SIZE;
	flags = AMDGPU_PTE_VALID | AMDGPU_PTE_SNOOPED |
		AMDGPU_PTE_SYSTEM;

	if (vm->is_compute_context) {
		/* Intentionally setting invalid PTE flag
		 * combination to force a no-retry-fault
		 */
		flags = AMDGPU_PTE_EXECUTABLE | AMDGPU_PDE_PTE |
			AMDGPU_PTE_TF;
		value = 0;

	} else if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_NEVER) {
		/* Redirect the access to the dummy page */
		value = adev->dummy_page_addr;
		flags |= AMDGPU_PTE_EXECUTABLE | AMDGPU_PTE_READABLE |
			AMDGPU_PTE_WRITEABLE;

	} else {
		/* Let the hw retry silently on the PTE */
		value = 0;
	}

	r = amdgpu_vm_bo_update_mapping(adev, adev, vm, true, false, NULL, addr,
					addr, flags, value, NULL, NULL,
					NULL);
	if (r)
		goto error_unlock;

	r = amdgpu_vm_update_pdes(adev, vm, true);

error_unlock:
	amdgpu_bo_unreserve(root);
	if (r < 0)
		DRM_ERROR("Can't handle page fault (%ld)\n", r);

error_unref:
	amdgpu_bo_unref(&root);

	return false;
}

#if defined(CONFIG_DEBUG_FS)
/**
 * amdgpu_debugfs_vm_bo_info - print BO info for the VM
 *
 * @vm: Requested VM for printing BO info
 * @m: debugfs file
 *
 * Print BO information in debugfs file for the VM
 */
void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m)
{
	struct amdgpu_bo_va *bo_va, *tmp;
	u64 total_idle = 0;
	u64 total_evicted = 0;
	u64 total_relocated = 0;
	u64 total_moved = 0;
	u64 total_invalidated = 0;
	u64 total_done = 0;
	unsigned int total_idle_objs = 0;
	unsigned int total_evicted_objs = 0;
	unsigned int total_relocated_objs = 0;
	unsigned int total_moved_objs = 0;
	unsigned int total_invalidated_objs = 0;
	unsigned int total_done_objs = 0;
	unsigned int id = 0;

	seq_puts(m, "\tIdle BOs:\n");
	list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) {
		if (!bo_va->base.bo)
			continue;
		total_idle += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
	}
	total_idle_objs = id;
	id = 0;

	seq_puts(m, "\tEvicted BOs:\n");
	list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status) {
		if (!bo_va->base.bo)
			continue;
		total_evicted += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
	}
	total_evicted_objs = id;
	id = 0;

	seq_puts(m, "\tRelocated BOs:\n");
	list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status) {
		if (!bo_va->base.bo)
			continue;
		total_relocated += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
	}
	total_relocated_objs = id;
	id = 0;

	seq_puts(m, "\tMoved BOs:\n");
	list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
		if (!bo_va->base.bo)
			continue;
		total_moved += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
	}
	total_moved_objs = id;
	id = 0;

	seq_puts(m, "\tInvalidated BOs:\n");
	spin_lock(&vm->invalidated_lock);
	list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) {
		if (!bo_va->base.bo)
			continue;
		total_invalidated += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
	}
	total_invalidated_objs = id;
	id = 0;

	seq_puts(m, "\tDone BOs:\n");
	list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status) {
		if (!bo_va->base.bo)
			continue;
		total_done += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
	}
	spin_unlock(&vm->invalidated_lock);
	total_done_objs = id;

	seq_printf(m, "\tTotal idle size: %12lld\tobjs:\t%d\n", total_idle,
		   total_idle_objs);
	seq_printf(m, "\tTotal evicted size: %12lld\tobjs:\t%d\n", total_evicted,
		   total_evicted_objs);
	seq_printf(m, "\tTotal relocated size: %12lld\tobjs:\t%d\n", total_relocated,
		   total_relocated_objs);
	seq_printf(m, "\tTotal moved size: %12lld\tobjs:\t%d\n", total_moved,
		   total_moved_objs);
	seq_printf(m, "\tTotal invalidated size: %12lld\tobjs:\t%d\n", total_invalidated,
		   total_invalidated_objs);
	seq_printf(m, "\tTotal done size: %12lld\tobjs:\t%d\n", total_done,
		   total_done_objs);
}
#endif