/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */

#include <linux/dma-fence-array.h>
#include <linux/interval_tree_generic.h>
#include <linux/idr.h>
#include <linux/dma-buf.h>

#include <drm/amdgpu_drm.h>
#include <drm/drm_drv.h>

#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_gmc.h"
#include "amdgpu_xgmi.h"
#include "amdgpu_dma_buf.h"
#include "amdgpu_res_cursor.h"
 * GPUVM is similar to the legacy gart on older asics, however
 * rather than there being a single global gart table
 * for the entire GPU, there are multiple VM page tables active
 * at any given time. The VM page tables can contain a mix of
 * vram pages and system memory pages, and system memory pages
 * can be mapped as snooped (cached system pages) or unsnooped
 * (uncached system pages).
 * Each VM has an ID associated with it and there is a page table
 * associated with each VMID. When executing a command buffer,
 * the kernel tells the ring what VMID to use for that command
 * buffer. VMIDs are allocated dynamically as commands are submitted.
 * The userspace drivers maintain their own address space and the kernel
 * sets up their page tables accordingly when they submit their
 * command buffers and a VMID is assigned.
 * Cayman/Trinity support up to 8 active VMs at any given time;
#define START(node) ((node)->start)
#define LAST(node) ((node)->last)

INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last,
		     START, LAST, static, amdgpu_vm_it)
/**
 * struct amdgpu_prt_cb - Helper to disable partial resident texture feature from a fence callback
 */
struct amdgpu_prt_cb {

	/**
	 * @adev: amdgpu device
	 */
	struct amdgpu_device *adev;

	struct dma_fence_cb cb;
};
/**
 * amdgpu_vm_set_pasid - manage pasid and vm ptr mapping
 *
 * @adev: amdgpu_device pointer
 * @vm: amdgpu_vm pointer
 * @pasid: the pasid the VM is using on this GPU
 *
 * Set the pasid this VM is using on this GPU, can also be used to remove the
 * pasid by passing in zero.
 */
int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			u32 pasid)
{
	if (vm->pasid == pasid)

	r = xa_err(xa_erase_irq(&adev->vm_manager.pasids, vm->pasid));

	r = xa_err(xa_store_irq(&adev->vm_manager.pasids, pasid, vm,
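
/*
 * Illustrative sketch (not part of the driver): how a caller might bind a
 * PASID to a VM with the helper above and later drop the binding by passing
 * zero.  The "example_bind_pasid" name, the u32 pasid type and the error
 * handling shown here are assumptions for illustration only.
 */
#if 0
static int example_bind_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			      u32 pasid)
{
	int r;

	/* associate the PASID with this VM on this GPU */
	r = amdgpu_vm_set_pasid(adev, vm, pasid);
	if (r)
		return r;

	/* ...and later remove the association again */
	return amdgpu_vm_set_pasid(adev, vm, 0);
}
#endif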
/*
 * vm eviction_lock can be taken in MMU notifiers. Make sure no reclaim-FS
 * happens while holding this lock anywhere to prevent deadlocks when
 * an MMU notifier runs in reclaim-FS context.
 */
static inline void amdgpu_vm_eviction_lock(struct amdgpu_vm *vm)
{
	mutex_lock(&vm->eviction_lock);
	vm->saved_flags = memalloc_noreclaim_save();
}

static inline int amdgpu_vm_eviction_trylock(struct amdgpu_vm *vm)
{
	if (mutex_trylock(&vm->eviction_lock)) {
		vm->saved_flags = memalloc_noreclaim_save();

static inline void amdgpu_vm_eviction_unlock(struct amdgpu_vm *vm)
{
	memalloc_noreclaim_restore(vm->saved_flags);
	mutex_unlock(&vm->eviction_lock);
}
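
/*
 * Illustrative sketch (not part of the driver): the usage pattern the helpers
 * above are meant for.  Any change of eviction state happens between
 * amdgpu_vm_eviction_lock() and amdgpu_vm_eviction_unlock(), which also
 * suppress reclaim-FS via memalloc_noreclaim_save()/restore() so that an MMU
 * notifier running in reclaim context cannot deadlock on the same mutex.
 * "example_clear_evicting" is a made-up name.
 */
#if 0
static void example_clear_evicting(struct amdgpu_vm *vm)
{
	amdgpu_vm_eviction_lock(vm);
	vm->evicting = false;	/* protected by eviction_lock */
	amdgpu_vm_eviction_unlock(vm);
}
#endif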
/**
 * amdgpu_vm_level_shift - return the addr shift for each level
 *
 * @adev: amdgpu_device pointer
 *
 * The number of bits the pfn needs to be right shifted for a level.
 */
static unsigned amdgpu_vm_level_shift(struct amdgpu_device *adev,
				      unsigned level)
{
	return 9 * (AMDGPU_VM_PDB0 - level) +
		adev->vm_manager.block_size;
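
/*
 * Illustrative sketch (not part of the driver): how the per-level shift is
 * typically used when walking the hierarchy.  The entry index of a page
 * frame number at a given level is the pfn shifted right by the level shift
 * and masked with the entry mask (see amdgpu_vm_entries_mask() below), just
 * as amdgpu_vm_pt_descendant() does.  "example_pt_index" is a made-up name.
 */
#if 0
static unsigned example_pt_index(struct amdgpu_device *adev, uint64_t pfn,
				 unsigned level)
{
	unsigned shift = amdgpu_vm_level_shift(adev, level);
	uint32_t mask = amdgpu_vm_entries_mask(adev, level);

	return (pfn >> shift) & mask;
}
#endif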
/**
 * amdgpu_vm_num_entries - return the number of entries in a PD/PT
 *
 * @adev: amdgpu_device pointer
 *
 * The number of entries in a page directory or page table.
 */
static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
				      unsigned level)
{
	unsigned shift = amdgpu_vm_level_shift(adev,
					       adev->vm_manager.root_level);

	if (level == adev->vm_manager.root_level)
		/* For the root directory */
		return round_up(adev->vm_manager.max_pfn, 1ULL << shift)
	else if (level != AMDGPU_VM_PTB)
		/* Everything in between */

		/* For the page tables on the leaves */
		return AMDGPU_VM_PTE_COUNT(adev);
/**
 * amdgpu_vm_num_ats_entries - return the number of ATS entries in the root PD
 *
 * @adev: amdgpu_device pointer
 *
 * The number of entries in the root page directory which needs the ATS setting.
 */
static unsigned amdgpu_vm_num_ats_entries(struct amdgpu_device *adev)
{
	shift = amdgpu_vm_level_shift(adev, adev->vm_manager.root_level);
	return AMDGPU_GMC_HOLE_START >> (shift + AMDGPU_GPU_PAGE_SHIFT);
}
/**
 * amdgpu_vm_entries_mask - the mask to get the entry number of a PD/PT
 *
 * @adev: amdgpu_device pointer
 *
 * The mask to extract the entry number of a PD/PT from an address.
 */
static uint32_t amdgpu_vm_entries_mask(struct amdgpu_device *adev,
				       unsigned int level)
{
	if (level <= adev->vm_manager.root_level)

	else if (level != AMDGPU_VM_PTB)

		return AMDGPU_VM_PTE_COUNT(adev) - 1;
/**
 * amdgpu_vm_bo_size - returns the size of the BOs in bytes
 *
 * @adev: amdgpu_device pointer
 *
 * The size of the BO for a page directory or page table in bytes.
 */
static unsigned amdgpu_vm_bo_size(struct amdgpu_device *adev, unsigned level)
{
	return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_entries(adev, level) * 8);
}
/**
 * amdgpu_vm_bo_evicted - vm_bo is evicted
 *
 * @vm_bo: vm_bo which is evicted
 *
 * State for PDs/PTs and per VM BOs which are not at the location they should
 * be at.
 */
static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo)
{
	struct amdgpu_vm *vm = vm_bo->vm;
	struct amdgpu_bo *bo = vm_bo->bo;

	if (bo->tbo.type == ttm_bo_type_kernel)
		list_move(&vm_bo->vm_status, &vm->evicted);
	else
		list_move_tail(&vm_bo->vm_status, &vm->evicted);
}
279 * amdgpu_vm_bo_moved - vm_bo is moved
281 * @vm_bo: vm_bo which is moved
283 * State for per VM BOs which are moved, but that change is not yet reflected
284 * in the page tables.
286 static void amdgpu_vm_bo_moved(struct amdgpu_vm_bo_base
*vm_bo
)
288 list_move(&vm_bo
->vm_status
, &vm_bo
->vm
->moved
);
292 * amdgpu_vm_bo_idle - vm_bo is idle
294 * @vm_bo: vm_bo which is now idle
296 * State for PDs/PTs and per VM BOs which have gone through the state machine
299 static void amdgpu_vm_bo_idle(struct amdgpu_vm_bo_base
*vm_bo
)
301 list_move(&vm_bo
->vm_status
, &vm_bo
->vm
->idle
);
302 vm_bo
->moved
= false;
306 * amdgpu_vm_bo_invalidated - vm_bo is invalidated
308 * @vm_bo: vm_bo which is now invalidated
310 * State for normal BOs which are invalidated and that change not yet reflected
313 static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base
*vm_bo
)
315 spin_lock(&vm_bo
->vm
->invalidated_lock
);
316 list_move(&vm_bo
->vm_status
, &vm_bo
->vm
->invalidated
);
317 spin_unlock(&vm_bo
->vm
->invalidated_lock
);
/**
 * amdgpu_vm_bo_relocated - vm_bo is relocated
 *
 * @vm_bo: vm_bo which is relocated
 *
 * State for PDs/PTs which need to update their parent PD.
 * For the root PD, just move to idle state.
 */
static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)
{
	if (vm_bo->bo->parent)
		list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
	else
		amdgpu_vm_bo_idle(vm_bo);
}
337 * amdgpu_vm_bo_done - vm_bo is done
339 * @vm_bo: vm_bo which is now done
341 * State for normal BOs which are invalidated and that change has been updated
344 static void amdgpu_vm_bo_done(struct amdgpu_vm_bo_base
*vm_bo
)
346 spin_lock(&vm_bo
->vm
->invalidated_lock
);
347 list_move(&vm_bo
->vm_status
, &vm_bo
->vm
->done
);
348 spin_unlock(&vm_bo
->vm
->invalidated_lock
);
352 * amdgpu_vm_bo_base_init - Adds bo to the list of bos associated with the vm
354 * @base: base structure for tracking BO usage in a VM
355 * @vm: vm to which bo is to be added
356 * @bo: amdgpu buffer object
358 * Initialize a bo_va_base structure and add it to the appropriate lists
361 static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base
*base
,
362 struct amdgpu_vm
*vm
,
363 struct amdgpu_bo
*bo
)
368 INIT_LIST_HEAD(&base
->vm_status
);
372 base
->next
= bo
->vm_bo
;
375 if (bo
->tbo
.base
.resv
!= vm
->root
.bo
->tbo
.base
.resv
)
378 vm
->bulk_moveable
= false;
379 if (bo
->tbo
.type
== ttm_bo_type_kernel
&& bo
->parent
)
380 amdgpu_vm_bo_relocated(base
);
382 amdgpu_vm_bo_idle(base
);
384 if (bo
->preferred_domains
&
385 amdgpu_mem_type_to_domain(bo
->tbo
.resource
->mem_type
))
389 * we checked all the prerequisites, but it looks like this per vm bo
390 * is currently evicted. add the bo to the evicted list to make sure it
391 * is validated on next vm use to avoid fault.
393 amdgpu_vm_bo_evicted(base
);
397 * amdgpu_vm_pt_parent - get the parent page directory
399 * @pt: child page table
401 * Helper to get the parent entry for the child page table. NULL if we are at
402 * the root page directory.
404 static struct amdgpu_vm_bo_base
*amdgpu_vm_pt_parent(struct amdgpu_vm_bo_base
*pt
)
406 struct amdgpu_bo
*parent
= pt
->bo
->parent
;
411 return parent
->vm_bo
;
415 * amdgpu_vm_pt_cursor - state for for_each_amdgpu_vm_pt
417 struct amdgpu_vm_pt_cursor
{
419 struct amdgpu_vm_bo_base
*parent
;
420 struct amdgpu_vm_bo_base
*entry
;
425 * amdgpu_vm_pt_start - start PD/PT walk
427 * @adev: amdgpu_device pointer
428 * @vm: amdgpu_vm structure
429 * @start: start address of the walk
430 * @cursor: state to initialize
 * Initialize an amdgpu_vm_pt_cursor to start a walk.
434 static void amdgpu_vm_pt_start(struct amdgpu_device
*adev
,
435 struct amdgpu_vm
*vm
, uint64_t start
,
436 struct amdgpu_vm_pt_cursor
*cursor
)
439 cursor
->parent
= NULL
;
440 cursor
->entry
= &vm
->root
;
441 cursor
->level
= adev
->vm_manager
.root_level
;
445 * amdgpu_vm_pt_descendant - go to child node
447 * @adev: amdgpu_device pointer
448 * @cursor: current state
450 * Walk to the child node of the current node.
452 * True if the walk was possible, false otherwise.
454 static bool amdgpu_vm_pt_descendant(struct amdgpu_device
*adev
,
455 struct amdgpu_vm_pt_cursor
*cursor
)
457 unsigned mask
, shift
, idx
;
459 if ((cursor
->level
== AMDGPU_VM_PTB
) || !cursor
->entry
||
463 mask
= amdgpu_vm_entries_mask(adev
, cursor
->level
);
464 shift
= amdgpu_vm_level_shift(adev
, cursor
->level
);
467 idx
= (cursor
->pfn
>> shift
) & mask
;
468 cursor
->parent
= cursor
->entry
;
469 cursor
->entry
= &to_amdgpu_bo_vm(cursor
->entry
->bo
)->entries
[idx
];
474 * amdgpu_vm_pt_sibling - go to sibling node
476 * @adev: amdgpu_device pointer
477 * @cursor: current state
479 * Walk to the sibling node of the current node.
481 * True if the walk was possible, false otherwise.
483 static bool amdgpu_vm_pt_sibling(struct amdgpu_device
*adev
,
484 struct amdgpu_vm_pt_cursor
*cursor
)
486 unsigned shift
, num_entries
;
488 /* Root doesn't have a sibling */
492 /* Go to our parents and see if we got a sibling */
493 shift
= amdgpu_vm_level_shift(adev
, cursor
->level
- 1);
494 num_entries
= amdgpu_vm_num_entries(adev
, cursor
->level
- 1);
496 if (cursor
->entry
== &to_amdgpu_bo_vm(cursor
->parent
->bo
)->entries
[num_entries
- 1])
499 cursor
->pfn
+= 1ULL << shift
;
500 cursor
->pfn
&= ~((1ULL << shift
) - 1);
506 * amdgpu_vm_pt_ancestor - go to parent node
508 * @cursor: current state
510 * Walk to the parent node of the current node.
512 * True if the walk was possible, false otherwise.
514 static bool amdgpu_vm_pt_ancestor(struct amdgpu_vm_pt_cursor
*cursor
)
520 cursor
->entry
= cursor
->parent
;
521 cursor
->parent
= amdgpu_vm_pt_parent(cursor
->parent
);
 * amdgpu_vm_pt_next - get next PD/PT in hierarchy
528 * @adev: amdgpu_device pointer
529 * @cursor: current state
531 * Walk the PD/PT tree to the next node.
533 static void amdgpu_vm_pt_next(struct amdgpu_device
*adev
,
534 struct amdgpu_vm_pt_cursor
*cursor
)
	/* First try a newborn child */
	if (amdgpu_vm_pt_descendant(adev, cursor))

	/* If that didn't work, try to find a sibling */
	while (!amdgpu_vm_pt_sibling(adev, cursor)) {
		/* No sibling, go to our parents and grandparents */
		if (!amdgpu_vm_pt_ancestor(cursor)) {
 * amdgpu_vm_pt_first_dfs - start a depth first search
 *
 * @adev: amdgpu_device structure
 * @vm: amdgpu_vm structure
 * @start: optional cursor to start with
 * @cursor: state to initialize
 *
 * Starts a depth first traversal of the PD/PT tree.
560 static void amdgpu_vm_pt_first_dfs(struct amdgpu_device
*adev
,
561 struct amdgpu_vm
*vm
,
562 struct amdgpu_vm_pt_cursor
*start
,
563 struct amdgpu_vm_pt_cursor
*cursor
)
568 amdgpu_vm_pt_start(adev
, vm
, 0, cursor
);
569 while (amdgpu_vm_pt_descendant(adev
, cursor
));
 * amdgpu_vm_pt_continue_dfs - check if the depth first search should continue
575 * @start: starting point for the search
576 * @entry: current entry
579 * True when the search should continue, false otherwise.
581 static bool amdgpu_vm_pt_continue_dfs(struct amdgpu_vm_pt_cursor
*start
,
582 struct amdgpu_vm_bo_base
*entry
)
584 return entry
&& (!start
|| entry
!= start
->entry
);
 * amdgpu_vm_pt_next_dfs - get the next node for a depth first search
 *
 * @adev: amdgpu_device structure
 * @cursor: current state
 *
 * Move the cursor to the next node in a depth first search.
static void amdgpu_vm_pt_next_dfs(struct amdgpu_device *adev,
				  struct amdgpu_vm_pt_cursor *cursor)
{
		cursor->entry = NULL;
	else if (amdgpu_vm_pt_sibling(adev, cursor))
		while (amdgpu_vm_pt_descendant(adev, cursor));
	else
		amdgpu_vm_pt_ancestor(cursor);
}

/**
 * for_each_amdgpu_vm_pt_dfs_safe - safe depth first search of all PDs/PTs
 */
#define for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry)		\
	for (amdgpu_vm_pt_first_dfs((adev), (vm), (start), &(cursor)),		\
	     (entry) = (cursor).entry, amdgpu_vm_pt_next_dfs((adev), &(cursor));\
	     amdgpu_vm_pt_continue_dfs((start), (entry));			\
	     (entry) = (cursor).entry, amdgpu_vm_pt_next_dfs((adev), &(cursor)))
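
/*
 * Illustrative sketch (not part of the driver): typical use of the DFS walker
 * above, modeled on amdgpu_vm_free_pts() and amdgpu_vm_invalidate_pds()
 * further down in this file.  Children are visited before their parents,
 * which is what makes freeing inside the loop safe.  "example_walk_pts" is a
 * made-up name.
 */
#if 0
static void example_walk_pts(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	struct amdgpu_vm_pt_cursor cursor;
	struct amdgpu_vm_bo_base *entry;

	for_each_amdgpu_vm_pt_dfs_safe(adev, vm, NULL, cursor, entry)
		if (entry->bo)
			;	/* operate on entry->bo here */
}
#endif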
619 * amdgpu_vm_get_pd_bo - add the VM PD to a validation list
621 * @vm: vm providing the BOs
622 * @validated: head of validation list
623 * @entry: entry to add
625 * Add the page directory to the list of BOs to
626 * validate for command submission.
628 void amdgpu_vm_get_pd_bo(struct amdgpu_vm
*vm
,
629 struct list_head
*validated
,
630 struct amdgpu_bo_list_entry
*entry
)
633 entry
->tv
.bo
= &vm
->root
.bo
->tbo
;
634 /* Two for VM updates, one for TTM and one for the CS job */
635 entry
->tv
.num_shared
= 4;
636 entry
->user_pages
= NULL
;
637 list_add(&entry
->tv
.head
, validated
);
641 * amdgpu_vm_del_from_lru_notify - update bulk_moveable flag
643 * @bo: BO which was removed from the LRU
645 * Make sure the bulk_moveable flag is updated when a BO is removed from the
648 void amdgpu_vm_del_from_lru_notify(struct ttm_buffer_object
*bo
)
650 struct amdgpu_bo
*abo
;
651 struct amdgpu_vm_bo_base
*bo_base
;
653 if (!amdgpu_bo_is_amdgpu_bo(bo
))
659 abo
= ttm_to_amdgpu_bo(bo
);
662 for (bo_base
= abo
->vm_bo
; bo_base
; bo_base
= bo_base
->next
) {
663 struct amdgpu_vm
*vm
= bo_base
->vm
;
665 if (abo
->tbo
.base
.resv
== vm
->root
.bo
->tbo
.base
.resv
)
666 vm
->bulk_moveable
= false;
671 * amdgpu_vm_move_to_lru_tail - move all BOs to the end of LRU
673 * @adev: amdgpu device pointer
674 * @vm: vm providing the BOs
676 * Move all BOs to the end of LRU and remember their positions to put them
679 void amdgpu_vm_move_to_lru_tail(struct amdgpu_device
*adev
,
680 struct amdgpu_vm
*vm
)
682 struct amdgpu_vm_bo_base
*bo_base
;
684 if (vm
->bulk_moveable
) {
685 spin_lock(&adev
->mman
.bdev
.lru_lock
);
686 ttm_bo_bulk_move_lru_tail(&vm
->lru_bulk_move
);
687 spin_unlock(&adev
->mman
.bdev
.lru_lock
);
691 memset(&vm
->lru_bulk_move
, 0, sizeof(vm
->lru_bulk_move
));
693 spin_lock(&adev
->mman
.bdev
.lru_lock
);
694 list_for_each_entry(bo_base
, &vm
->idle
, vm_status
) {
695 struct amdgpu_bo
*bo
= bo_base
->bo
;
696 struct amdgpu_bo
*shadow
= amdgpu_bo_shadowed(bo
);
701 ttm_bo_move_to_lru_tail(&bo
->tbo
, bo
->tbo
.resource
,
704 ttm_bo_move_to_lru_tail(&shadow
->tbo
,
705 shadow
->tbo
.resource
,
708 spin_unlock(&adev
->mman
.bdev
.lru_lock
);
710 vm
->bulk_moveable
= true;
714 * amdgpu_vm_validate_pt_bos - validate the page table BOs
716 * @adev: amdgpu device pointer
717 * @vm: vm providing the BOs
718 * @validate: callback to do the validation
719 * @param: parameter for the validation callback
 * Validate the page table BOs on command submission if necessary.
726 int amdgpu_vm_validate_pt_bos(struct amdgpu_device
*adev
, struct amdgpu_vm
*vm
,
727 int (*validate
)(void *p
, struct amdgpu_bo
*bo
),
730 struct amdgpu_vm_bo_base
*bo_base
, *tmp
;
733 vm
->bulk_moveable
&= list_empty(&vm
->evicted
);
735 list_for_each_entry_safe(bo_base
, tmp
, &vm
->evicted
, vm_status
) {
736 struct amdgpu_bo
*bo
= bo_base
->bo
;
737 struct amdgpu_bo
*shadow
= amdgpu_bo_shadowed(bo
);
739 r
= validate(param
, bo
);
743 r
= validate(param
, shadow
);
748 if (bo
->tbo
.type
!= ttm_bo_type_kernel
) {
749 amdgpu_vm_bo_moved(bo_base
);
751 vm
->update_funcs
->map_table(to_amdgpu_bo_vm(bo
));
752 amdgpu_vm_bo_relocated(bo_base
);
756 amdgpu_vm_eviction_lock(vm
);
757 vm
->evicting
= false;
758 amdgpu_vm_eviction_unlock(vm
);
764 * amdgpu_vm_ready - check VM is ready for updates
768 * Check if all VM PDs/PTs are ready for updates
771 * True if VM is not evicting.
773 bool amdgpu_vm_ready(struct amdgpu_vm
*vm
)
777 amdgpu_vm_eviction_lock(vm
);
779 amdgpu_vm_eviction_unlock(vm
);
784 * amdgpu_vm_clear_bo - initially clear the PDs/PTs
786 * @adev: amdgpu_device pointer
787 * @vm: VM to clear BO from
789 * @immediate: use an immediate update
791 * Root PD needs to be reserved when calling this.
794 * 0 on success, errno otherwise.
796 static int amdgpu_vm_clear_bo(struct amdgpu_device
*adev
,
797 struct amdgpu_vm
*vm
,
798 struct amdgpu_bo_vm
*vmbo
,
801 struct ttm_operation_ctx ctx
= { true, false };
802 unsigned level
= adev
->vm_manager
.root_level
;
803 struct amdgpu_vm_update_params params
;
804 struct amdgpu_bo
*ancestor
= &vmbo
->bo
;
805 struct amdgpu_bo
*bo
= &vmbo
->bo
;
806 unsigned entries
, ats_entries
;
810 /* Figure out our place in the hierarchy */
811 if (ancestor
->parent
) {
813 while (ancestor
->parent
->parent
) {
815 ancestor
= ancestor
->parent
;
819 entries
= amdgpu_bo_size(bo
) / 8;
820 if (!vm
->pte_support_ats
) {
823 } else if (!bo
->parent
) {
824 ats_entries
= amdgpu_vm_num_ats_entries(adev
);
825 ats_entries
= min(ats_entries
, entries
);
826 entries
-= ats_entries
;
829 struct amdgpu_vm_bo_base
*pt
;
831 pt
= ancestor
->vm_bo
;
832 ats_entries
= amdgpu_vm_num_ats_entries(adev
);
833 if ((pt
- to_amdgpu_bo_vm(vm
->root
.bo
)->entries
) >= ats_entries
) {
836 ats_entries
= entries
;
841 r
= ttm_bo_validate(&bo
->tbo
, &bo
->placement
, &ctx
);
846 struct amdgpu_bo
*shadow
= vmbo
->shadow
;
848 r
= ttm_bo_validate(&shadow
->tbo
, &shadow
->placement
, &ctx
);
853 r
= vm
->update_funcs
->map_table(vmbo
);
857 memset(¶ms
, 0, sizeof(params
));
860 params
.immediate
= immediate
;
862 r
= vm
->update_funcs
->prepare(¶ms
, NULL
, AMDGPU_SYNC_EXPLICIT
);
868 uint64_t value
= 0, flags
;
870 flags
= AMDGPU_PTE_DEFAULT_ATC
;
871 if (level
!= AMDGPU_VM_PTB
) {
872 /* Handle leaf PDEs as PTEs */
873 flags
|= AMDGPU_PDE_PTE
;
874 amdgpu_gmc_get_vm_pde(adev
, level
, &value
, &flags
);
877 r
= vm
->update_funcs
->update(¶ms
, vmbo
, addr
, 0, ats_entries
,
882 addr
+= ats_entries
* 8;
886 uint64_t value
= 0, flags
= 0;
888 if (adev
->asic_type
>= CHIP_VEGA10
) {
889 if (level
!= AMDGPU_VM_PTB
) {
890 /* Handle leaf PDEs as PTEs */
891 flags
|= AMDGPU_PDE_PTE
;
892 amdgpu_gmc_get_vm_pde(adev
, level
,
895 /* Workaround for fault priority problem on GMC9 */
896 flags
= AMDGPU_PTE_EXECUTABLE
;
900 r
= vm
->update_funcs
->update(¶ms
, vmbo
, addr
, 0, entries
,
906 return vm
->update_funcs
->commit(¶ms
, NULL
);
910 * amdgpu_vm_pt_create - create bo for PD/PT
912 * @adev: amdgpu_device pointer
914 * @level: the page table level
 * @immediate: use an immediate update
916 * @vmbo: pointer to the buffer object pointer
918 static int amdgpu_vm_pt_create(struct amdgpu_device
*adev
,
919 struct amdgpu_vm
*vm
,
920 int level
, bool immediate
,
921 struct amdgpu_bo_vm
**vmbo
)
923 struct amdgpu_bo_param bp
;
924 struct amdgpu_bo
*bo
;
925 struct dma_resv
*resv
;
926 unsigned int num_entries
;
929 memset(&bp
, 0, sizeof(bp
));
931 bp
.size
= amdgpu_vm_bo_size(adev
, level
);
932 bp
.byte_align
= AMDGPU_GPU_PAGE_SIZE
;
933 bp
.domain
= AMDGPU_GEM_DOMAIN_VRAM
;
934 bp
.domain
= amdgpu_bo_get_preferred_domain(adev
, bp
.domain
);
935 bp
.flags
= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS
|
936 AMDGPU_GEM_CREATE_CPU_GTT_USWC
;
938 if (level
< AMDGPU_VM_PTB
)
939 num_entries
= amdgpu_vm_num_entries(adev
, level
);
943 bp
.bo_ptr_size
= struct_size((*vmbo
), entries
, num_entries
);
945 if (vm
->use_cpu_for_update
)
946 bp
.flags
|= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED
;
948 bp
.type
= ttm_bo_type_kernel
;
949 bp
.no_wait_gpu
= immediate
;
951 bp
.resv
= vm
->root
.bo
->tbo
.base
.resv
;
953 r
= amdgpu_bo_create_vm(adev
, &bp
, vmbo
);
958 if (vm
->is_compute_context
|| (adev
->flags
& AMD_IS_APU
)) {
959 (*vmbo
)->shadow
= NULL
;
964 WARN_ON(dma_resv_lock(bo
->tbo
.base
.resv
,
967 memset(&bp
, 0, sizeof(bp
));
968 bp
.size
= amdgpu_vm_bo_size(adev
, level
);
969 bp
.domain
= AMDGPU_GEM_DOMAIN_GTT
;
970 bp
.flags
= AMDGPU_GEM_CREATE_CPU_GTT_USWC
;
971 bp
.type
= ttm_bo_type_kernel
;
972 bp
.resv
= bo
->tbo
.base
.resv
;
973 bp
.bo_ptr_size
= sizeof(struct amdgpu_bo
);
975 r
= amdgpu_bo_create(adev
, &bp
, &(*vmbo
)->shadow
);
978 dma_resv_unlock(bo
->tbo
.base
.resv
);
981 amdgpu_bo_unref(&bo
);
985 (*vmbo
)->shadow
->parent
= amdgpu_bo_ref(bo
);
986 amdgpu_bo_add_to_shadow_list(*vmbo
);
992 * amdgpu_vm_alloc_pts - Allocate a specific page table
994 * @adev: amdgpu_device pointer
995 * @vm: VM to allocate page tables for
996 * @cursor: Which page table to allocate
997 * @immediate: use an immediate update
999 * Make sure a specific page table or directory is allocated.
1002 * 1 if page table needed to be allocated, 0 if page table was already
1003 * allocated, negative errno if an error occurred.
1005 static int amdgpu_vm_alloc_pts(struct amdgpu_device
*adev
,
1006 struct amdgpu_vm
*vm
,
1007 struct amdgpu_vm_pt_cursor
*cursor
,
1010 struct amdgpu_vm_bo_base
*entry
= cursor
->entry
;
1011 struct amdgpu_bo
*pt_bo
;
1012 struct amdgpu_bo_vm
*pt
;
1018 r
= amdgpu_vm_pt_create(adev
, vm
, cursor
->level
, immediate
, &pt
);
1022 /* Keep a reference to the root directory to avoid
1023 * freeing them up in the wrong order.
1026 pt_bo
->parent
= amdgpu_bo_ref(cursor
->parent
->bo
);
1027 amdgpu_vm_bo_base_init(entry
, vm
, pt_bo
);
1028 r
= amdgpu_vm_clear_bo(adev
, vm
, pt
, immediate
);
1035 amdgpu_bo_unref(&pt
->shadow
);
1036 amdgpu_bo_unref(&pt_bo
);
 * amdgpu_vm_free_table - free one PD/PT
1043 * @entry: PDE to free
1045 static void amdgpu_vm_free_table(struct amdgpu_vm_bo_base
*entry
)
1047 struct amdgpu_bo
*shadow
;
1051 shadow
= amdgpu_bo_shadowed(entry
->bo
);
1052 entry
->bo
->vm_bo
= NULL
;
1053 list_del(&entry
->vm_status
);
1054 amdgpu_bo_unref(&shadow
);
1055 amdgpu_bo_unref(&entry
->bo
);
1059 * amdgpu_vm_free_pts - free PD/PT levels
1061 * @adev: amdgpu device structure
1062 * @vm: amdgpu vm structure
1063 * @start: optional cursor where to start freeing PDs/PTs
1065 * Free the page directory or page table level and all sub levels.
1067 static void amdgpu_vm_free_pts(struct amdgpu_device
*adev
,
1068 struct amdgpu_vm
*vm
,
1069 struct amdgpu_vm_pt_cursor
*start
)
1071 struct amdgpu_vm_pt_cursor cursor
;
1072 struct amdgpu_vm_bo_base
*entry
;
1074 vm
->bulk_moveable
= false;
1076 for_each_amdgpu_vm_pt_dfs_safe(adev
, vm
, start
, cursor
, entry
)
1077 amdgpu_vm_free_table(entry
);
1080 amdgpu_vm_free_table(start
->entry
);
1084 * amdgpu_vm_check_compute_bug - check whether asic has compute vm bug
1086 * @adev: amdgpu_device pointer
1088 void amdgpu_vm_check_compute_bug(struct amdgpu_device
*adev
)
1090 const struct amdgpu_ip_block
*ip_block
;
1091 bool has_compute_vm_bug
;
1092 struct amdgpu_ring
*ring
;
1095 has_compute_vm_bug
= false;
1097 ip_block
= amdgpu_device_ip_get_ip_block(adev
, AMD_IP_BLOCK_TYPE_GFX
);
	/* Compute has a VM bug for GFX version < 7.
	 * Compute has a VM bug for GFX 8 MEC firmware version < 673.
	 */
1101 if (ip_block
->version
->major
<= 7)
1102 has_compute_vm_bug
= true;
1103 else if (ip_block
->version
->major
== 8)
1104 if (adev
->gfx
.mec_fw_version
< 673)
1105 has_compute_vm_bug
= true;
1108 for (i
= 0; i
< adev
->num_rings
; i
++) {
1109 ring
= adev
->rings
[i
];
1110 if (ring
->funcs
->type
== AMDGPU_RING_TYPE_COMPUTE
)
1111 /* only compute rings */
1112 ring
->has_compute_vm_bug
= has_compute_vm_bug
;
1114 ring
->has_compute_vm_bug
= false;
1119 * amdgpu_vm_need_pipeline_sync - Check if pipe sync is needed for job.
1121 * @ring: ring on which the job will be submitted
1122 * @job: job to submit
1125 * True if sync is needed.
1127 bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring
*ring
,
1128 struct amdgpu_job
*job
)
1130 struct amdgpu_device
*adev
= ring
->adev
;
1131 unsigned vmhub
= ring
->funcs
->vmhub
;
1132 struct amdgpu_vmid_mgr
*id_mgr
= &adev
->vm_manager
.id_mgr
[vmhub
];
1133 struct amdgpu_vmid
*id
;
1134 bool gds_switch_needed
;
1135 bool vm_flush_needed
= job
->vm_needs_flush
|| ring
->has_compute_vm_bug
;
1139 id
= &id_mgr
->ids
[job
->vmid
];
1140 gds_switch_needed
= ring
->funcs
->emit_gds_switch
&& (
1141 id
->gds_base
!= job
->gds_base
||
1142 id
->gds_size
!= job
->gds_size
||
1143 id
->gws_base
!= job
->gws_base
||
1144 id
->gws_size
!= job
->gws_size
||
1145 id
->oa_base
!= job
->oa_base
||
1146 id
->oa_size
!= job
->oa_size
);
1148 if (amdgpu_vmid_had_gpu_reset(adev
, id
))
1151 return vm_flush_needed
|| gds_switch_needed
;
1155 * amdgpu_vm_flush - hardware flush the vm
1157 * @ring: ring to use for flush
1159 * @need_pipe_sync: is pipe sync needed
1161 * Emit a VM flush when it is necessary.
1164 * 0 on success, errno otherwise.
1166 int amdgpu_vm_flush(struct amdgpu_ring
*ring
, struct amdgpu_job
*job
,
1167 bool need_pipe_sync
)
1169 struct amdgpu_device
*adev
= ring
->adev
;
1170 unsigned vmhub
= ring
->funcs
->vmhub
;
1171 struct amdgpu_vmid_mgr
*id_mgr
= &adev
->vm_manager
.id_mgr
[vmhub
];
1172 struct amdgpu_vmid
*id
= &id_mgr
->ids
[job
->vmid
];
1173 bool gds_switch_needed
= ring
->funcs
->emit_gds_switch
&& (
1174 id
->gds_base
!= job
->gds_base
||
1175 id
->gds_size
!= job
->gds_size
||
1176 id
->gws_base
!= job
->gws_base
||
1177 id
->gws_size
!= job
->gws_size
||
1178 id
->oa_base
!= job
->oa_base
||
1179 id
->oa_size
!= job
->oa_size
);
1180 bool vm_flush_needed
= job
->vm_needs_flush
;
1181 struct dma_fence
*fence
= NULL
;
1182 bool pasid_mapping_needed
= false;
1183 unsigned patch_offset
= 0;
1184 bool update_spm_vmid_needed
= (job
->vm
&& (job
->vm
->reserved_vmid
[vmhub
] != NULL
));
1187 if (update_spm_vmid_needed
&& adev
->gfx
.rlc
.funcs
->update_spm_vmid
)
1188 adev
->gfx
.rlc
.funcs
->update_spm_vmid(adev
, job
->vmid
);
1190 if (amdgpu_vmid_had_gpu_reset(adev
, id
)) {
1191 gds_switch_needed
= true;
1192 vm_flush_needed
= true;
1193 pasid_mapping_needed
= true;
1196 mutex_lock(&id_mgr
->lock
);
1197 if (id
->pasid
!= job
->pasid
|| !id
->pasid_mapping
||
1198 !dma_fence_is_signaled(id
->pasid_mapping
))
1199 pasid_mapping_needed
= true;
1200 mutex_unlock(&id_mgr
->lock
);
1202 gds_switch_needed
&= !!ring
->funcs
->emit_gds_switch
;
1203 vm_flush_needed
&= !!ring
->funcs
->emit_vm_flush
&&
1204 job
->vm_pd_addr
!= AMDGPU_BO_INVALID_OFFSET
;
1205 pasid_mapping_needed
&= adev
->gmc
.gmc_funcs
->emit_pasid_mapping
&&
1206 ring
->funcs
->emit_wreg
;
1208 if (!vm_flush_needed
&& !gds_switch_needed
&& !need_pipe_sync
)
1211 if (ring
->funcs
->init_cond_exec
)
1212 patch_offset
= amdgpu_ring_init_cond_exec(ring
);
1215 amdgpu_ring_emit_pipeline_sync(ring
);
1217 if (vm_flush_needed
) {
1218 trace_amdgpu_vm_flush(ring
, job
->vmid
, job
->vm_pd_addr
);
1219 amdgpu_ring_emit_vm_flush(ring
, job
->vmid
, job
->vm_pd_addr
);
1222 if (pasid_mapping_needed
)
1223 amdgpu_gmc_emit_pasid_mapping(ring
, job
->vmid
, job
->pasid
);
1225 if (vm_flush_needed
|| pasid_mapping_needed
) {
1226 r
= amdgpu_fence_emit(ring
, &fence
, NULL
, 0);
1231 if (vm_flush_needed
) {
1232 mutex_lock(&id_mgr
->lock
);
1233 dma_fence_put(id
->last_flush
);
1234 id
->last_flush
= dma_fence_get(fence
);
1235 id
->current_gpu_reset_count
=
1236 atomic_read(&adev
->gpu_reset_counter
);
1237 mutex_unlock(&id_mgr
->lock
);
1240 if (pasid_mapping_needed
) {
1241 mutex_lock(&id_mgr
->lock
);
1242 id
->pasid
= job
->pasid
;
1243 dma_fence_put(id
->pasid_mapping
);
1244 id
->pasid_mapping
= dma_fence_get(fence
);
1245 mutex_unlock(&id_mgr
->lock
);
1247 dma_fence_put(fence
);
1249 if (ring
->funcs
->emit_gds_switch
&& gds_switch_needed
) {
1250 id
->gds_base
= job
->gds_base
;
1251 id
->gds_size
= job
->gds_size
;
1252 id
->gws_base
= job
->gws_base
;
1253 id
->gws_size
= job
->gws_size
;
1254 id
->oa_base
= job
->oa_base
;
1255 id
->oa_size
= job
->oa_size
;
1256 amdgpu_ring_emit_gds_switch(ring
, job
->vmid
, job
->gds_base
,
1257 job
->gds_size
, job
->gws_base
,
1258 job
->gws_size
, job
->oa_base
,
1262 if (ring
->funcs
->patch_cond_exec
)
1263 amdgpu_ring_patch_cond_exec(ring
, patch_offset
);
1265 /* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */
1266 if (ring
->funcs
->emit_switch_buffer
) {
1267 amdgpu_ring_emit_switch_buffer(ring
);
1268 amdgpu_ring_emit_switch_buffer(ring
);
1274 * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
1277 * @bo: requested buffer object
1279 * Find @bo inside the requested vm.
1280 * Search inside the @bos vm list for the requested vm
1281 * Returns the found bo_va or NULL if none is found
1283 * Object has to be reserved!
1286 * Found bo_va or NULL.
1288 struct amdgpu_bo_va
*amdgpu_vm_bo_find(struct amdgpu_vm
*vm
,
1289 struct amdgpu_bo
*bo
)
1291 struct amdgpu_vm_bo_base
*base
;
1293 for (base
= bo
->vm_bo
; base
; base
= base
->next
) {
1297 return container_of(base
, struct amdgpu_bo_va
, base
);
/**
 * amdgpu_vm_map_gart - Resolve gart mapping of addr
 *
 * @pages_addr: optional DMA address to use for lookup
 * @addr: the unmapped addr
 *
 * Look up the physical address of the page that the pte resolves
 * to.
 *
 * The pointer for the page table entry.
 */
uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
{
	uint64_t result;

	/* page table offset */
	result = pages_addr[addr >> PAGE_SHIFT];

	/* in case cpu page size != gpu page size */
	result |= addr & (~PAGE_MASK);

	result &= 0xFFFFFFFFFFFFF000ULL;

	return result;
}
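
/*
 * Illustrative sketch (not part of the driver): resolving one system-memory
 * address through the DMA address array, as the page-table update backends do
 * via params->pages_addr.  The address value is made up; assuming 4K CPU
 * pages, 0x12345 indexes pages_addr[0x12] and the OR-ed sub-page offset 0x345
 * is masked off again by the final alignment, so the returned PTE address is
 * the DMA address of that page.
 */
#if 0
static uint64_t example_resolve(const dma_addr_t *pages_addr)
{
	uint64_t unmapped_addr = 0x12345;

	return amdgpu_vm_map_gart(pages_addr, unmapped_addr);
}
#endif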
1330 * amdgpu_vm_update_pde - update a single level in the hierarchy
1332 * @params: parameters for the update
1334 * @entry: entry to update
1336 * Makes sure the requested entry in parent is up to date.
1338 static int amdgpu_vm_update_pde(struct amdgpu_vm_update_params
*params
,
1339 struct amdgpu_vm
*vm
,
1340 struct amdgpu_vm_bo_base
*entry
)
1342 struct amdgpu_vm_bo_base
*parent
= amdgpu_vm_pt_parent(entry
);
1343 struct amdgpu_bo
*bo
= parent
->bo
, *pbo
;
1344 uint64_t pde
, pt
, flags
;
1347 for (level
= 0, pbo
= bo
->parent
; pbo
; ++level
)
1350 level
+= params
->adev
->vm_manager
.root_level
;
1351 amdgpu_gmc_get_pde_for_bo(entry
->bo
, level
, &pt
, &flags
);
1352 pde
= (entry
- to_amdgpu_bo_vm(parent
->bo
)->entries
) * 8;
1353 return vm
->update_funcs
->update(params
, to_amdgpu_bo_vm(bo
), pde
, pt
,
1358 * amdgpu_vm_invalidate_pds - mark all PDs as invalid
1360 * @adev: amdgpu_device pointer
1363 * Mark all PD level as invalid after an error.
1365 static void amdgpu_vm_invalidate_pds(struct amdgpu_device
*adev
,
1366 struct amdgpu_vm
*vm
)
1368 struct amdgpu_vm_pt_cursor cursor
;
1369 struct amdgpu_vm_bo_base
*entry
;
1371 for_each_amdgpu_vm_pt_dfs_safe(adev
, vm
, NULL
, cursor
, entry
)
1372 if (entry
->bo
&& !entry
->moved
)
1373 amdgpu_vm_bo_relocated(entry
);
1377 * amdgpu_vm_update_pdes - make sure that all directories are valid
1379 * @adev: amdgpu_device pointer
1381 * @immediate: submit immediately to the paging queue
1383 * Makes sure all directories are up to date.
1386 * 0 for success, error for failure.
1388 int amdgpu_vm_update_pdes(struct amdgpu_device
*adev
,
1389 struct amdgpu_vm
*vm
, bool immediate
)
1391 struct amdgpu_vm_update_params params
;
1394 if (list_empty(&vm
->relocated
))
1397 memset(¶ms
, 0, sizeof(params
));
1400 params
.immediate
= immediate
;
1402 r
= vm
->update_funcs
->prepare(¶ms
, NULL
, AMDGPU_SYNC_EXPLICIT
);
1406 while (!list_empty(&vm
->relocated
)) {
1407 struct amdgpu_vm_bo_base
*entry
;
1409 entry
= list_first_entry(&vm
->relocated
,
1410 struct amdgpu_vm_bo_base
,
1412 amdgpu_vm_bo_idle(entry
);
1414 r
= amdgpu_vm_update_pde(¶ms
, vm
, entry
);
1419 r
= vm
->update_funcs
->commit(¶ms
, &vm
->last_update
);
1425 amdgpu_vm_invalidate_pds(adev
, vm
);
1430 * amdgpu_vm_update_flags - figure out flags for PTE updates
1432 * Make sure to set the right flags for the PTEs at the desired level.
1434 static void amdgpu_vm_update_flags(struct amdgpu_vm_update_params
*params
,
1435 struct amdgpu_bo_vm
*pt
, unsigned int level
,
1436 uint64_t pe
, uint64_t addr
,
1437 unsigned int count
, uint32_t incr
,
1441 if (level
!= AMDGPU_VM_PTB
) {
1442 flags
|= AMDGPU_PDE_PTE
;
1443 amdgpu_gmc_get_vm_pde(params
->adev
, level
, &addr
, &flags
);
1445 } else if (params
->adev
->asic_type
>= CHIP_VEGA10
&&
1446 !(flags
& AMDGPU_PTE_VALID
) &&
1447 !(flags
& AMDGPU_PTE_PRT
)) {
1449 /* Workaround for fault priority problem on GMC9 */
1450 flags
|= AMDGPU_PTE_EXECUTABLE
;
1453 params
->vm
->update_funcs
->update(params
, pt
, pe
, addr
, count
, incr
,
1458 * amdgpu_vm_fragment - get fragment for PTEs
1460 * @params: see amdgpu_vm_update_params definition
1461 * @start: first PTE to handle
1462 * @end: last PTE to handle
1463 * @flags: hw mapping flags
1464 * @frag: resulting fragment size
1465 * @frag_end: end of this fragment
1467 * Returns the first possible fragment for the start and end address.
1469 static void amdgpu_vm_fragment(struct amdgpu_vm_update_params
*params
,
1470 uint64_t start
, uint64_t end
, uint64_t flags
,
1471 unsigned int *frag
, uint64_t *frag_end
)
1474 * The MC L1 TLB supports variable sized pages, based on a fragment
1475 * field in the PTE. When this field is set to a non-zero value, page
1476 * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
1477 * flags are considered valid for all PTEs within the fragment range
1478 * and corresponding mappings are assumed to be physically contiguous.
1480 * The L1 TLB can store a single PTE for the whole fragment,
1481 * significantly increasing the space available for translation
1482 * caching. This leads to large improvements in throughput when the
1483 * TLB is under pressure.
1485 * The L2 TLB distributes small and large fragments into two
1486 * asymmetric partitions. The large fragment cache is significantly
1487 * larger. Thus, we try to use large fragments wherever possible.
1488 * Userspace can support this by aligning virtual base address and
1489 * allocation size to the fragment size.
1491 * Starting with Vega10 the fragment size only controls the L1. The L2
1492 * is now directly feed with small/huge/giant pages from the walker.
	if (params->adev->asic_type < CHIP_VEGA10)
		max_frag = params->adev->vm_manager.fragment_size;

	/* system pages are not contiguous */
	if (params->pages_addr) {

	/* This intentionally wraps around if no bit is set */
	*frag = min((unsigned)ffs(start) - 1, (unsigned)fls64(end - start) - 1);
	if (*frag >= max_frag) {
		*frag_end = end & ~((1ULL << max_frag) - 1);
	} else {
		*frag_end = start + (1 << *frag);
	}
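
/*
 * Illustrative worked example (not part of the driver): the values below are
 * made up and assume max_frag does not clamp the result.  For a range that
 * starts 2MiB aligned and is 3MiB long (in GPU pages), both the alignment of
 * the start and the size of the range limit the fragment to 2MiB.
 */
#if 0
static void example_fragment(void)
{
	uint64_t start = 0x200, end = 0x500;	/* GPU pages: 2MiB..5MiB */
	unsigned int frag = min((unsigned)ffs(start) - 1,
				(unsigned)fls64(end - start) - 1);

	/* frag == 9, so the first fragment spans 1 << 9 pages (2MiB),
	 * i.e. pfns 0x200..0x3ff, and frag_end would be 0x400.
	 */
}
#endif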
1519 * amdgpu_vm_update_ptes - make sure that page tables are valid
1521 * @params: see amdgpu_vm_update_params definition
1522 * @start: start of GPU address range
1523 * @end: end of GPU address range
1524 * @dst: destination address to map to, the next dst inside the function
1525 * @flags: mapping flags
1527 * Update the page tables in the range @start - @end.
1530 * 0 for success, -EINVAL for failure.
1532 static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params
*params
,
1533 uint64_t start
, uint64_t end
,
1534 uint64_t dst
, uint64_t flags
)
1536 struct amdgpu_device
*adev
= params
->adev
;
1537 struct amdgpu_vm_pt_cursor cursor
;
1538 uint64_t frag_start
= start
, frag_end
;
1542 /* figure out the initial fragment */
1543 amdgpu_vm_fragment(params
, frag_start
, end
, flags
, &frag
, &frag_end
);
1545 /* walk over the address space and update the PTs */
1546 amdgpu_vm_pt_start(adev
, params
->vm
, start
, &cursor
);
1547 while (cursor
.pfn
< end
) {
1548 unsigned shift
, parent_shift
, mask
;
1549 uint64_t incr
, entry_end
, pe_start
;
1550 struct amdgpu_bo
*pt
;
1552 if (!params
->unlocked
) {
1553 /* make sure that the page tables covering the
1554 * address range are actually allocated
1556 r
= amdgpu_vm_alloc_pts(params
->adev
, params
->vm
,
1557 &cursor
, params
->immediate
);
1562 shift
= amdgpu_vm_level_shift(adev
, cursor
.level
);
1563 parent_shift
= amdgpu_vm_level_shift(adev
, cursor
.level
- 1);
1564 if (params
->unlocked
) {
1565 /* Unlocked updates are only allowed on the leaves */
1566 if (amdgpu_vm_pt_descendant(adev
, &cursor
))
1568 } else if (adev
->asic_type
< CHIP_VEGA10
&&
1569 (flags
& AMDGPU_PTE_VALID
)) {
1570 /* No huge page support before GMC v9 */
1571 if (cursor
.level
!= AMDGPU_VM_PTB
) {
1572 if (!amdgpu_vm_pt_descendant(adev
, &cursor
))
1576 } else if (frag
< shift
) {
1577 /* We can't use this level when the fragment size is
1578 * smaller than the address shift. Go to the next
1579 * child entry and try again.
1581 if (amdgpu_vm_pt_descendant(adev
, &cursor
))
1583 } else if (frag
>= parent_shift
) {
1584 /* If the fragment size is even larger than the parent
1585 * shift we should go up one level and check it again.
1587 if (!amdgpu_vm_pt_ancestor(&cursor
))
1592 pt
= cursor
.entry
->bo
;
1594 /* We need all PDs and PTs for mapping something, */
1595 if (flags
& AMDGPU_PTE_VALID
)
1598 /* but unmapping something can happen at a higher
1601 if (!amdgpu_vm_pt_ancestor(&cursor
))
1604 pt
= cursor
.entry
->bo
;
1605 shift
= parent_shift
;
1606 frag_end
= max(frag_end
, ALIGN(frag_start
+ 1,
1610 /* Looks good so far, calculate parameters for the update */
1611 incr
= (uint64_t)AMDGPU_GPU_PAGE_SIZE
<< shift
;
1612 mask
= amdgpu_vm_entries_mask(adev
, cursor
.level
);
1613 pe_start
= ((cursor
.pfn
>> shift
) & mask
) * 8;
1614 entry_end
= ((uint64_t)mask
+ 1) << shift
;
1615 entry_end
+= cursor
.pfn
& ~(entry_end
- 1);
1616 entry_end
= min(entry_end
, end
);
1619 struct amdgpu_vm
*vm
= params
->vm
;
1620 uint64_t upd_end
= min(entry_end
, frag_end
);
1621 unsigned nptes
= (upd_end
- frag_start
) >> shift
;
1622 uint64_t upd_flags
= flags
| AMDGPU_PTE_FRAG(frag
);
1624 /* This can happen when we set higher level PDs to
1625 * silent to stop fault floods.
1627 nptes
= max(nptes
, 1u);
1629 trace_amdgpu_vm_update_ptes(params
, frag_start
, upd_end
,
1630 nptes
, dst
, incr
, upd_flags
,
1632 vm
->immediate
.fence_context
);
1633 amdgpu_vm_update_flags(params
, to_amdgpu_bo_vm(pt
),
1634 cursor
.level
, pe_start
, dst
,
1635 nptes
, incr
, upd_flags
);
1637 pe_start
+= nptes
* 8;
1638 dst
+= nptes
* incr
;
1640 frag_start
= upd_end
;
1641 if (frag_start
>= frag_end
) {
1642 /* figure out the next fragment */
1643 amdgpu_vm_fragment(params
, frag_start
, end
,
1644 flags
, &frag
, &frag_end
);
1648 } while (frag_start
< entry_end
);
1650 if (amdgpu_vm_pt_descendant(adev
, &cursor
)) {
1651 /* Free all child entries.
1652 * Update the tables with the flags and addresses and free up subsequent
1653 * tables in the case of huge pages or freed up areas.
1654 * This is the maximum you can free, because all other page tables are not
1655 * completely covered by the range and so potentially still in use.
1657 while (cursor
.pfn
< frag_start
) {
1658 /* Make sure previous mapping is freed */
1659 if (cursor
.entry
->bo
) {
1660 params
->table_freed
= true;
1661 amdgpu_vm_free_pts(adev
, params
->vm
, &cursor
);
1663 amdgpu_vm_pt_next(adev
, &cursor
);
1666 } else if (frag
>= shift
) {
1667 /* or just move on to the next on the same level. */
1668 amdgpu_vm_pt_next(adev
, &cursor
);
1676 * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
1678 * @adev: amdgpu_device pointer of the VM
1679 * @bo_adev: amdgpu_device pointer of the mapped BO
1681 * @immediate: immediate submission in a page fault
1682 * @unlocked: unlocked invalidation during MM callback
1683 * @resv: fences we need to sync to
1684 * @start: start of mapped range
1685 * @last: last mapped entry
1686 * @flags: flags for the entries
1687 * @offset: offset into nodes and pages_addr
1688 * @res: ttm_resource to map
1689 * @pages_addr: DMA addresses to use for mapping
1690 * @fence: optional resulting fence
1691 * @table_freed: return true if page table is freed
1693 * Fill in the page table entries between @start and @last.
1696 * 0 for success, -EINVAL for failure.
1698 int amdgpu_vm_bo_update_mapping(struct amdgpu_device
*adev
,
1699 struct amdgpu_device
*bo_adev
,
1700 struct amdgpu_vm
*vm
, bool immediate
,
1701 bool unlocked
, struct dma_resv
*resv
,
1702 uint64_t start
, uint64_t last
,
1703 uint64_t flags
, uint64_t offset
,
1704 struct ttm_resource
*res
,
1705 dma_addr_t
*pages_addr
,
1706 struct dma_fence
**fence
,
1709 struct amdgpu_vm_update_params params
;
1710 struct amdgpu_res_cursor cursor
;
1711 enum amdgpu_sync_mode sync_mode
;
1714 if (!drm_dev_enter(&adev
->ddev
, &idx
))
1717 memset(¶ms
, 0, sizeof(params
));
1720 params
.immediate
= immediate
;
1721 params
.pages_addr
= pages_addr
;
1722 params
.unlocked
= unlocked
;
1724 /* Implicitly sync to command submissions in the same VM before
1725 * unmapping. Sync to moving fences before mapping.
1727 if (!(flags
& AMDGPU_PTE_VALID
))
1728 sync_mode
= AMDGPU_SYNC_EQ_OWNER
;
1730 sync_mode
= AMDGPU_SYNC_EXPLICIT
;
1732 amdgpu_vm_eviction_lock(vm
);
1738 if (!unlocked
&& !dma_fence_is_signaled(vm
->last_unlocked
)) {
1739 struct dma_fence
*tmp
= dma_fence_get_stub();
1741 amdgpu_bo_fence(vm
->root
.bo
, vm
->last_unlocked
, true);
1742 swap(vm
->last_unlocked
, tmp
);
1746 r
= vm
->update_funcs
->prepare(¶ms
, resv
, sync_mode
);
1750 amdgpu_res_first(pages_addr
? NULL
: res
, offset
,
1751 (last
- start
+ 1) * AMDGPU_GPU_PAGE_SIZE
, &cursor
);
1752 while (cursor
.remaining
) {
1753 uint64_t tmp
, num_entries
, addr
;
1755 num_entries
= cursor
.size
>> AMDGPU_GPU_PAGE_SHIFT
;
1757 bool contiguous
= true;
1759 if (num_entries
> AMDGPU_GPU_PAGES_IN_CPU_PAGE
) {
1760 uint64_t pfn
= cursor
.start
>> PAGE_SHIFT
;
1763 contiguous
= pages_addr
[pfn
+ 1] ==
1764 pages_addr
[pfn
] + PAGE_SIZE
;
1767 AMDGPU_GPU_PAGES_IN_CPU_PAGE
;
1768 for (count
= 2; count
< tmp
; ++count
) {
1769 uint64_t idx
= pfn
+ count
;
1771 if (contiguous
!= (pages_addr
[idx
] ==
1772 pages_addr
[idx
- 1] + PAGE_SIZE
))
1775 num_entries
= count
*
1776 AMDGPU_GPU_PAGES_IN_CPU_PAGE
;
1780 addr
= cursor
.start
;
1781 params
.pages_addr
= pages_addr
;
1783 addr
= pages_addr
[cursor
.start
>> PAGE_SHIFT
];
1784 params
.pages_addr
= NULL
;
1787 } else if (flags
& (AMDGPU_PTE_VALID
| AMDGPU_PTE_PRT
)) {
1788 addr
= bo_adev
->vm_manager
.vram_base_offset
+
1794 tmp
= start
+ num_entries
;
1795 r
= amdgpu_vm_update_ptes(¶ms
, start
, tmp
, addr
, flags
);
1799 amdgpu_res_next(&cursor
, num_entries
* AMDGPU_GPU_PAGE_SIZE
);
1803 r
= vm
->update_funcs
->commit(¶ms
, fence
);
1806 *table_freed
= *table_freed
|| params
.table_freed
;
1809 amdgpu_vm_eviction_unlock(vm
);
1814 void amdgpu_vm_get_memory(struct amdgpu_vm
*vm
, uint64_t *vram_mem
,
1815 uint64_t *gtt_mem
, uint64_t *cpu_mem
)
1817 struct amdgpu_bo_va
*bo_va
, *tmp
;
1819 list_for_each_entry_safe(bo_va
, tmp
, &vm
->idle
, base
.vm_status
) {
1820 if (!bo_va
->base
.bo
)
1822 amdgpu_bo_get_memory(bo_va
->base
.bo
, vram_mem
,
1825 list_for_each_entry_safe(bo_va
, tmp
, &vm
->evicted
, base
.vm_status
) {
1826 if (!bo_va
->base
.bo
)
1828 amdgpu_bo_get_memory(bo_va
->base
.bo
, vram_mem
,
1831 list_for_each_entry_safe(bo_va
, tmp
, &vm
->relocated
, base
.vm_status
) {
1832 if (!bo_va
->base
.bo
)
1834 amdgpu_bo_get_memory(bo_va
->base
.bo
, vram_mem
,
1837 list_for_each_entry_safe(bo_va
, tmp
, &vm
->moved
, base
.vm_status
) {
1838 if (!bo_va
->base
.bo
)
1840 amdgpu_bo_get_memory(bo_va
->base
.bo
, vram_mem
,
1843 spin_lock(&vm
->invalidated_lock
);
1844 list_for_each_entry_safe(bo_va
, tmp
, &vm
->invalidated
, base
.vm_status
) {
1845 if (!bo_va
->base
.bo
)
1847 amdgpu_bo_get_memory(bo_va
->base
.bo
, vram_mem
,
1850 list_for_each_entry_safe(bo_va
, tmp
, &vm
->done
, base
.vm_status
) {
1851 if (!bo_va
->base
.bo
)
1853 amdgpu_bo_get_memory(bo_va
->base
.bo
, vram_mem
,
1856 spin_unlock(&vm
->invalidated_lock
);
1859 * amdgpu_vm_bo_update - update all BO mappings in the vm page table
1861 * @adev: amdgpu_device pointer
1862 * @bo_va: requested BO and VM object
1863 * @clear: if true clear the entries
1864 * @table_freed: return true if page table is freed
1866 * Fill in the page table entries for @bo_va.
1869 * 0 for success, -EINVAL for failure.
1871 int amdgpu_vm_bo_update(struct amdgpu_device
*adev
, struct amdgpu_bo_va
*bo_va
,
1872 bool clear
, bool *table_freed
)
1874 struct amdgpu_bo
*bo
= bo_va
->base
.bo
;
1875 struct amdgpu_vm
*vm
= bo_va
->base
.vm
;
1876 struct amdgpu_bo_va_mapping
*mapping
;
1877 dma_addr_t
*pages_addr
= NULL
;
1878 struct ttm_resource
*mem
;
1879 struct dma_fence
**last_update
;
1880 struct dma_resv
*resv
;
1882 struct amdgpu_device
*bo_adev
= adev
;
1887 resv
= vm
->root
.bo
->tbo
.base
.resv
;
1889 struct drm_gem_object
*obj
= &bo
->tbo
.base
;
1891 resv
= bo
->tbo
.base
.resv
;
1892 if (obj
->import_attach
&& bo_va
->is_xgmi
) {
1893 struct dma_buf
*dma_buf
= obj
->import_attach
->dmabuf
;
1894 struct drm_gem_object
*gobj
= dma_buf
->priv
;
1895 struct amdgpu_bo
*abo
= gem_to_amdgpu_bo(gobj
);
1897 if (abo
->tbo
.resource
->mem_type
== TTM_PL_VRAM
)
1898 bo
= gem_to_amdgpu_bo(gobj
);
1900 mem
= bo
->tbo
.resource
;
1901 if (mem
->mem_type
== TTM_PL_TT
||
1902 mem
->mem_type
== AMDGPU_PL_PREEMPT
)
1903 pages_addr
= bo
->tbo
.ttm
->dma_address
;
1907 flags
= amdgpu_ttm_tt_pte_flags(adev
, bo
->tbo
.ttm
, mem
);
1909 if (amdgpu_bo_encrypted(bo
))
1910 flags
|= AMDGPU_PTE_TMZ
;
1912 bo_adev
= amdgpu_ttm_adev(bo
->tbo
.bdev
);
1917 if (clear
|| (bo
&& bo
->tbo
.base
.resv
==
1918 vm
->root
.bo
->tbo
.base
.resv
))
1919 last_update
= &vm
->last_update
;
1921 last_update
= &bo_va
->last_pt_update
;
1923 if (!clear
&& bo_va
->base
.moved
) {
1924 bo_va
->base
.moved
= false;
1925 list_splice_init(&bo_va
->valids
, &bo_va
->invalids
);
1927 } else if (bo_va
->cleared
!= clear
) {
1928 list_splice_init(&bo_va
->valids
, &bo_va
->invalids
);
1931 list_for_each_entry(mapping
, &bo_va
->invalids
, list
) {
1932 uint64_t update_flags
= flags
;
		/* Normally bo_va->flags only contains READABLE and WRITEABLE bits,
		 * but we filter the flags here just in case.
		 */
1937 if (!(mapping
->flags
& AMDGPU_PTE_READABLE
))
1938 update_flags
&= ~AMDGPU_PTE_READABLE
;
1939 if (!(mapping
->flags
& AMDGPU_PTE_WRITEABLE
))
1940 update_flags
&= ~AMDGPU_PTE_WRITEABLE
;
1942 /* Apply ASIC specific mapping flags */
1943 amdgpu_gmc_get_vm_pte(adev
, mapping
, &update_flags
);
1945 trace_amdgpu_vm_bo_update(mapping
);
1947 r
= amdgpu_vm_bo_update_mapping(adev
, bo_adev
, vm
, false, false,
1948 resv
, mapping
->start
,
1949 mapping
->last
, update_flags
,
1950 mapping
->offset
, mem
,
1951 pages_addr
, last_update
, table_freed
);
1956 /* If the BO is not in its preferred location add it back to
1957 * the evicted list so that it gets validated again on the
1958 * next command submission.
1960 if (bo
&& bo
->tbo
.base
.resv
== vm
->root
.bo
->tbo
.base
.resv
) {
1961 uint32_t mem_type
= bo
->tbo
.resource
->mem_type
;
1963 if (!(bo
->preferred_domains
&
1964 amdgpu_mem_type_to_domain(mem_type
)))
1965 amdgpu_vm_bo_evicted(&bo_va
->base
);
1967 amdgpu_vm_bo_idle(&bo_va
->base
);
1969 amdgpu_vm_bo_done(&bo_va
->base
);
1972 list_splice_init(&bo_va
->invalids
, &bo_va
->valids
);
1973 bo_va
->cleared
= clear
;
1975 if (trace_amdgpu_vm_bo_mapping_enabled()) {
1976 list_for_each_entry(mapping
, &bo_va
->valids
, list
)
1977 trace_amdgpu_vm_bo_mapping(mapping
);
1984 * amdgpu_vm_update_prt_state - update the global PRT state
1986 * @adev: amdgpu_device pointer
1988 static void amdgpu_vm_update_prt_state(struct amdgpu_device
*adev
)
1990 unsigned long flags
;
1993 spin_lock_irqsave(&adev
->vm_manager
.prt_lock
, flags
);
1994 enable
= !!atomic_read(&adev
->vm_manager
.num_prt_users
);
1995 adev
->gmc
.gmc_funcs
->set_prt(adev
, enable
);
1996 spin_unlock_irqrestore(&adev
->vm_manager
.prt_lock
, flags
);
2000 * amdgpu_vm_prt_get - add a PRT user
2002 * @adev: amdgpu_device pointer
2004 static void amdgpu_vm_prt_get(struct amdgpu_device
*adev
)
2006 if (!adev
->gmc
.gmc_funcs
->set_prt
)
2009 if (atomic_inc_return(&adev
->vm_manager
.num_prt_users
) == 1)
2010 amdgpu_vm_update_prt_state(adev
);
2014 * amdgpu_vm_prt_put - drop a PRT user
2016 * @adev: amdgpu_device pointer
2018 static void amdgpu_vm_prt_put(struct amdgpu_device
*adev
)
2020 if (atomic_dec_return(&adev
->vm_manager
.num_prt_users
) == 0)
2021 amdgpu_vm_update_prt_state(adev
);
2025 * amdgpu_vm_prt_cb - callback for updating the PRT status
2027 * @fence: fence for the callback
2028 * @_cb: the callback function
2030 static void amdgpu_vm_prt_cb(struct dma_fence
*fence
, struct dma_fence_cb
*_cb
)
2032 struct amdgpu_prt_cb
*cb
= container_of(_cb
, struct amdgpu_prt_cb
, cb
);
2034 amdgpu_vm_prt_put(cb
->adev
);
2039 * amdgpu_vm_add_prt_cb - add callback for updating the PRT status
2041 * @adev: amdgpu_device pointer
2042 * @fence: fence for the callback
2044 static void amdgpu_vm_add_prt_cb(struct amdgpu_device
*adev
,
2045 struct dma_fence
*fence
)
2047 struct amdgpu_prt_cb
*cb
;
2049 if (!adev
->gmc
.gmc_funcs
->set_prt
)
2052 cb
= kmalloc(sizeof(struct amdgpu_prt_cb
), GFP_KERNEL
);
2054 /* Last resort when we are OOM */
2056 dma_fence_wait(fence
, false);
2058 amdgpu_vm_prt_put(adev
);
2061 if (!fence
|| dma_fence_add_callback(fence
, &cb
->cb
,
2063 amdgpu_vm_prt_cb(fence
, &cb
->cb
);
2068 * amdgpu_vm_free_mapping - free a mapping
2070 * @adev: amdgpu_device pointer
2072 * @mapping: mapping to be freed
2073 * @fence: fence of the unmap operation
2075 * Free a mapping and make sure we decrease the PRT usage count if applicable.
2077 static void amdgpu_vm_free_mapping(struct amdgpu_device
*adev
,
2078 struct amdgpu_vm
*vm
,
2079 struct amdgpu_bo_va_mapping
*mapping
,
2080 struct dma_fence
*fence
)
2082 if (mapping
->flags
& AMDGPU_PTE_PRT
)
2083 amdgpu_vm_add_prt_cb(adev
, fence
);
2088 * amdgpu_vm_prt_fini - finish all prt mappings
2090 * @adev: amdgpu_device pointer
2093 * Register a cleanup callback to disable PRT support after VM dies.
2095 static void amdgpu_vm_prt_fini(struct amdgpu_device
*adev
, struct amdgpu_vm
*vm
)
2097 struct dma_resv
*resv
= vm
->root
.bo
->tbo
.base
.resv
;
2098 struct dma_fence
*excl
, **shared
;
2099 unsigned i
, shared_count
;
2102 r
= dma_resv_get_fences(resv
, &excl
, &shared_count
, &shared
);
2104 /* Not enough memory to grab the fence list, as last resort
2105 * block for all the fences to complete.
2107 dma_resv_wait_timeout(resv
, true, false,
2108 MAX_SCHEDULE_TIMEOUT
);
2112 /* Add a callback for each fence in the reservation object */
2113 amdgpu_vm_prt_get(adev
);
2114 amdgpu_vm_add_prt_cb(adev
, excl
);
2116 for (i
= 0; i
< shared_count
; ++i
) {
2117 amdgpu_vm_prt_get(adev
);
2118 amdgpu_vm_add_prt_cb(adev
, shared
[i
]);
2125 * amdgpu_vm_clear_freed - clear freed BOs in the PT
2127 * @adev: amdgpu_device pointer
2129 * @fence: optional resulting fence (unchanged if no work needed to be done
2130 * or if an error occurred)
2132 * Make sure all freed BOs are cleared in the PT.
2133 * PTs have to be reserved and mutex must be locked!
2139 int amdgpu_vm_clear_freed(struct amdgpu_device
*adev
,
2140 struct amdgpu_vm
*vm
,
2141 struct dma_fence
**fence
)
2143 struct dma_resv
*resv
= vm
->root
.bo
->tbo
.base
.resv
;
2144 struct amdgpu_bo_va_mapping
*mapping
;
2145 uint64_t init_pte_value
= 0;
2146 struct dma_fence
*f
= NULL
;
2149 while (!list_empty(&vm
->freed
)) {
2150 mapping
= list_first_entry(&vm
->freed
,
2151 struct amdgpu_bo_va_mapping
, list
);
2152 list_del(&mapping
->list
);
2154 if (vm
->pte_support_ats
&&
2155 mapping
->start
< AMDGPU_GMC_HOLE_START
)
2156 init_pte_value
= AMDGPU_PTE_DEFAULT_ATC
;
2158 r
= amdgpu_vm_bo_update_mapping(adev
, adev
, vm
, false, false,
2159 resv
, mapping
->start
,
2160 mapping
->last
, init_pte_value
,
2161 0, NULL
, NULL
, &f
, NULL
);
2162 amdgpu_vm_free_mapping(adev
, vm
, mapping
, f
);
2170 dma_fence_put(*fence
);
2181 * amdgpu_vm_handle_moved - handle moved BOs in the PT
2183 * @adev: amdgpu_device pointer
2186 * Make sure all BOs which are moved are updated in the PTs.
2191 * PTs have to be reserved!
2193 int amdgpu_vm_handle_moved(struct amdgpu_device
*adev
,
2194 struct amdgpu_vm
*vm
)
2196 struct amdgpu_bo_va
*bo_va
, *tmp
;
2197 struct dma_resv
*resv
;
2201 list_for_each_entry_safe(bo_va
, tmp
, &vm
->moved
, base
.vm_status
) {
		/* Per VM BOs never need to be cleared in the page tables */
2203 r
= amdgpu_vm_bo_update(adev
, bo_va
, false, NULL
);
2208 spin_lock(&vm
->invalidated_lock
);
2209 while (!list_empty(&vm
->invalidated
)) {
2210 bo_va
= list_first_entry(&vm
->invalidated
, struct amdgpu_bo_va
,
2212 resv
= bo_va
->base
.bo
->tbo
.base
.resv
;
2213 spin_unlock(&vm
->invalidated_lock
);
2215 /* Try to reserve the BO to avoid clearing its ptes */
2216 if (!amdgpu_vm_debug
&& dma_resv_trylock(resv
))
2218 /* Somebody else is using the BO right now */
2222 r
= amdgpu_vm_bo_update(adev
, bo_va
, clear
, NULL
);
2227 dma_resv_unlock(resv
);
2228 spin_lock(&vm
->invalidated_lock
);
2230 spin_unlock(&vm
->invalidated_lock
);
2236 * amdgpu_vm_bo_add - add a bo to a specific vm
2238 * @adev: amdgpu_device pointer
2240 * @bo: amdgpu buffer object
2242 * Add @bo into the requested vm.
2243 * Add @bo to the list of bos associated with the vm
2246 * Newly added bo_va or NULL for failure
2248 * Object has to be reserved!
2250 struct amdgpu_bo_va
*amdgpu_vm_bo_add(struct amdgpu_device
*adev
,
2251 struct amdgpu_vm
*vm
,
2252 struct amdgpu_bo
*bo
)
2254 struct amdgpu_bo_va
*bo_va
;
2256 bo_va
= kzalloc(sizeof(struct amdgpu_bo_va
), GFP_KERNEL
);
2257 if (bo_va
== NULL
) {
2260 amdgpu_vm_bo_base_init(&bo_va
->base
, vm
, bo
);
2262 bo_va
->ref_count
= 1;
2263 INIT_LIST_HEAD(&bo_va
->valids
);
2264 INIT_LIST_HEAD(&bo_va
->invalids
);
2269 if (amdgpu_dmabuf_is_xgmi_accessible(adev
, bo
)) {
2270 bo_va
->is_xgmi
= true;
2271 /* Power up XGMI if it can be potentially used */
2272 amdgpu_xgmi_set_pstate(adev
, AMDGPU_XGMI_PSTATE_MAX_VEGA20
);
2280 * amdgpu_vm_bo_insert_map - insert a new mapping
2282 * @adev: amdgpu_device pointer
2283 * @bo_va: bo_va to store the address
2284 * @mapping: the mapping to insert
2286 * Insert a new mapping into all structures.
2288 static void amdgpu_vm_bo_insert_map(struct amdgpu_device
*adev
,
2289 struct amdgpu_bo_va
*bo_va
,
2290 struct amdgpu_bo_va_mapping
*mapping
)
2292 struct amdgpu_vm
*vm
= bo_va
->base
.vm
;
2293 struct amdgpu_bo
*bo
= bo_va
->base
.bo
;
2295 mapping
->bo_va
= bo_va
;
2296 list_add(&mapping
->list
, &bo_va
->invalids
);
2297 amdgpu_vm_it_insert(mapping
, &vm
->va
);
2299 if (mapping
->flags
& AMDGPU_PTE_PRT
)
2300 amdgpu_vm_prt_get(adev
);
2302 if (bo
&& bo
->tbo
.base
.resv
== vm
->root
.bo
->tbo
.base
.resv
&&
2303 !bo_va
->base
.moved
) {
2304 list_move(&bo_va
->base
.vm_status
, &vm
->moved
);
2306 trace_amdgpu_vm_bo_map(bo_va
, mapping
);
/**
 * amdgpu_vm_bo_map - map bo inside a vm
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to store the address
 * @saddr: where to map the BO
 * @offset: requested offset in the BO
 * @size: BO size in bytes
 * @flags: attributes of pages (read/write/valid/etc.)
 *
 * Add a mapping of the BO at the specified addr into the VM.
 *
 * Returns:
 * 0 for success, error for failure.
 *
 * Object has to be reserved and unreserved outside!
 */
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
		     struct amdgpu_bo_va *bo_va,
		     uint64_t saddr, uint64_t offset,
		     uint64_t size, uint64_t flags)
{
	struct amdgpu_bo_va_mapping *mapping, *tmp;
	struct amdgpu_bo *bo = bo_va->base.bo;
	struct amdgpu_vm *vm = bo_va->base.vm;
	uint64_t eaddr;

	/* validate the parameters */
	if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK ||
	    size == 0 || size & ~PAGE_MASK)
		return -EINVAL;

	/* make sure object fit at this offset */
	eaddr = saddr + size - 1;
	if (saddr >= eaddr ||
	    (bo && offset + size > amdgpu_bo_size(bo)) ||
	    (eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT))
		return -EINVAL;

	saddr /= AMDGPU_GPU_PAGE_SIZE;
	eaddr /= AMDGPU_GPU_PAGE_SIZE;

	tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
	if (tmp) {
		/* bo and tmp overlap, invalid addr */
		dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
			"0x%010Lx-0x%010Lx\n", bo, saddr, eaddr,
			tmp->start, tmp->last + 1);
		return -EINVAL;
	}

	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
	if (!mapping)
		return -ENOMEM;

	mapping->start = saddr;
	mapping->last = eaddr;
	mapping->offset = offset;
	mapping->flags = flags;

	amdgpu_vm_bo_insert_map(adev, bo_va, mapping);

	return 0;
}
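
/*
 * Minimal usage sketch (illustrative only, error handling trimmed): map the
 * first megabyte of a BO at GPU VA 0x400000 and later remove the mapping
 * again. The bo_va comes from amdgpu_vm_bo_add(), the BO and VM must be
 * reserved by the caller as required above, and "va" is a hypothetical local.
 *
 *	uint64_t va = 0x400000;
 *	int r;
 *
 *	r = amdgpu_vm_bo_map(adev, bo_va, va, 0, 0x100000,
 *			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE);
 *	if (!r)
 *		r = amdgpu_vm_bo_unmap(adev, bo_va, va);
 */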
/**
 * amdgpu_vm_bo_replace_map - map bo inside a vm, replacing existing mappings
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to store the address
 * @saddr: where to map the BO
 * @offset: requested offset in the BO
 * @size: BO size in bytes
 * @flags: attributes of pages (read/write/valid/etc.)
 *
 * Add a mapping of the BO at the specified addr into the VM. Replace existing
 * mappings as we do so.
 *
 * Returns:
 * 0 for success, error for failure.
 *
 * Object has to be reserved and unreserved outside!
 */
int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
			     struct amdgpu_bo_va *bo_va,
			     uint64_t saddr, uint64_t offset,
			     uint64_t size, uint64_t flags)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo = bo_va->base.bo;
	uint64_t eaddr;
	int r;

	/* validate the parameters */
	if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK ||
	    size == 0 || size & ~PAGE_MASK)
		return -EINVAL;

	/* make sure object fit at this offset */
	eaddr = saddr + size - 1;
	if (saddr >= eaddr ||
	    (bo && offset + size > amdgpu_bo_size(bo)) ||
	    (eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT))
		return -EINVAL;

	/* Allocate all the needed memory */
	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
	if (!mapping)
		return -ENOMEM;

	r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size);
	if (r) {
		kfree(mapping);
		return r;
	}

	saddr /= AMDGPU_GPU_PAGE_SIZE;
	eaddr /= AMDGPU_GPU_PAGE_SIZE;

	mapping->start = saddr;
	mapping->last = eaddr;
	mapping->offset = offset;
	mapping->flags = flags;

	amdgpu_vm_bo_insert_map(adev, bo_va, mapping);

	return 0;
}
/**
 * amdgpu_vm_bo_unmap - remove bo mapping from vm
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to remove the address from
 * @saddr: where the BO is mapped
 *
 * Remove a mapping of the BO at the specified addr from the VM.
 *
 * Returns:
 * 0 for success, error for failure.
 *
 * Object has to be reserved and unreserved outside!
 */
int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
		       struct amdgpu_bo_va *bo_va,
		       uint64_t saddr)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_vm *vm = bo_va->base.vm;
	bool valid = true;

	saddr /= AMDGPU_GPU_PAGE_SIZE;

	list_for_each_entry(mapping, &bo_va->valids, list) {
		if (mapping->start == saddr)
			break;
	}

	if (&mapping->list == &bo_va->valids) {
		valid = false;

		list_for_each_entry(mapping, &bo_va->invalids, list) {
			if (mapping->start == saddr)
				break;
		}

		if (&mapping->list == &bo_va->invalids)
			return -ENOENT;
	}

	list_del(&mapping->list);
	amdgpu_vm_it_remove(mapping, &vm->va);
	mapping->bo_va = NULL;
	trace_amdgpu_vm_bo_unmap(bo_va, mapping);

	if (valid)
		list_add(&mapping->list, &vm->freed);
	else
		amdgpu_vm_free_mapping(adev, vm, mapping,
				       bo_va->last_pt_update);

	return 0;
}
/**
 * amdgpu_vm_bo_clear_mappings - remove all mappings in a specific range
 *
 * @adev: amdgpu_device pointer
 * @vm: VM structure to use
 * @saddr: start of the range
 * @size: size of the range
 *
 * Remove all mappings in a range, split them as appropriate.
 *
 * Returns:
 * 0 for success, error for failure.
 */
int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
				struct amdgpu_vm *vm,
				uint64_t saddr, uint64_t size)
{
	struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
	LIST_HEAD(removed);
	uint64_t eaddr;

	eaddr = saddr + size - 1;
	saddr /= AMDGPU_GPU_PAGE_SIZE;
	eaddr /= AMDGPU_GPU_PAGE_SIZE;

	/* Allocate all the needed memory */
	before = kzalloc(sizeof(*before), GFP_KERNEL);
	if (!before)
		return -ENOMEM;
	INIT_LIST_HEAD(&before->list);

	after = kzalloc(sizeof(*after), GFP_KERNEL);
	if (!after) {
		kfree(before);
		return -ENOMEM;
	}
	INIT_LIST_HEAD(&after->list);

	/* Now gather all removed mappings */
	tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
	while (tmp) {
		/* Remember mapping split at the start */
		if (tmp->start < saddr) {
			before->start = tmp->start;
			before->last = saddr - 1;
			before->offset = tmp->offset;
			before->flags = tmp->flags;
			before->bo_va = tmp->bo_va;
			list_add(&before->list, &tmp->bo_va->invalids);
		}

		/* Remember mapping split at the end */
		if (tmp->last > eaddr) {
			after->start = eaddr + 1;
			after->last = tmp->last;
			after->offset = tmp->offset;
			after->offset += (after->start - tmp->start) << PAGE_SHIFT;
			after->flags = tmp->flags;
			after->bo_va = tmp->bo_va;
			list_add(&after->list, &tmp->bo_va->invalids);
		}

		list_del(&tmp->list);
		list_add(&tmp->list, &removed);

		tmp = amdgpu_vm_it_iter_next(tmp, saddr, eaddr);
	}

	/* And free them up */
	list_for_each_entry_safe(tmp, next, &removed, list) {
		amdgpu_vm_it_remove(tmp, &vm->va);
		list_del(&tmp->list);

		if (tmp->start < saddr)
			tmp->start = saddr;
		if (tmp->last > eaddr)
			tmp->last = eaddr;

		tmp->bo_va = NULL;
		list_add(&tmp->list, &vm->freed);
		trace_amdgpu_vm_bo_unmap(NULL, tmp);
	}

	/* Insert partial mapping before the range */
	if (!list_empty(&before->list)) {
		amdgpu_vm_it_insert(before, &vm->va);
		if (before->flags & AMDGPU_PTE_PRT)
			amdgpu_vm_prt_get(adev);
	} else {
		kfree(before);
	}

	/* Insert partial mapping after the range */
	if (!list_empty(&after->list)) {
		amdgpu_vm_it_insert(after, &vm->va);
		if (after->flags & AMDGPU_PTE_PRT)
			amdgpu_vm_prt_get(adev);
	} else {
		kfree(after);
	}

	return 0;
}
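
/*
 * Worked example (illustrative): assume a single mapping covering GPU pages
 * [0x100, 0x1ff]. Clearing the byte range corresponding to pages
 * [0x180, 0x1bf] leaves "before" = [0x100, 0x17f] and "after" = [0x1c0, 0x1ff];
 * after->offset is advanced by (0x1c0 - 0x100) << PAGE_SHIFT so the surviving
 * tail still points at the right part of the BO, and the clipped middle piece
 * ends up on vm->freed for the next amdgpu_vm_clear_freed() run.
 */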
/**
 * amdgpu_vm_bo_lookup_mapping - find mapping by address
 *
 * @vm: the requested VM
 * @addr: the address
 *
 * Find a mapping by its address.
 *
 * Returns:
 * The amdgpu_bo_va_mapping matching for addr or NULL
 */
struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
							  uint64_t addr)
{
	return amdgpu_vm_it_iter_first(&vm->va, addr, addr);
}
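
/*
 * Usage sketch (illustrative only): translate a GPU VA back to its mapping.
 * The interval tree is keyed in GPU page units, so the caller converts the
 * byte address first; "addr" here is a hypothetical fault address.
 *
 *	struct amdgpu_bo_va_mapping *mapping;
 *
 *	mapping = amdgpu_vm_bo_lookup_mapping(vm, addr / AMDGPU_GPU_PAGE_SIZE);
 *	if (mapping)
 *		pr_debug("va hits mapping [0x%llx, 0x%llx]\n",
 *			 mapping->start, mapping->last);
 */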
/**
 * amdgpu_vm_bo_trace_cs - trace all reserved mappings
 *
 * @vm: the requested vm
 * @ticket: CS ticket
 *
 * Trace all mappings of BOs reserved during a command submission.
 */
void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket)
{
	struct amdgpu_bo_va_mapping *mapping;

	if (!trace_amdgpu_vm_bo_cs_enabled())
		return;

	for (mapping = amdgpu_vm_it_iter_first(&vm->va, 0, U64_MAX); mapping;
	     mapping = amdgpu_vm_it_iter_next(mapping, 0, U64_MAX)) {
		if (mapping->bo_va && mapping->bo_va->base.bo) {
			struct amdgpu_bo *bo;

			bo = mapping->bo_va->base.bo;
			if (dma_resv_locking_ctx(bo->tbo.base.resv) !=
			    ticket)
				continue;
		}

		trace_amdgpu_vm_bo_cs(mapping);
	}
}
/**
 * amdgpu_vm_bo_rmv - remove a bo from a specific vm
 *
 * @adev: amdgpu_device pointer
 * @bo_va: requested bo_va
 *
 * Remove @bo_va->bo from the requested vm.
 *
 * Object has to be reserved!
 */
void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
		      struct amdgpu_bo_va *bo_va)
{
	struct amdgpu_bo_va_mapping *mapping, *next;
	struct amdgpu_bo *bo = bo_va->base.bo;
	struct amdgpu_vm *vm = bo_va->base.vm;
	struct amdgpu_vm_bo_base **base;

	if (bo) {
		if (bo->tbo.base.resv == vm->root.bo->tbo.base.resv)
			vm->bulk_moveable = false;

		for (base = &bo_va->base.bo->vm_bo; *base;
		     base = &(*base)->next) {
			if (*base != &bo_va->base)
				continue;

			*base = bo_va->base.next;
			break;
		}
	}

	spin_lock(&vm->invalidated_lock);
	list_del(&bo_va->base.vm_status);
	spin_unlock(&vm->invalidated_lock);

	list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
		list_del(&mapping->list);
		amdgpu_vm_it_remove(mapping, &vm->va);
		mapping->bo_va = NULL;
		trace_amdgpu_vm_bo_unmap(bo_va, mapping);
		list_add(&mapping->list, &vm->freed);
	}
	list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
		list_del(&mapping->list);
		amdgpu_vm_it_remove(mapping, &vm->va);
		amdgpu_vm_free_mapping(adev, vm, mapping,
				       bo_va->last_pt_update);
	}

	dma_fence_put(bo_va->last_pt_update);

	if (bo && bo_va->is_xgmi)
		amdgpu_xgmi_set_pstate(adev, AMDGPU_XGMI_PSTATE_MIN);

	kfree(bo_va);
}
/**
 * amdgpu_vm_evictable - check if we can evict a VM
 *
 * @bo: A page table of the VM.
 *
 * Check if it is possible to evict a VM.
 */
bool amdgpu_vm_evictable(struct amdgpu_bo *bo)
{
	struct amdgpu_vm_bo_base *bo_base = bo->vm_bo;

	/* Page tables of a destroyed VM can go away immediately */
	if (!bo_base || !bo_base->vm)
		return true;

	/* Don't evict VM page tables while they are busy */
	if (!dma_resv_test_signaled(bo->tbo.base.resv, true))
		return false;

	/* Try to block ongoing updates */
	if (!amdgpu_vm_eviction_trylock(bo_base->vm))
		return false;

	/* Don't evict VM page tables while they are updated */
	if (!dma_fence_is_signaled(bo_base->vm->last_unlocked)) {
		amdgpu_vm_eviction_unlock(bo_base->vm);
		return false;
	}

	bo_base->vm->evicting = true;
	amdgpu_vm_eviction_unlock(bo_base->vm);
	return true;
}
/**
 * amdgpu_vm_bo_invalidate - mark the bo as invalid
 *
 * @adev: amdgpu_device pointer
 * @bo: amdgpu buffer object
 * @evicted: is the BO evicted
 *
 * Mark @bo as invalid.
 */
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
			     struct amdgpu_bo *bo, bool evicted)
{
	struct amdgpu_vm_bo_base *bo_base;

	/* shadow bo doesn't have bo base, its validation needs its parent */
	if (bo->parent && (amdgpu_bo_shadowed(bo->parent) == bo))
		bo = bo->parent;

	for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
		struct amdgpu_vm *vm = bo_base->vm;

		if (evicted && bo->tbo.base.resv == vm->root.bo->tbo.base.resv) {
			amdgpu_vm_bo_evicted(bo_base);
			continue;
		}

		if (bo_base->moved)
			continue;
		bo_base->moved = true;

		if (bo->tbo.type == ttm_bo_type_kernel)
			amdgpu_vm_bo_relocated(bo_base);
		else if (bo->tbo.base.resv == vm->root.bo->tbo.base.resv)
			amdgpu_vm_bo_moved(bo_base);
		else
			amdgpu_vm_bo_invalidated(bo_base);
	}
}
/**
 * amdgpu_vm_get_block_size - calculate VM page table size as power of two
 *
 * @vm_size: VM size in GB
 *
 * Returns:
 * VM page table as power of two
 */
static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
{
	/* Total bits covered by PD + PTs */
	unsigned bits = ilog2(vm_size) + 18;

	/* Make sure the PD is 4K in size up to 8GB address space.
	   Above that split equal between PD and PTs */
	if (vm_size <= 8)
		return (bits - 9);
	else
		return ((bits + 3) / 2);
}
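
/*
 * Worked examples of the split above (illustrative only): for vm_size = 8 GB,
 * bits = ilog2(8) + 18 = 21 and the function returns 21 - 9 = 12, leaving
 * 9 bits for the PD (512 entries, i.e. a 4K page directory). For
 * vm_size = 256 GB, bits = 8 + 18 = 26 and the result is (26 + 3) / 2 = 14,
 * splitting the bits roughly evenly between the PD (12 bits) and the
 * PTs (14 bits).
 */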
/**
 * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size
 *
 * @adev: amdgpu_device pointer
 * @min_vm_size: the minimum vm size in GB if it's set auto
 * @fragment_size_default: Default PTE fragment size
 * @max_level: max VMPT level
 * @max_bits: max address space size in bits
 */
void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
			   uint32_t fragment_size_default, unsigned max_level,
			   unsigned max_bits)
{
	unsigned int max_size = 1 << (max_bits - 30);
	unsigned int vm_size;
	uint64_t tmp;

	/* adjust vm size first */
	if (amdgpu_vm_size != -1) {
		vm_size = amdgpu_vm_size;
		if (vm_size > max_size) {
			dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n",
				 amdgpu_vm_size, max_size);
			vm_size = max_size;
		}
	} else {
		struct sysinfo si;
		unsigned int phys_ram_gb;

		/* Optimal VM size depends on the amount of physical
		 * RAM available. Underlying requirements and
		 * assumptions:
		 *
		 *  - Need to map system memory and VRAM from all GPUs
		 *     - VRAM from other GPUs not known here
		 *     - Assume VRAM <= system memory
		 *  - On GFX8 and older, VM space can be segmented for
		 *    different MTYPEs
		 *  - Need to allow room for fragmentation, guard pages etc.
		 *
		 * This adds up to a rough guess of system memory x3.
		 * Round up to power of two to maximize the available
		 * VM size with the given page table size.
		 */
		si_meminfo(&si);
		phys_ram_gb = ((uint64_t)si.totalram * si.mem_unit +
			       (1 << 30) - 1) >> 30;
		vm_size = roundup_pow_of_two(
			min(max(phys_ram_gb * 3, min_vm_size), max_size));
	}

	adev->vm_manager.max_pfn = (uint64_t)vm_size << 18;

	tmp = roundup_pow_of_two(adev->vm_manager.max_pfn);
	if (amdgpu_vm_block_size != -1)
		tmp >>= amdgpu_vm_block_size - 9;
	tmp = DIV_ROUND_UP(fls64(tmp) - 1, 9) - 1;
	adev->vm_manager.num_level = min(max_level, (unsigned)tmp);
	switch (adev->vm_manager.num_level) {
	case 3:
		adev->vm_manager.root_level = AMDGPU_VM_PDB2;
		break;
	case 2:
		adev->vm_manager.root_level = AMDGPU_VM_PDB1;
		break;
	case 1:
		adev->vm_manager.root_level = AMDGPU_VM_PDB0;
		break;
	default:
		dev_err(adev->dev, "VMPT only supports 2~4+1 levels\n");
	}
	/* block size depends on vm size and hw setup */
	if (amdgpu_vm_block_size != -1)
		adev->vm_manager.block_size =
			min((unsigned)amdgpu_vm_block_size, max_bits
			    - AMDGPU_GPU_PAGE_SHIFT
			    - 9 * adev->vm_manager.num_level);
	else if (adev->vm_manager.num_level > 1)
		adev->vm_manager.block_size = 9;
	else
		adev->vm_manager.block_size = amdgpu_vm_get_block_size(tmp);

	if (amdgpu_vm_fragment_size == -1)
		adev->vm_manager.fragment_size = fragment_size_default;
	else
		adev->vm_manager.fragment_size = amdgpu_vm_fragment_size;

	DRM_INFO("vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n",
		 vm_size, adev->vm_manager.num_level + 1,
		 adev->vm_manager.block_size,
		 adev->vm_manager.fragment_size);
}
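
/*
 * Worked example of the sizing above (illustrative, not tied to a specific
 * ASIC): with a 48-bit address space and vm_size = 262144 GB (256 TB),
 * max_pfn = vm_size << 18 = 2^36 GPU pages. Then fls64(2^36) - 1 = 36 and
 * DIV_ROUND_UP(36, 9) - 1 = 3, so num_level = 3 and the root level is
 * AMDGPU_VM_PDB2. With num_level > 1 and no module parameter override,
 * block_size becomes 9, i.e. each page table block covers 512 GPU pages
 * (2 MiB with 4 KiB pages).
 */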
/**
 * amdgpu_vm_wait_idle - wait for the VM to become idle
 *
 * @vm: VM object to wait for
 * @timeout: timeout to wait for VM to become idle
 */
long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
{
	timeout = dma_resv_wait_timeout(vm->root.bo->tbo.base.resv, true,
					true, timeout);
	if (timeout <= 0)
		return timeout;

	return dma_fence_wait_timeout(vm->last_unlocked, true, timeout);
}
/**
 * amdgpu_vm_init - initialize a vm instance
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Returns:
 * 0 for success, error for failure.
 */
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	struct amdgpu_bo *root_bo;
	struct amdgpu_bo_vm *root;
	int r, i;

	vm->va = RB_ROOT_CACHED;
	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
		vm->reserved_vmid[i] = NULL;
	INIT_LIST_HEAD(&vm->evicted);
	INIT_LIST_HEAD(&vm->relocated);
	INIT_LIST_HEAD(&vm->moved);
	INIT_LIST_HEAD(&vm->idle);
	INIT_LIST_HEAD(&vm->invalidated);
	spin_lock_init(&vm->invalidated_lock);
	INIT_LIST_HEAD(&vm->freed);
	INIT_LIST_HEAD(&vm->done);

	/* create scheduler entities for page table updates */
	r = drm_sched_entity_init(&vm->immediate, DRM_SCHED_PRIORITY_NORMAL,
				  adev->vm_manager.vm_pte_scheds,
				  adev->vm_manager.vm_pte_num_scheds, NULL);
	if (r)
		return r;

	r = drm_sched_entity_init(&vm->delayed, DRM_SCHED_PRIORITY_NORMAL,
				  adev->vm_manager.vm_pte_scheds,
				  adev->vm_manager.vm_pte_num_scheds, NULL);
	if (r)
		goto error_free_immediate;

	vm->pte_support_ats = false;
	vm->is_compute_context = false;

	vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
				    AMDGPU_VM_USE_CPU_FOR_GFX);

	DRM_DEBUG_DRIVER("VM update mode is %s\n",
			 vm->use_cpu_for_update ? "CPU" : "SDMA");
	WARN_ONCE((vm->use_cpu_for_update &&
		   !amdgpu_gmc_vram_full_visible(&adev->gmc)),
		  "CPU update of VM recommended only for large BAR system\n");

	if (vm->use_cpu_for_update)
		vm->update_funcs = &amdgpu_vm_cpu_funcs;
	else
		vm->update_funcs = &amdgpu_vm_sdma_funcs;
	vm->last_update = NULL;
	vm->last_unlocked = dma_fence_get_stub();

	mutex_init(&vm->eviction_lock);
	vm->evicting = false;

	r = amdgpu_vm_pt_create(adev, vm, adev->vm_manager.root_level,
				false, &root);
	if (r)
		goto error_free_delayed;
	root_bo = &root->bo;
	r = amdgpu_bo_reserve(root_bo, true);
	if (r)
		goto error_free_root;

	r = dma_resv_reserve_shared(root_bo->tbo.base.resv, 1);
	if (r)
		goto error_unreserve;

	amdgpu_vm_bo_base_init(&vm->root, vm, root_bo);

	r = amdgpu_vm_clear_bo(adev, vm, root, false);
	if (r)
		goto error_unreserve;

	amdgpu_bo_unreserve(vm->root.bo);

	INIT_KFIFO(vm->faults);

	return 0;

error_unreserve:
	amdgpu_bo_unreserve(vm->root.bo);

error_free_root:
	amdgpu_bo_unref(&root->shadow);
	amdgpu_bo_unref(&root_bo);
	vm->root.bo = NULL;

error_free_delayed:
	dma_fence_put(vm->last_unlocked);
	drm_sched_entity_destroy(&vm->delayed);

error_free_immediate:
	drm_sched_entity_destroy(&vm->immediate);

	return r;
}
/**
 * amdgpu_vm_check_clean_reserved - check if a VM is clean
 *
 * @adev: amdgpu_device pointer
 * @vm: the VM to check
 *
 * Check all entries of the root PD. If any subsequent PDs are allocated,
 * page tables are already being created and filled, so this is not a clean
 * VM.
 *
 * Returns:
 * 0 if this VM is clean
 */
static int amdgpu_vm_check_clean_reserved(struct amdgpu_device *adev,
					  struct amdgpu_vm *vm)
{
	enum amdgpu_vm_level root = adev->vm_manager.root_level;
	unsigned int entries = amdgpu_vm_num_entries(adev, root);
	unsigned int i;

	for (i = 0; i < entries; i++) {
		if (to_amdgpu_bo_vm(vm->root.bo)->entries[i].bo)
			return -EINVAL;
	}

	return 0;
}
/**
 * amdgpu_vm_make_compute - Turn a GFX VM into a compute VM
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * This only works on GFX VMs that don't have any BOs added and no
 * page tables allocated yet.
 *
 * Changes the following VM parameters:
 * - use_cpu_for_update
 * - pte_supports_ats
 *
 * Reinitializes the page directory to reflect the changed ATS
 * setting.
 *
 * Returns:
 * 0 for success, -errno for errors.
 */
int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	bool pte_support_ats = (adev->asic_type == CHIP_RAVEN);
	int r;

	r = amdgpu_bo_reserve(vm->root.bo, true);
	if (r)
		return r;

	r = amdgpu_vm_check_clean_reserved(adev, vm);
	if (r)
		goto unreserve_bo;

	/* Check if PD needs to be reinitialized and do it before
	 * changing any other state, in case it fails.
	 */
	if (pte_support_ats != vm->pte_support_ats) {
		vm->pte_support_ats = pte_support_ats;
		r = amdgpu_vm_clear_bo(adev, vm,
				       to_amdgpu_bo_vm(vm->root.bo),
				       false);
		if (r)
			goto unreserve_bo;
	}

	/* Update VM state */
	vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
				    AMDGPU_VM_USE_CPU_FOR_COMPUTE);
	DRM_DEBUG_DRIVER("VM update mode is %s\n",
			 vm->use_cpu_for_update ? "CPU" : "SDMA");
	WARN_ONCE((vm->use_cpu_for_update &&
		   !amdgpu_gmc_vram_full_visible(&adev->gmc)),
		  "CPU update of VM recommended only for large BAR system\n");

	if (vm->use_cpu_for_update) {
		/* Sync with last SDMA update/clear before switching to CPU */
		r = amdgpu_bo_sync_wait(vm->root.bo,
					AMDGPU_FENCE_OWNER_UNDEFINED, true);
		if (r)
			goto unreserve_bo;

		vm->update_funcs = &amdgpu_vm_cpu_funcs;
	} else {
		vm->update_funcs = &amdgpu_vm_sdma_funcs;
	}
	dma_fence_put(vm->last_update);
	vm->last_update = NULL;
	vm->is_compute_context = true;

	/* Free the shadow bo for compute VM */
	amdgpu_bo_unref(&to_amdgpu_bo_vm(vm->root.bo)->shadow);

unreserve_bo:
	amdgpu_bo_unreserve(vm->root.bo);
	return r;
}
/**
 * amdgpu_vm_release_compute - release a compute vm
 * @adev: amdgpu_device pointer
 * @vm: a vm turned into compute vm by calling amdgpu_vm_make_compute
 *
 * This is the counterpart of amdgpu_vm_make_compute. It decouples the compute
 * pasid from the vm. Compute should stop using the vm after this call.
 */
void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	amdgpu_vm_set_pasid(adev, vm, 0);
	vm->is_compute_context = false;
}
/**
 * amdgpu_vm_fini - tear down a vm instance
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Unbind the VM and remove all bos from the vm bo list
 */
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	struct amdgpu_bo_va_mapping *mapping, *tmp;
	bool prt_fini_needed = !!adev->gmc.gmc_funcs->set_prt;
	struct amdgpu_bo *root;
	int i;

	amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm);

	root = amdgpu_bo_ref(vm->root.bo);
	amdgpu_bo_reserve(root, true);
	amdgpu_vm_set_pasid(adev, vm, 0);
	dma_fence_wait(vm->last_unlocked, false);
	dma_fence_put(vm->last_unlocked);

	list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
		if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
			amdgpu_vm_prt_fini(adev, vm);
			prt_fini_needed = false;
		}

		list_del(&mapping->list);
		amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
	}

	amdgpu_vm_free_pts(adev, vm, NULL);
	amdgpu_bo_unreserve(root);
	amdgpu_bo_unref(&root);
	WARN_ON(vm->root.bo);

	drm_sched_entity_destroy(&vm->immediate);
	drm_sched_entity_destroy(&vm->delayed);

	if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
		dev_err(adev->dev, "still active bo inside vm\n");
	}
	rbtree_postorder_for_each_entry_safe(mapping, tmp,
					     &vm->va.rb_root, rb) {
		/* Don't remove the mapping here, we don't want to trigger a
		 * rebalance and the tree is about to be destroyed anyway.
		 */
		list_del(&mapping->list);
		kfree(mapping);
	}

	dma_fence_put(vm->last_update);
	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
		amdgpu_vmid_free_reserved(adev, vm, i);
}
/**
 * amdgpu_vm_manager_init - init the VM manager
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the VM manager structures
 */
void amdgpu_vm_manager_init(struct amdgpu_device *adev)
{
	unsigned i;

	/* Concurrent flushes are only possible starting with Vega10 and
	 * are broken on Navi10 and Navi14.
	 */
	adev->vm_manager.concurrent_flush = !(adev->asic_type < CHIP_VEGA10 ||
					      adev->asic_type == CHIP_NAVI10 ||
					      adev->asic_type == CHIP_NAVI14);
	amdgpu_vmid_mgr_init(adev);

	adev->vm_manager.fence_context =
		dma_fence_context_alloc(AMDGPU_MAX_RINGS);
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		adev->vm_manager.seqno[i] = 0;

	spin_lock_init(&adev->vm_manager.prt_lock);
	atomic_set(&adev->vm_manager.num_prt_users, 0);

	/* If not overridden by the user, by default, only in large BAR systems
	 * Compute VM tables will be updated by CPU
	 */
#ifdef CONFIG_X86_64
	if (amdgpu_vm_update_mode == -1) {
		if (amdgpu_gmc_vram_full_visible(&adev->gmc))
			adev->vm_manager.vm_update_mode =
				AMDGPU_VM_USE_CPU_FOR_COMPUTE;
		else
			adev->vm_manager.vm_update_mode = 0;
	} else
		adev->vm_manager.vm_update_mode = amdgpu_vm_update_mode;
#else
	adev->vm_manager.vm_update_mode = 0;
#endif

	xa_init_flags(&adev->vm_manager.pasids, XA_FLAGS_LOCK_IRQ);
}
/**
 * amdgpu_vm_manager_fini - cleanup VM manager
 *
 * @adev: amdgpu_device pointer
 *
 * Cleanup the VM manager and free resources.
 */
void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
{
	WARN_ON(!xa_empty(&adev->vm_manager.pasids));
	xa_destroy(&adev->vm_manager.pasids);

	amdgpu_vmid_mgr_fini(adev);
}
/**
 * amdgpu_vm_ioctl - Manages VMID reservation for vm hubs.
 *
 * @dev: drm device pointer
 * @data: drm_amdgpu_vm
 * @filp: drm file pointer
 *
 * Returns:
 * 0 for success, -errno for errors.
 */
int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	union drm_amdgpu_vm *args = data;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	long timeout = msecs_to_jiffies(2000);
	int r;

	switch (args->in.op) {
	case AMDGPU_VM_OP_RESERVE_VMID:
		/* We only have requirement to reserve vmid from gfxhub */
		r = amdgpu_vmid_alloc_reserved(adev, &fpriv->vm,
					       AMDGPU_GFXHUB_0);
		if (r)
			return r;
		break;
	case AMDGPU_VM_OP_UNRESERVE_VMID:
		if (amdgpu_sriov_runtime(adev))
			timeout = 8 * timeout;

		/* Wait vm idle to make sure the vmid set in SPM_VMID is
		 * not referenced anymore.
		 */
		r = amdgpu_bo_reserve(fpriv->vm.root.bo, true);
		if (r)
			return r;

		r = amdgpu_vm_wait_idle(&fpriv->vm, timeout);
		if (r < 0)
			return r;

		amdgpu_bo_unreserve(fpriv->vm.root.bo);
		amdgpu_vmid_free_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB_0);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
/**
 * amdgpu_vm_get_task_info - Extracts task info for a PASID.
 *
 * @adev: drm device pointer
 * @pasid: PASID identifier for VM
 * @task_info: task_info to fill.
 */
void amdgpu_vm_get_task_info(struct amdgpu_device *adev, u32 pasid,
			     struct amdgpu_task_info *task_info)
{
	struct amdgpu_vm *vm;
	unsigned long flags;

	xa_lock_irqsave(&adev->vm_manager.pasids, flags);

	vm = xa_load(&adev->vm_manager.pasids, pasid);
	if (vm)
		*task_info = vm->task_info;

	xa_unlock_irqrestore(&adev->vm_manager.pasids, flags);
}
/**
 * amdgpu_vm_set_task_info - Sets VMs task info.
 *
 * @vm: vm for which to set the info
 */
void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
{
	if (vm->task_info.pid)
		return;

	vm->task_info.pid = current->pid;
	get_task_comm(vm->task_info.task_name, current);

	if (current->group_leader->mm != current->mm)
		return;

	vm->task_info.tgid = current->group_leader->pid;
	get_task_comm(vm->task_info.process_name, current->group_leader);
}
/**
 * amdgpu_vm_handle_fault - graceful handling of VM faults.
 * @adev: amdgpu device pointer
 * @pasid: PASID of the VM
 * @addr: Address of the fault
 * @write_fault: true if it is a write fault, false if it is a read fault
 *
 * Try to gracefully handle a VM fault. Return true if the fault was handled
 * and shouldn't be reported any more.
 */
bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
			    uint64_t addr, bool write_fault)
{
	bool is_compute_context = false;
	struct amdgpu_bo *root;
	unsigned long irqflags;
	uint64_t value, flags;
	struct amdgpu_vm *vm;
	int r;

	xa_lock_irqsave(&adev->vm_manager.pasids, irqflags);
	vm = xa_load(&adev->vm_manager.pasids, pasid);
	if (vm) {
		root = amdgpu_bo_ref(vm->root.bo);
		is_compute_context = vm->is_compute_context;
	} else {
		root = NULL;
	}
	xa_unlock_irqrestore(&adev->vm_manager.pasids, irqflags);

	if (!root)
		return false;

	addr /= AMDGPU_GPU_PAGE_SIZE;

	if (is_compute_context &&
	    !svm_range_restore_pages(adev, pasid, addr, write_fault)) {
		amdgpu_bo_unref(&root);
		return true;
	}

	r = amdgpu_bo_reserve(root, true);
	if (r)
		goto error_unref;

	/* Double check that the VM still exists */
	xa_lock_irqsave(&adev->vm_manager.pasids, irqflags);
	vm = xa_load(&adev->vm_manager.pasids, pasid);
	if (vm && vm->root.bo != root)
		vm = NULL;
	xa_unlock_irqrestore(&adev->vm_manager.pasids, irqflags);
	if (!vm)
		goto error_unlock;

	flags = AMDGPU_PTE_VALID | AMDGPU_PTE_SNOOPED |
		AMDGPU_PTE_SYSTEM;

	if (is_compute_context) {
		/* Intentionally setting invalid PTE flag
		 * combination to force a no-retry-fault
		 */
		flags = AMDGPU_PTE_EXECUTABLE | AMDGPU_PDE_PTE |
			AMDGPU_PTE_TF;
		value = 0;
	} else if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_NEVER) {
		/* Redirect the access to the dummy page */
		value = adev->dummy_page_addr;
		flags |= AMDGPU_PTE_EXECUTABLE | AMDGPU_PTE_READABLE |
			AMDGPU_PTE_WRITEABLE;
	} else {
		/* Let the hw retry silently on the PTE */
		value = 0;
	}

	r = dma_resv_reserve_shared(root->tbo.base.resv, 1);
	if (r) {
		pr_debug("failed %d to reserve fence slot\n", r);
		goto error_unlock;
	}

	r = amdgpu_vm_bo_update_mapping(adev, adev, vm, true, false, NULL, addr,
					addr, flags, value, NULL, NULL, NULL,
					NULL);
	if (r)
		goto error_unlock;

	r = amdgpu_vm_update_pdes(adev, vm, true);

error_unlock:
	amdgpu_bo_unreserve(root);
	if (r < 0)
		DRM_ERROR("Can't handle page fault (%d)\n", r);

error_unref:
	amdgpu_bo_unref(&root);

	return false;
}
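
/*
 * Illustrative summary of the recovery paths above, not additional driver
 * code: for graphics VMs with amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_NEVER
 * the faulting page is pointed at adev->dummy_page_addr with a readable,
 * writeable, executable system PTE so the wave keeps running against scratch
 * memory; for compute VMs an intentionally invalid EXECUTABLE | PDE_PTE | TF
 * combination is written instead, turning the retry fault into a no-retry
 * fault that user mode can observe; otherwise a zero value is written with
 * the base flags so the hardware retries silently on that PTE.
 */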
#if defined(CONFIG_DEBUG_FS)

/**
 * amdgpu_debugfs_vm_bo_info - print BO info for the VM
 *
 * @vm: Requested VM for printing BO info
 * @m: debugfs file
 *
 * Print BO information in debugfs file for the VM
 */
void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m)
{
	struct amdgpu_bo_va *bo_va, *tmp;
	u64 total_idle = 0;
	u64 total_evicted = 0;
	u64 total_relocated = 0;
	u64 total_moved = 0;
	u64 total_invalidated = 0;
	u64 total_done = 0;
	unsigned int total_idle_objs = 0;
	unsigned int total_evicted_objs = 0;
	unsigned int total_relocated_objs = 0;
	unsigned int total_moved_objs = 0;
	unsigned int total_invalidated_objs = 0;
	unsigned int total_done_objs = 0;
	unsigned int id = 0;

	seq_puts(m, "\tIdle BOs:\n");
	list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) {
		if (!bo_va->base.bo)
			continue;
		total_idle += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
	}
	total_idle_objs = id;
	id = 0;

	seq_puts(m, "\tEvicted BOs:\n");
	list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status) {
		if (!bo_va->base.bo)
			continue;
		total_evicted += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
	}
	total_evicted_objs = id;
	id = 0;

	seq_puts(m, "\tRelocated BOs:\n");
	list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status) {
		if (!bo_va->base.bo)
			continue;
		total_relocated += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
	}
	total_relocated_objs = id;
	id = 0;

	seq_puts(m, "\tMoved BOs:\n");
	list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
		if (!bo_va->base.bo)
			continue;
		total_moved += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
	}
	total_moved_objs = id;
	id = 0;

	seq_puts(m, "\tInvalidated BOs:\n");
	spin_lock(&vm->invalidated_lock);
	list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) {
		if (!bo_va->base.bo)
			continue;
		total_invalidated += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
	}
	total_invalidated_objs = id;
	id = 0;

	seq_puts(m, "\tDone BOs:\n");
	list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status) {
		if (!bo_va->base.bo)
			continue;
		total_done += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
	}
	spin_unlock(&vm->invalidated_lock);
	total_done_objs = id;

	seq_printf(m, "\tTotal idle size: %12lld\tobjs:\t%d\n", total_idle,
		   total_idle_objs);
	seq_printf(m, "\tTotal evicted size: %12lld\tobjs:\t%d\n", total_evicted,
		   total_evicted_objs);
	seq_printf(m, "\tTotal relocated size: %12lld\tobjs:\t%d\n", total_relocated,
		   total_relocated_objs);
	seq_printf(m, "\tTotal moved size: %12lld\tobjs:\t%d\n", total_moved,
		   total_moved_objs);
	seq_printf(m, "\tTotal invalidated size: %12lld\tobjs:\t%d\n", total_invalidated,
		   total_invalidated_objs);
	seq_printf(m, "\tTotal done size: %12lld\tobjs:\t%d\n", total_done,
		   total_done_objs);
}
#endif