]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blobdiff - drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drm/amdgpu: Fix VM clean check method
[mirror_ubuntu-jammy-kernel.git] / drivers / gpu / drm / amd / amdgpu / amdgpu_vm.c
index 3ada094852c5283db4ee6a7bc4c0051abaa5e24c..4f10f5aba00b80ee2e5507323f55a0f28a51d157 100644 (file)
@@ -306,7 +306,7 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
                return;
 
        vm->bulk_moveable = false;
-       if (bo->tbo.type == ttm_bo_type_kernel)
+       if (bo->tbo.type == ttm_bo_type_kernel && bo->parent)
                amdgpu_vm_bo_relocated(base);
        else
                amdgpu_vm_bo_idle(base);
@@ -603,12 +603,14 @@ void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
        struct ttm_bo_global *glob = adev->mman.bdev.glob;
        struct amdgpu_vm_bo_base *bo_base;
 
+#if 0
        if (vm->bulk_moveable) {
                spin_lock(&glob->lru_lock);
                ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move);
                spin_unlock(&glob->lru_lock);
                return;
        }
+#endif
 
        memset(&vm->lru_bulk_move, 0, sizeof(vm->lru_bulk_move));
 
@@ -649,6 +651,8 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
        struct amdgpu_vm_bo_base *bo_base, *tmp;
        int r = 0;
 
+       vm->bulk_moveable &= list_empty(&vm->evicted);
+
        list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) {
                struct amdgpu_bo *bo = bo_base->bo;
 
@@ -660,7 +664,10 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                        amdgpu_vm_bo_moved(bo_base);
                } else {
                        vm->update_funcs->map_table(bo);
-                       amdgpu_vm_bo_relocated(bo_base);
+                       if (bo->parent)
+                               amdgpu_vm_bo_relocated(bo_base);
+                       else
+                               amdgpu_vm_bo_idle(bo_base);
                }
        }
 
@@ -762,14 +769,17 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
 
        addr = 0;
        if (ats_entries) {
-               uint64_t ats_value;
+               uint64_t value = 0, flags;
 
-               ats_value = AMDGPU_PTE_DEFAULT_ATC;
-               if (level != AMDGPU_VM_PTB)
-                       ats_value |= AMDGPU_PDE_PTE;
+               flags = AMDGPU_PTE_DEFAULT_ATC;
+               if (level != AMDGPU_VM_PTB) {
+                       /* Handle leaf PDEs as PTEs */
+                       flags |= AMDGPU_PDE_PTE;
+                       amdgpu_gmc_get_vm_pde(adev, level, &value, &flags);
+               }
 
                r = vm->update_funcs->update(&params, bo, addr, 0, ats_entries,
-                                            0, ats_value);
+                                            value, flags);
                if (r)
                        return r;
 
@@ -777,15 +787,22 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
        }
 
        if (entries) {
-               uint64_t value = 0;
-
-               /* Workaround for fault priority problem on GMC9 */
-               if (level == AMDGPU_VM_PTB &&
-                   adev->asic_type >= CHIP_VEGA10)
-                       value = AMDGPU_PTE_EXECUTABLE;
+               uint64_t value = 0, flags = 0;
+
+               if (adev->asic_type >= CHIP_VEGA10) {
+                       if (level != AMDGPU_VM_PTB) {
+                               /* Handle leaf PDEs as PTEs */
+                               flags |= AMDGPU_PDE_PTE;
+                               amdgpu_gmc_get_vm_pde(adev, level,
+                                                     &value, &flags);
+                       } else {
+                               /* Workaround for fault priority problem on GMC9 */
+                               flags = AMDGPU_PTE_EXECUTABLE;
+                       }
+               }
 
                r = vm->update_funcs->update(&params, bo, addr, 0, entries,
-                                            0, value);
+                                            value, flags);
                if (r)
                        return r;
        }
@@ -1162,16 +1179,15 @@ uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
  *
  * @param: parameters for the update
  * @vm: requested vm
- * @parent: parent directory
  * @entry: entry to update
  *
  * Makes sure the requested entry in parent is up to date.
  */
 static int amdgpu_vm_update_pde(struct amdgpu_vm_update_params *params,
                                struct amdgpu_vm *vm,
-                               struct amdgpu_vm_pt *parent,
                                struct amdgpu_vm_pt *entry)
 {
+       struct amdgpu_vm_pt *parent = amdgpu_vm_pt_parent(entry);
        struct amdgpu_bo *bo = parent->base.bo, *pbo;
        uint64_t pde, pt, flags;
        unsigned level;
@@ -1233,17 +1249,13 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev,
                return r;
 
        while (!list_empty(&vm->relocated)) {
-               struct amdgpu_vm_pt *pt, *entry;
+               struct amdgpu_vm_pt *entry;
 
                entry = list_first_entry(&vm->relocated, struct amdgpu_vm_pt,
                                         base.vm_status);
                amdgpu_vm_bo_idle(&entry->base);
 
-               pt = amdgpu_vm_pt_parent(entry);
-               if (!pt)
-                       continue;
-
-               r = amdgpu_vm_update_pde(&params, vm, pt, entry);
+               r = amdgpu_vm_update_pde(&params, vm, entry);
                if (r)
                        goto error;
        }
@@ -2025,7 +2037,8 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
        INIT_LIST_HEAD(&bo_va->valids);
        INIT_LIST_HEAD(&bo_va->invalids);
 
-       if (bo && amdgpu_xgmi_same_hive(adev, amdgpu_ttm_adev(bo->tbo.bdev))) {
+       if (bo && amdgpu_xgmi_same_hive(adev, amdgpu_ttm_adev(bo->tbo.bdev)) &&
+           (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM)) {
                bo_va->is_xgmi = true;
                mutex_lock(&adev->vm_manager.lock_pstate);
                /* Power up XGMI if it can be potentially used */
@@ -2743,6 +2756,37 @@ error_free_sched_entity:
        return r;
 }
 
+/**
+ * amdgpu_vm_check_clean_reserved - check if a VM is clean
+ *
+ * @adev: amdgpu_device pointer
+ * @vm: the VM to check
+ *
+ * Check all entries of the root PD. If any subsequent PDs are allocated,
+ * it means page tables are being created and filled in, so the VM is not
+ * clean.
+ *
+ * Returns:
+ *     0 if this VM is clean, -EINVAL if any subsequent PD is allocated
+ */
+static int amdgpu_vm_check_clean_reserved(struct amdgpu_device *adev,
+       struct amdgpu_vm *vm)
+{
+       enum amdgpu_vm_level root = adev->vm_manager.root_level;
+       unsigned int entries = amdgpu_vm_num_entries(adev, root);
+       unsigned int i = 0;
+
+       if (!(vm->root.entries))
+               return 0;
+
+       for (i = 0; i < entries; i++) {
+               if (vm->root.entries[i].base.bo)
+                       return -EINVAL;
+       }
+
+       return 0;
+}
+
 /**
  * amdgpu_vm_make_compute - Turn a GFX VM into a compute VM
  *
@@ -2773,10 +2817,9 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, uns
                return r;
 
        /* Sanity checks */
-       if (!RB_EMPTY_ROOT(&vm->va.rb_root) || vm->root.entries) {
-               r = -EINVAL;
+       r = amdgpu_vm_check_clean_reserved(adev, vm);
+       if (r)
                goto unreserve_bo;
-       }
 
        if (pasid) {
                unsigned long flags;