to >= amdgpu_vm_num_entries(adev, level))
return -EINVAL;
- if (to > parent->last_entry_used)
- parent->last_entry_used = to;
-
++level;
saddr = saddr & ((1 << shift) - 1);
eaddr = eaddr & ((1 << shift) - 1);
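(Illustrative aside, not part of the patch: the two mask operations above keep only the address bits that the next level down will decode, since the bits above shift were already consumed to pick an entry at the current level. A minimal standalone sketch of that per-level split; the 9-bit-per-level layout, the 4-level depth and the helper name are assumptions for illustration, not the driver's actual geometry.)

#include <stdint.h>
#include <stdio.h>

#define LEVEL_BITS 9                         /* assumed bits of index per level */
#define LEVEL_MASK ((1u << LEVEL_BITS) - 1)

/* Hypothetical helper: index selected by @addr at @level of a 4-level tree. */
static unsigned level_index(uint64_t addr, unsigned level)
{
        unsigned shift = (3 - level) * LEVEL_BITS;

        return (addr >> shift) & LEVEL_MASK;
}

int main(void)
{
        uint64_t addr = 0x123456789ull;
        unsigned level;

        for (level = 0; level < 4; ++level) {
                unsigned shift = (3 - level) * LEVEL_BITS;

                /* Masking with ((1 << shift) - 1), as in the hunk above,
                 * strips the bits already consumed by this level. */
                printf("level %u: index %u, remainder 0x%llx\n",
                       level, level_index(addr, level),
                       (unsigned long long)(addr & ((1ull << shift) - 1)));
        }
        return 0;
}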
*
* Mark all PD levels as invalid after an error.
*/
-static void amdgpu_vm_invalidate_level(struct amdgpu_vm *vm,
- struct amdgpu_vm_pt *parent)
+static void amdgpu_vm_invalidate_level(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm,
+ struct amdgpu_vm_pt *parent,
+ unsigned level)
{
- unsigned pt_idx;
+ unsigned pt_idx, num_entries;
/*
* Recurse into the subdirectories. This recursion is harmless because
* we only have a maximum of 5 layers.
*/
- for (pt_idx = 0; pt_idx <= parent->last_entry_used; ++pt_idx) {
+ num_entries = amdgpu_vm_num_entries(adev, level);
+ for (pt_idx = 0; pt_idx < num_entries; ++pt_idx) {
struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
if (!entry->base.bo)
continue;
spin_lock(&vm->status_lock);
if (list_empty(&entry->base.vm_status))
list_add(&entry->base.vm_status, &vm->relocated);
spin_unlock(&vm->status_lock);
- amdgpu_vm_invalidate_level(vm, entry);
+ amdgpu_vm_invalidate_level(adev, vm, entry, level + 1);
}
}
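(Illustrative aside, not part of the patch: with last_entry_used gone, the walk is bounded by the level's full capacity from amdgpu_vm_num_entries() and simply skips slots that never got a BO. A minimal sketch of that pattern over a generic fixed-depth tree; struct node, slots_at_level() and MAX_DEPTH are made-up stand-ins, not driver structures.)

#include <stddef.h>
#include <stdio.h>

#define MAX_DEPTH 5        /* mirrors the "maximum of 5 layers" bound above */

struct node {
        void *payload;          /* stand-in for entry->base.bo */
        struct node *children;  /* stand-in for parent->entries */
};

/* Stand-in for amdgpu_vm_num_entries(adev, level): capacity of one level. */
static unsigned slots_at_level(unsigned level)
{
        return level ? 8 : 4;
}

/* Visit every populated entry, walking the full capacity of each level. */
static void visit_all(struct node *parent, unsigned level,
                      void (*cb)(struct node *, unsigned))
{
        unsigned i, num = slots_at_level(level);

        if (!parent->children || level >= MAX_DEPTH)
                return;

        for (i = 0; i < num; ++i) {
                struct node *child = &parent->children[i];

                if (!child->payload)
                        continue;       /* slot never allocated: skip it */

                cb(child, level + 1);
                visit_all(child, level + 1, cb);
        }
}

static void mark(struct node *n, unsigned level)
{
        printf("invalidate %p at level %u\n", (void *)n, level);
}

int main(void)
{
        struct node leaves[8] = { { NULL, NULL } };
        struct node root = { &root, leaves };

        leaves[2].payload = &leaves[2]; /* only one populated slot */
        visit_all(&root, 0, mark);
        return 0;
}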
r = amdgpu_vm_update_pde(adev, vm, pt, entry);
if (r) {
- amdgpu_vm_invalidate_level(vm, &vm->root);
+ amdgpu_vm_invalidate_level(adev, vm,
+ &vm->root, 0);
return r;
}
spin_lock(&vm->status_lock);
error_free:
amdgpu_job_free(job);
- amdgpu_vm_invalidate_level(vm, &vm->root);
+ amdgpu_vm_invalidate_level(adev, vm, &vm->root, 0);
return r;
}
/**
* amdgpu_vm_free_levels - free PD/PT levels
*
- * @level: PD/PT starting level to free
+ * @adev: amdgpu device structure
+ * @parent: PD/PT starting level to free
+ * @level: level of parent structure
*
* Free the page directory or page table level and all sub levels.
*/
-static void amdgpu_vm_free_levels(struct amdgpu_vm_pt *level)
+static void amdgpu_vm_free_levels(struct amdgpu_device *adev,
+ struct amdgpu_vm_pt *parent,
+ unsigned level)
{
- unsigned i;
+ unsigned i, num_entries = amdgpu_vm_num_entries(adev, level);
- if (level->base.bo) {
- list_del(&level->base.bo_list);
- list_del(&level->base.vm_status);
- amdgpu_bo_unref(&level->base.bo->shadow);
- amdgpu_bo_unref(&level->base.bo);
+ if (parent->base.bo) {
+ list_del(&parent->base.bo_list);
+ list_del(&parent->base.vm_status);
+ amdgpu_bo_unref(&parent->base.bo->shadow);
+ amdgpu_bo_unref(&parent->base.bo);
}
- if (level->entries)
- for (i = 0; i <= level->last_entry_used; i++)
- amdgpu_vm_free_levels(&level->entries[i]);
+ if (parent->entries)
+ for (i = 0; i < num_entries; i++)
+ amdgpu_vm_free_levels(adev, &parent->entries[i],
+ level + 1);
- kvfree(level->entries);
+ kvfree(parent->entries);
}
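(Illustrative aside, not part of the patch: the teardown recursion has the same shape, except that each node frees its own resource first, then recurses over the full capacity of the level, and only then releases the array holding the children. A minimal sketch under the same made-up node layout; free() stands in for the BO unrefs and kvfree().)

#include <stdlib.h>

#define MAX_DEPTH 5

struct node {
        void *payload;          /* stand-in for parent->base.bo (and shadow) */
        struct node *children;  /* stand-in for parent->entries */
};

/* Assumed capacity helper, standing in for amdgpu_vm_num_entries(). */
static unsigned slots_at_level(unsigned level)
{
        return level ? 8 : 4;
}

/* Free the node's own resource, then every child level, and only then the
 * array that held the children, matching the ordering in the hunk above. */
static void free_levels(struct node *parent, unsigned level)
{
        unsigned i, num = slots_at_level(level);

        free(parent->payload);
        parent->payload = NULL;

        if (parent->children && level < MAX_DEPTH)
                for (i = 0; i < num; ++i)
                        free_levels(&parent->children[i], level + 1);

        free(parent->children);
        parent->children = NULL;
}

int main(void)
{
        struct node root = { malloc(16), calloc(4, sizeof(struct node)) };

        root.children[1].payload = malloc(16);  /* one populated child */
        free_levels(&root, 0);
        return 0;
}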
/**
if (r) {
dev_err(adev->dev, "Leaking page tables because BO reservation failed\n");
} else {
- amdgpu_vm_free_levels(&vm->root);
+ amdgpu_vm_free_levels(adev, &vm->root, 0);
amdgpu_bo_unreserve(root);
}
amdgpu_bo_unref(&root);
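(Illustrative aside, not part of the patch: the fini path only frees the hierarchy if it can reserve the root BO; if reservation fails it warns and deliberately leaks rather than free pages that might still be referenced. A minimal userspace analogue of that policy, with a plain trylock standing in for BO reservation; all names here are hypothetical.)

#include <pthread.h>
#include <stdio.h>

struct vm_like {
        pthread_mutex_t lock;   /* stand-in for reserving the root BO */
        /* ... page-table state ... */
};

static void free_all_levels(struct vm_like *vm)
{
        (void)vm;       /* stand-in for amdgpu_vm_free_levels(adev, &vm->root, 0) */
}

static void vm_like_fini(struct vm_like *vm)
{
        if (pthread_mutex_trylock(&vm->lock)) {
                /* Could not get exclusive access: leak instead of racing. */
                fprintf(stderr, "leaking page tables: reservation failed\n");
                return;
        }
        free_all_levels(vm);
        pthread_mutex_unlock(&vm->lock);
}

int main(void)
{
        struct vm_like vm = { .lock = PTHREAD_MUTEX_INITIALIZER };

        vm_like_fini(&vm);
        return 0;
}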