#define AMDGPU_VM_FAULT_STOP_ALWAYS 2
struct amdgpu_vm_pt {
- struct amdgpu_bo *bo;
- uint64_t addr;
+ struct amdgpu_bo_list_entry entry;
+ uint64_t addr;
};
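The struct change above is the heart of the patch: each page-table slot now carries its own ready-to-link validation-list entry instead of a bare BO pointer, so building the per-submission list becomes pure pointer linking. A minimal user-space sketch of that embedded (intrusive) list-entry pattern, with hypothetical names standing in for the amdgpu structures:

    #include <stddef.h>
    #include <stdio.h>

    /* Toy doubly linked list, mirroring the kernel's struct list_head. */
    struct list_head { struct list_head *next, *prev; };

    static void list_init(struct list_head *h) { h->next = h->prev = h; }

    static void list_add(struct list_head *n, struct list_head *h)
    {
        n->next = h->next;
        n->prev = h;
        h->next->prev = n;
        h->next = n;
    }

    /* Stand-in for struct amdgpu_vm_pt: the list node is embedded in the
     * long-lived object, set up once, so linking it later costs nothing. */
    struct page_table {
        struct list_head entry;
        int id;
    };

    int main(void)
    {
        struct page_table pts[3] = { { .id = 0 }, { .id = 1 }, { .id = 2 } };
        struct list_head duplicates;
        size_t i;

        list_init(&duplicates);
        for (i = 0; i < 3; i++)    /* cannot fail: no allocation involved */
            list_add(&pts[i].entry, &duplicates);

        for (struct list_head *p = duplicates.next; p != &duplicates;
             p = p->next) {
            struct page_table *pt = (struct page_table *)
                ((char *)p - offsetof(struct page_table, entry));
            printf("pt %d\n", pt->id);
        }
        return 0;
    }

The old code had to allocate and fill a fresh array of entries on every submission; with the entry embedded, that failure mode disappears entirely, which is what lets amdgpu_vm_get_pt_bos() become void below.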
struct amdgpu_vm_id {
void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
struct list_head *validated,
struct amdgpu_bo_list_entry *entry);
-struct amdgpu_bo_list_entry *amdgpu_vm_get_pt_bos(struct amdgpu_vm *vm,
- struct list_head *duplicates);
+void amdgpu_vm_get_pt_bos(struct amdgpu_vm *vm, struct list_head *duplicates);
int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
struct amdgpu_sync *sync);
void amdgpu_vm_flush(struct amdgpu_ring *ring,
struct amdgpu_cs_chunk *chunks;
/* relocations */
struct amdgpu_bo_list_entry vm_pd;
- struct amdgpu_bo_list_entry *vm_bos;
struct list_head validated;
struct fence *fence;
struct amdgpu_bo_va *bo_va, uint32_t operation)
{
struct ttm_validate_buffer tv, *entry;
- struct amdgpu_bo_list_entry *vm_bos;
struct amdgpu_bo_list_entry vm_pd;
struct ww_acquire_ctx ticket;
struct list_head list, duplicates;
if (r)
goto error_print;
- vm_bos = amdgpu_vm_get_pt_bos(bo_va->vm, &duplicates);
- if (!vm_bos) {
- r = -ENOMEM;
- goto error_unreserve;
- }
-
+ amdgpu_vm_get_pt_bos(bo_va->vm, &duplicates);
list_for_each_entry(entry, &list, head) {
domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type);
/* if anything is swapped out don't swap it in here,
error_unreserve:
ttm_eu_backoff_reservation(&ticket, &list);
- drm_free_large(vm_bos);
error_print:
if (r && r != -ERESTARTSYS)
}
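With the allocation gone from this path, the error handling collapses: there is no -ENOMEM branch after reservation and nothing to free on unwind. A sketch of the resulting control flow, using stub helpers (not the real TTM/amdgpu calls) so it stands alone:

    #include <stdio.h>

    #define ERESTARTSYS 512    /* kernel value, for the sketch */

    /* Stubs standing in for ttm_eu_reserve_buffers(), amdgpu_vm_get_pt_bos(),
     * the VM update calls, and ttm_eu_backoff_reservation(). */
    static int  reserve_buffers(void)       { return 0; }
    static void gather_pt_bos(void)         { }    /* void: cannot fail */
    static int  update_page_directory(void) { return 0; }
    static int  update_bo_mapping(void)     { return 0; }
    static void backoff(void)               { }

    static int update_va(void)
    {
        int r;

        r = reserve_buffers();
        if (r)
            goto error_print;

        gather_pt_bos();    /* formerly could return NULL -> -ENOMEM here */

        r = update_page_directory();
        if (r)
            goto error_unreserve;

        r = update_bo_mapping();

        /* success and failure share the unwind, as in the patched function */
    error_unreserve:
        backoff();
    error_print:
        if (r && r != -ERESTARTSYS)
            fprintf(stderr, "couldn't update BO_VA (%d)\n", r);
        return r;
    }

    int main(void) { return update_va(); }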
/**
- * amdgpu_vm_get_bos - add the vm BOs to a validation list
+ * amdgpu_vm_get_pt_bos - add the vm BOs to a duplicates list
*
* @vm: vm providing the BOs
* @duplicates: head of duplicates list
*
- * Add the page directory to the list of BOs to
- * validate for command submission (cayman+).
+ * Add the page table BOs to the duplicates list
+ * for command submission.
*/
-struct amdgpu_bo_list_entry *amdgpu_vm_get_pt_bos(struct amdgpu_vm *vm,
- struct list_head *duplicates)
+void amdgpu_vm_get_pt_bos(struct amdgpu_vm *vm, struct list_head *duplicates)
{
- struct amdgpu_bo_list_entry *list;
- unsigned i, idx;
-
- list = drm_malloc_ab(vm->max_pde_used + 1,
- sizeof(struct amdgpu_bo_list_entry));
- if (!list)
- return NULL;
+ unsigned i;
/* add the vm page table to the list */
- for (i = 0, idx = 0; i <= vm->max_pde_used; i++) {
- if (!vm->page_tables[i].bo)
+ for (i = 0; i <= vm->max_pde_used; ++i) {
+ struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry;
+
+ if (!entry->robj)
continue;
- list[idx].robj = vm->page_tables[i].bo;
- list[idx].prefered_domains = AMDGPU_GEM_DOMAIN_VRAM;
- list[idx].allowed_domains = AMDGPU_GEM_DOMAIN_VRAM;
- list[idx].priority = 0;
- list[idx].tv.bo = &list[idx].robj->tbo;
- list[idx].tv.shared = true;
- list_add(&list[idx++].tv.head, duplicates);
+ list_add(&entry->tv.head, duplicates);
}
-
- return list;
}
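Why a duplicates list rather than the validated list: as the allocation hunk further down shows, the page tables are created with the page directory's reservation object (resv = vm->page_directory->tbo.resv), so once the PD is reserved through the validated list the PTs are already locked, and TTM's execbuf utilities expect such already-locked BOs on a separate duplicates list. A toy model of that sharing, with hypothetical names:

    #include <stdbool.h>
    #include <stdio.h>

    /* One reservation object guarding several buffers, like the PTs
     * sharing the page directory's resv in this patch. */
    struct resv   { bool locked; };
    struct buffer { struct resv *resv; const char *name; };

    /* Returns true if this call took the lock, false if the buffer was
     * already covered by an earlier reservation (a "duplicate"). */
    static bool reserve(struct buffer *b)
    {
        if (b->resv->locked) {
            printf("%s: already reserved -> duplicates list\n", b->name);
            return false;
        }
        b->resv->locked = true;
        printf("%s: reserved -> validated list\n", b->name);
        return true;
    }

    int main(void)
    {
        struct resv shared = { false };
        struct buffer pd  = { &shared, "page directory" };
        struct buffer pt0 = { &shared, "page table 0" };
        struct buffer pt1 = { &shared, "page table 1" };

        reserve(&pd);     /* amdgpu_vm_get_pd_bo() puts this on validated */
        reserve(&pt0);    /* amdgpu_vm_get_pt_bos() adds these afterwards */
        reserve(&pt1);
        return 0;
    }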
/**
/* walk over the address space and update the page directory */
for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
- struct amdgpu_bo *bo = vm->page_tables[pt_idx].bo;
+ struct amdgpu_bo *bo = vm->page_tables[pt_idx].entry.robj;
uint64_t pde, pt;
if (bo == NULL)
/* walk over the address space and update the page tables */
for (addr = start; addr < end; ) {
uint64_t pt_idx = addr >> amdgpu_vm_block_size;
- struct amdgpu_bo *pt = vm->page_tables[pt_idx].bo;
+ struct amdgpu_bo *pt = vm->page_tables[pt_idx].entry.robj;
unsigned nptes;
uint64_t pte;
int r;
/* walk over the address space and allocate the page tables */
for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) {
struct reservation_object *resv = vm->page_directory->tbo.resv;
+ struct amdgpu_bo_list_entry *entry;
struct amdgpu_bo *pt;
- if (vm->page_tables[pt_idx].bo)
+ entry = &vm->page_tables[pt_idx].entry;
+ if (entry->robj)
continue;
r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
goto error_free;
}
+ entry->robj = pt;
+ entry->prefered_domains = AMDGPU_GEM_DOMAIN_VRAM;
+ entry->allowed_domains = AMDGPU_GEM_DOMAIN_VRAM;
+ entry->priority = 0;
+ entry->tv.bo = &entry->robj->tbo;
+ entry->tv.shared = true;
vm->page_tables[pt_idx].addr = 0;
- vm->page_tables[pt_idx].bo = pt;
}
return 0;
}
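The entry fields that the old amdgpu_vm_get_pt_bos() filled on every submission are now set exactly once, when the page table is allocated. If one wanted to keep the allocation hunk compact, the setup could be factored into a helper like the sketch below; it is hypothetical, not part of the patch, and reuses only fields the patch itself touches ('prefered_domains' is the field's actual historical spelling):

    /* Hypothetical helper hoisting the one-time setup done in the hunk above. */
    static void amdgpu_vm_pt_entry_init(struct amdgpu_bo_list_entry *entry,
                                        struct amdgpu_bo *pt)
    {
        entry->robj = pt;
        entry->prefered_domains = AMDGPU_GEM_DOMAIN_VRAM;
        entry->allowed_domains = AMDGPU_GEM_DOMAIN_VRAM;
        entry->priority = 0;
        entry->tv.bo = &entry->robj->tbo;
        entry->tv.shared = true;
    }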
for (i = 0; i < amdgpu_vm_num_pdes(adev); i++)
- amdgpu_bo_unref(&vm->page_tables[i].bo);
+ amdgpu_bo_unref(&vm->page_tables[i].entry.robj);
kfree(vm->page_tables);
amdgpu_bo_unref(&vm->page_directory);