*/
#include <linux/firmware.h>
+#include <drm/drm_exec.h>
#include "amdgpu_mes.h"
#include "amdgpu.h"

int amdgpu_mes_ctx_map_meta_data(struct amdgpu_device *adev,
                                 struct amdgpu_vm *vm,
                                 struct amdgpu_mes_ctx_data *ctx_data)
{
struct amdgpu_bo_va *bo_va;
- struct ww_acquire_ctx ticket;
- struct list_head list;
- struct amdgpu_bo_list_entry pd;
- struct ttm_validate_buffer csa_tv;
struct amdgpu_sync sync;
+ struct drm_exec exec;
int r;
amdgpu_sync_create(&sync);
- INIT_LIST_HEAD(&list);
- INIT_LIST_HEAD(&csa_tv.head);
- csa_tv.bo = &ctx_data->meta_data_obj->tbo;
- csa_tv.num_shared = 1;
-
- list_add(&csa_tv.head, &list);
- amdgpu_vm_get_pd_bo(vm, &list, &pd);
-
- r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
- if (r) {
- DRM_ERROR("failed to reserve meta data BO: err=%d\n", r);
- return r;
+ drm_exec_init(&exec, 0);
+ drm_exec_until_all_locked(&exec) {
+ r = drm_exec_lock_obj(&exec,
+ &ctx_data->meta_data_obj->tbo.base);
+ drm_exec_retry_on_contention(&exec);
+ if (unlikely(r))
+ goto error_fini_exec;
+
+ r = amdgpu_vm_lock_pd(vm, &exec, 0);
+ drm_exec_retry_on_contention(&exec);
+ if (unlikely(r))
+ goto error_fini_exec;
}
bo_va = amdgpu_vm_bo_add(adev, vm, ctx_data->meta_data_obj);
if (!bo_va) {
- ttm_eu_backoff_reservation(&ticket, &list);
DRM_ERROR("failed to create bo_va for meta data BO\n");
- return -ENOMEM;
+ r = -ENOMEM;
+ goto error_fini_exec;
}
r = amdgpu_vm_bo_map(adev, bo_va, ctx_data->meta_data_gpu_addr, 0,
                     sizeof(struct amdgpu_mes_ctx_meta_data),
                     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
                     AMDGPU_PTE_EXECUTABLE);
if (r) {
DRM_ERROR("failed to do bo_map on meta data, err=%d\n", r);
- goto error;
+ goto error_del_bo_va;
}
r = amdgpu_vm_bo_update(adev, bo_va, false);
if (r) {
DRM_ERROR("failed to do vm_bo_update on meta data\n");
- goto error;
+ goto error_del_bo_va;
}
amdgpu_sync_fence(&sync, bo_va->last_pt_update);
r = amdgpu_vm_update_pdes(adev, vm, false);
if (r) {
DRM_ERROR("failed to update pdes on meta data\n");
- goto error;
+ goto error_del_bo_va;
}
amdgpu_sync_fence(&sync, vm->last_update);
amdgpu_sync_wait(&sync, false);
- ttm_eu_backoff_reservation(&ticket, &list);
+ drm_exec_fini(&exec);
amdgpu_sync_free(&sync);
ctx_data->meta_data_va = bo_va;
return 0;
-error:
+error_del_bo_va:
amdgpu_vm_bo_del(adev, bo_va);
- ttm_eu_backoff_reservation(&ticket, &list);
+
+error_fini_exec:
+ drm_exec_fini(&exec);
amdgpu_sync_free(&sync);
return r;
}
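
For context, the locking scheme adopted above is the generic drm_exec pattern: drm_exec_until_all_locked() re-executes its body whenever drm_exec_retry_on_contention() observes a ww-mutex backoff, so either every object ends up locked together or the whole block restarts cleanly. Below is a minimal self-contained sketch of that pattern, independent of this patch; the helper name lock_pair and the two-object setup are illustrative, and the two-argument drm_exec_init() matches the drm_exec version this patch was written against.

#include <drm/drm_exec.h>

/* Illustrative helper, not from this patch: lock two GEM objects
 * atomically with respect to ww-mutex contention. */
static int lock_pair(struct drm_gem_object *a, struct drm_gem_object *b)
{
	struct drm_exec exec;
	int r = 0;

	drm_exec_init(&exec, 0);
	drm_exec_until_all_locked(&exec) {
		r = drm_exec_lock_obj(&exec, a);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			break;

		r = drm_exec_lock_obj(&exec, b);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			break;
	}

	/* ... on success both reservations are held here ... */

	drm_exec_fini(&exec);	/* drops all locks taken above */
	return r;
}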

int amdgpu_mes_ctx_unmap_meta_data(struct amdgpu_device *adev,
                                   struct amdgpu_mes_ctx_data *ctx_data)
{
struct amdgpu_bo_va *bo_va = ctx_data->meta_data_va;
struct amdgpu_bo *bo = ctx_data->meta_data_obj;
struct amdgpu_vm *vm = bo_va->base.vm;
- struct amdgpu_bo_list_entry vm_pd;
- struct list_head list, duplicates;
- struct dma_fence *fence = NULL;
- struct ttm_validate_buffer tv;
- struct ww_acquire_ctx ticket;
- long r = 0;
-
- INIT_LIST_HEAD(&list);
- INIT_LIST_HEAD(&duplicates);
-
- tv.bo = &bo->tbo;
- tv.num_shared = 2;
- list_add(&tv.head, &list);
-
- amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
-
- r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
- if (r) {
- dev_err(adev->dev, "leaking bo va because "
- "we fail to reserve bo (%ld)\n", r);
- return r;
+ struct dma_fence *fence;
+ struct drm_exec exec;
+ long r;
+
+ drm_exec_init(&exec, 0);
+ drm_exec_until_all_locked(&exec) {
+ r = drm_exec_lock_obj(&exec,
+ &ctx_data->meta_data_obj->tbo.base);
+ drm_exec_retry_on_contention(&exec);
+ if (unlikely(r))
+ goto out_unlock;
+
+ r = amdgpu_vm_lock_pd(vm, &exec, 0);
+ drm_exec_retry_on_contention(&exec);
+ if (unlikely(r))
+ goto out_unlock;
}
amdgpu_vm_bo_del(adev, bo_va);
if (!amdgpu_vm_ready(vm))
goto out_unlock;
- r = dma_resv_get_singleton(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP, &fence);
+ r = dma_resv_get_singleton(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP,
+ &fence);
if (r)
goto out_unlock;
if (fence) {
...

out_unlock:
if (unlikely(r < 0))
dev_err(adev->dev, "failed to clear page tables (%ld)\n", r);
- ttm_eu_backoff_reservation(&ticket, &list);
+ drm_exec_fini(&exec);
return r;
}
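
On the teardown side, dma_resv_get_singleton() collapses all fences of the requested usage class into one fence the caller can wait on. A hedged sketch of that idiom follows; wait_bookkeep() is an illustrative name, not from this patch, and the reservation object is assumed to be locked, as it is in the function above.

#include <linux/dma-resv.h>
#include <linux/dma-fence.h>

/* Illustrative helper, not from this patch: wait for all
 * DMA_RESV_USAGE_BOOKKEEP fences on a locked reservation object. */
static long wait_bookkeep(struct dma_resv *resv, long timeout)
{
	struct dma_fence *fence;
	int r;

	r = dma_resv_get_singleton(resv, DMA_RESV_USAGE_BOOKKEEP, &fence);
	if (r)
		return r;
	if (!fence)
		return timeout;	/* no bookkeeping fences pending */

	timeout = dma_fence_wait_timeout(fence, false, timeout);
	dma_fence_put(fence);
	return timeout;
}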