@@ ... @@ amdgpu_job_alloc()
 	(*job)->num_ibs = num_ibs;
 	amdgpu_sync_create(&(*job)->sync);
+	amdgpu_sync_create(&(*job)->dep_sync);
 	amdgpu_sync_create(&(*job)->sched_sync);
 	return 0;
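
The allocation path now creates a third sync container on the job. The matching
header hunk is not part of this excerpt, but struct amdgpu_job has to gain the
new member; a sketch of the relevant fields, with the neighboring members and
their exact order assumed rather than taken from the patch:

/* Sketch only: the amdgpu.h hunk is not shown in this excerpt, and the
 * neighboring fields are assumptions from the surrounding code. */
struct amdgpu_job {
	struct amd_sched_job	base;
	struct amdgpu_device	*adev;
	struct amdgpu_vm	*vm;
	struct amdgpu_ring	*ring;
	struct amdgpu_sync	sync;		/* implicit and VM fences */
	struct amdgpu_sync	dep_sync;	/* explicit dependencies (new) */
	struct amdgpu_sync	sched_sync;	/* deps the scheduler optimized away */
	struct dma_fence	*fence;		/* the hw fence */
	/* ... */
};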
@@ ... @@ amdgpu_job_free_cb()
 	dma_fence_put(job->fence);
 	amdgpu_sync_free(&job->sync);
+	amdgpu_sync_free(&job->dep_sync);
 	amdgpu_sync_free(&job->sched_sync);
 	kfree(job);
 }
@@ ... @@ amdgpu_job_free()
 	dma_fence_put(job->fence);
 	amdgpu_sync_free(&job->sync);
+	amdgpu_sync_free(&job->dep_sync);
 	amdgpu_sync_free(&job->sched_sync);
 	kfree(job);
 }
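
The same five-line release sequence appears twice because jobs are torn down
along two paths: the scheduler's free callback and direct job destruction, and
both must now release dep_sync as well. A reconstruction of the two functions
after the patch, assuming the surrounding bodies match the amdgpu code of this
era:

/* Reconstruction, not part of the patch; bodies may differ slightly
 * between kernel versions. */
static void amdgpu_job_free_cb(struct amd_sched_job *s_job)
{
	struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);

	dma_fence_put(job->fence);
	amdgpu_sync_free(&job->sync);
	amdgpu_sync_free(&job->dep_sync);
	amdgpu_sync_free(&job->sched_sync);
	kfree(job);
}

void amdgpu_job_free(struct amdgpu_job *job)
{
	amdgpu_job_free_resources(job);

	dma_fence_put(job->fence);
	amdgpu_sync_free(&job->sync);
	amdgpu_sync_free(&job->dep_sync);
	amdgpu_sync_free(&job->sched_sync);
	kfree(job);
}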
@@ ... @@ amdgpu_job_dependency()
 	struct amdgpu_job *job = to_amdgpu_job(sched_job);
 	struct amdgpu_vm *vm = job->vm;
-	struct dma_fence *fence = amdgpu_sync_get_fence(&job->sync);
+	struct dma_fence *fence = amdgpu_sync_get_fence(&job->dep_sync);
 	int r;
+	if (amd_sched_dependency_optimized(fence, sched_job->s_entity)) {
+		r = amdgpu_sync_fence(job->adev, &job->sched_sync, fence);
+		if (r)
+			DRM_ERROR("Error adding fence to sync (%d)\n", r);
+	}
+	if (!fence)
+		fence = amdgpu_sync_get_fence(&job->sync);
 	while (fence == NULL && vm && !job->vm_id) {
 		struct amdgpu_ring *ring = job->ring;
@@ ... @@ amdgpu_job_dependency()
 		fence = amdgpu_sync_get_fence(&job->sync);
 	}
-	if (amd_sched_dependency_optimized(fence, sched_job->s_entity)) {
-		r = amdgpu_sync_fence(job->adev, &job->sched_sync, fence);
-		if (r)
-			DRM_ERROR("Error adding fence to sync (%d)\n", r);
-	}
 	return fence;
 }
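
Taken together, the dependency callback now drains explicit dependencies from
dep_sync first, and the optimized-dependency check runs only on those fences;
before this change the check ran on whatever fence happened to come out last,
after the VM-id loop had already mixed in fences from job->sync. Fences the
scheduler can optimize away are recorded in sched_sync, presumably so the
submission path can verify the optimization later. A reconstruction of the
whole callback after the patch; the VM-id grab inside the loop is context the
hunks elide:

/* Reconstruction from the hunks above; the elided loop body is marked. */
static struct dma_fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
{
	struct amdgpu_job *job = to_amdgpu_job(sched_job);
	struct amdgpu_vm *vm = job->vm;
	struct dma_fence *fence = amdgpu_sync_get_fence(&job->dep_sync);
	int r;

	/* Explicit dependencies first; if the scheduler can optimize one
	 * away, remember it in sched_sync. */
	if (amd_sched_dependency_optimized(fence, sched_job->s_entity)) {
		r = amdgpu_sync_fence(job->adev, &job->sched_sync, fence);
		if (r)
			DRM_ERROR("Error adding fence to sync (%d)\n", r);
	}

	/* Fall back to implicit/VM fences only once dep_sync is drained. */
	if (!fence)
		fence = amdgpu_sync_get_fence(&job->sync);

	while (fence == NULL && vm && !job->vm_id) {
		struct amdgpu_ring *ring = job->ring;

		/* ... VM id allocation fills job->sync here (context elided
		 * in the hunks above) ... */

		fence = amdgpu_sync_get_fence(&job->sync);
	}

	return fence;
}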