drm/amdgpu: fix fence wait in sync_fence, instead should be in sync_rings
author    Christian König <christian.koenig@amd.com>
          Thu, 20 Aug 2015 06:47:40 +0000 (14:47 +0800)
committer Alex Deucher <alexander.deucher@amd.com>
          Tue, 25 Aug 2015 14:39:39 +0000 (10:39 -0400)
Signed-off-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c

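What the patch does, in short: instead of calling fence_wait() inside amdgpu_sync_fence() for fences that do not belong to this device, those fences are now collected in a per-sync hash table keyed by fence context (keeping only the later fence of each context), and the new amdgpu_sync_wait() waits on all of them in amdgpu_ib_schedule() right before the ring is locked. The standalone C sketch below only illustrates that collect-then-wait pattern; the sketch_* types and helpers are illustrative stand-ins, not the kernel's struct fence or DECLARE_HASHTABLE API.

/*
 * Userspace sketch of the pattern this patch introduces: deduplicate
 * fences per context in a small hash table, keep only the later fence
 * of each context, and wait on them in one place before submission.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define SKETCH_HASH_BITS 4                     /* mirrors DECLARE_HASHTABLE(fences, 4) */
#define SKETCH_BUCKETS   (1u << SKETCH_HASH_BITS)

struct sketch_fence {
	uint64_t context;                      /* timeline the fence belongs to */
	uint64_t seqno;                        /* position on that timeline */
};

struct sketch_entry {
	struct sketch_fence fence;
	struct sketch_entry *next;
};

struct sketch_sync {
	struct sketch_entry *buckets[SKETCH_BUCKETS];
};

/* Remember a fence; keep only the later one per context (cf. fence_later()). */
static void sketch_sync_fence(struct sketch_sync *sync, struct sketch_fence f)
{
	unsigned int b = (unsigned int)(f.context & (SKETCH_BUCKETS - 1));
	struct sketch_entry *e;

	for (e = sync->buckets[b]; e; e = e->next) {
		if (e->fence.context != f.context)
			continue;
		if (f.seqno > e->fence.seqno)
			e->fence = f;
		return;
	}

	e = malloc(sizeof(*e));
	if (!e)
		return;
	e->fence = f;
	e->next = sync->buckets[b];
	sync->buckets[b] = e;
}

/* "Wait" on (here: just print) every collected fence, then drop it. */
static void sketch_sync_wait(struct sketch_sync *sync)
{
	unsigned int b;

	for (b = 0; b < SKETCH_BUCKETS; b++) {
		while (sync->buckets[b]) {
			struct sketch_entry *e = sync->buckets[b];

			printf("wait: context %llu seqno %llu\n",
			       (unsigned long long)e->fence.context,
			       (unsigned long long)e->fence.seqno);
			sync->buckets[b] = e->next;
			free(e);
		}
	}
}

int main(void)
{
	struct sketch_sync sync = { { NULL } };

	sketch_sync_fence(&sync, (struct sketch_fence){ .context = 7, .seqno = 1 });
	sketch_sync_fence(&sync, (struct sketch_fence){ .context = 7, .seqno = 3 });
	sketch_sync_fence(&sync, (struct sketch_fence){ .context = 9, .seqno = 2 });
	sketch_sync_wait(&sync);               /* one wait per context, on the latest seqno */
	return 0;
}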
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 65e0e9406abb0fc16f6e94d72073ee15801a13bb..3c5487257ef0ff3384197a9335bddc5c027b6cd0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -704,6 +704,7 @@ void amdgpu_semaphore_free(struct amdgpu_device *adev,
 struct amdgpu_sync {
        struct amdgpu_semaphore *semaphores[AMDGPU_NUM_SYNCS];
        struct amdgpu_fence     *sync_to[AMDGPU_MAX_RINGS];
+       DECLARE_HASHTABLE(fences, 4);
        struct amdgpu_fence     *last_vm_update;
 };
 
@@ -716,6 +717,7 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
                     void *owner);
 int amdgpu_sync_rings(struct amdgpu_sync *sync,
                      struct amdgpu_ring *ring);
+int amdgpu_sync_wait(struct amdgpu_sync *sync);
 void amdgpu_sync_free(struct amdgpu_device *adev, struct amdgpu_sync *sync,
                      struct fence *fence);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 737c8e3e3a74d5d1fd33f039d427e377914a87cc..c439735ee670ddffa18930840b520c626017eca8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -140,7 +140,11 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs,
                dev_err(adev->dev, "couldn't schedule ib\n");
                return -EINVAL;
        }
-
+       r = amdgpu_sync_wait(&ibs->sync);
+       if (r) {
+               dev_err(adev->dev, "IB sync failed (%d).\n", r);
+               return r;
+       }
        r = amdgpu_ring_lock(ring, (256 + AMDGPU_NUM_SYNCS * 8) * num_ibs);
        if (r) {
                dev_err(adev->dev, "scheduling IB failed (%d).\n", r);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index ee68eebfded1043b28a9b774701b437f1720fb4f..febbf37b1412ee252ce738b6f8cabd71520171b6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
 #include "amdgpu.h"
 #include "amdgpu_trace.h"
 
+struct amdgpu_sync_entry {
+       struct hlist_node       node;
+       struct fence            *fence;
+};
+
 /**
  * amdgpu_sync_create - zero init sync object
  *
@@ -49,6 +54,7 @@ void amdgpu_sync_create(struct amdgpu_sync *sync)
        for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
                sync->sync_to[i] = NULL;
 
+       hash_init(sync->fences);
        sync->last_vm_update = NULL;
 }
 
@@ -62,6 +68,7 @@ void amdgpu_sync_create(struct amdgpu_sync *sync)
 int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
                      struct fence *f)
 {
+       struct amdgpu_sync_entry *e;
        struct amdgpu_fence *fence;
        struct amdgpu_fence *other;
 
@@ -69,8 +76,27 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
                return 0;
 
        fence = to_amdgpu_fence(f);
-       if (!fence || fence->ring->adev != adev)
-               return fence_wait(f, true);
+       if (!fence || fence->ring->adev != adev) {
+               hash_for_each_possible(sync->fences, e, node, f->context) {
+                       struct fence *new;
+                       if (unlikely(e->fence->context != f->context))
+                               continue;
+                       new = fence_get(fence_later(e->fence, f));
+                       if (new) {
+                               fence_put(e->fence);
+                               e->fence = new;
+                       }
+                       return 0;
+               }
+
+               e = kmalloc(sizeof(struct amdgpu_sync_entry), GFP_KERNEL);
+               if (!e)
+                       return -ENOMEM;
+
+               hash_add(sync->fences, &e->node, f->context);
+               e->fence = fence_get(f);
+               return 0;
+       }
 
        other = sync->sync_to[fence->ring->idx];
        sync->sync_to[fence->ring->idx] = amdgpu_fence_ref(
@@ -147,6 +173,24 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
        return r;
 }
 
+int amdgpu_sync_wait(struct amdgpu_sync *sync)
+{
+       struct amdgpu_sync_entry *e;
+       struct hlist_node *tmp;
+       int i, r;
+
+       hash_for_each_safe(sync->fences, i, tmp, e, node) {
+               r = fence_wait(e->fence, false);
+               if (r)
+                       return r;
+
+               hash_del(&e->node);
+               fence_put(e->fence);
+               kfree(e);
+       }
+       return 0;
+}
+
 /**
  * amdgpu_sync_rings - sync ring to all registered fences
  *
@@ -236,8 +280,16 @@ void amdgpu_sync_free(struct amdgpu_device *adev,
                      struct amdgpu_sync *sync,
                      struct fence *fence)
 {
+       struct amdgpu_sync_entry *e;
+       struct hlist_node *tmp;
        unsigned i;
 
+       hash_for_each_safe(sync->fences, i, tmp, e, node) {
+               hash_del(&e->node);
+               fence_put(e->fence);
+               kfree(e);
+       }
+
        for (i = 0; i < AMDGPU_NUM_SYNCS; ++i)
                amdgpu_semaphore_free(adev, &sync->semaphores[i], fence);