]> git.proxmox.com Git - mirror_ubuntu-eoan-kernel.git/commitdiff
drm/amdgpu: new implementation for fence_wait_any (v2)
authormonk.liu <monk.liu@amd.com>
Thu, 30 Jul 2015 07:19:05 +0000 (15:19 +0800)
committerAlex Deucher <alexander.deucher@amd.com>
Mon, 17 Aug 2015 20:50:47 +0000 (16:50 -0400)
original method would sleep/schedule at the granularity of HZ/2 and
was based on the seq signal method; the new implementation is based on
the kernel fence interface, with no unnecessary scheduling at all

v2: replace logic of original amdgpu_fence_wait_any

Signed-off-by: monk.liu <monk.liu@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c

index 1e87acf3589201ff9af99c531e94831df6f41c86..5f32f859230b3ae77d2b89c59383507ab9c34b4a 100644 (file)
@@ -440,9 +440,9 @@ unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
 
 bool amdgpu_fence_signaled(struct amdgpu_fence *fence);
 int amdgpu_fence_wait(struct amdgpu_fence *fence, bool interruptible);
-int amdgpu_fence_wait_any(struct amdgpu_device *adev,
+signed long amdgpu_fence_wait_any(struct amdgpu_device *adev,
                          struct amdgpu_fence **fences,
-                         bool intr);
+                         bool intr, long t);
 struct amdgpu_fence *amdgpu_fence_ref(struct amdgpu_fence *fence);
 void amdgpu_fence_unref(struct amdgpu_fence **fence);
 
@@ -487,7 +487,7 @@ static inline bool amdgpu_fence_is_earlier(struct amdgpu_fence *a,
        return a->seq < b->seq;
 }
 
-int amdgpu_user_fence_emit(struct amdgpu_ring *ring, struct amdgpu_user_fence *user, 
+int amdgpu_user_fence_emit(struct amdgpu_ring *ring, struct amdgpu_user_fence *user,
                           void *owner, struct amdgpu_fence **fence);
 
 /*
index 4834725b627e63c57b533c4d15c411e8dfc1484b..a4982f53f937fb674979046be6d9001f45cc8d7f 100644 (file)
@@ -630,49 +630,6 @@ int amdgpu_fence_wait(struct amdgpu_fence *fence, bool intr)
        return 0;
 }
 
-/**
- * amdgpu_fence_wait_any - wait for a fence to signal on any ring
- *
- * @adev: amdgpu device pointer
- * @fences: amdgpu fence object(s)
- * @intr: use interruptable sleep
- *
- * Wait for any requested fence to signal (all asics).  Fence
- * array is indexed by ring id.  @intr selects whether to use
- * interruptable (true) or non-interruptable (false) sleep when
- * waiting for the fences. Used by the suballocator.
- * Returns 0 if any fence has passed, error for all other cases.
- */
-int amdgpu_fence_wait_any(struct amdgpu_device *adev,
-                         struct amdgpu_fence **fences,
-                         bool intr)
-{
-       uint64_t seq[AMDGPU_MAX_RINGS];
-       unsigned i, num_rings = 0;
-       long r;
-
-       for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
-               seq[i] = 0;
-
-               if (!fences[i]) {
-                       continue;
-               }
-
-               seq[i] = fences[i]->seq;
-               ++num_rings;
-       }
-
-       /* nothing to wait for ? */
-       if (num_rings == 0)
-               return -ENOENT;
-
-       r = amdgpu_fence_wait_seq_timeout(adev, seq, intr, MAX_SCHEDULE_TIMEOUT);
-       if (r < 0) {
-               return r;
-       }
-       return 0;
-}
-
 /**
  * amdgpu_fence_wait_next - wait for the next fence to signal
  *
@@ -1128,6 +1085,22 @@ static inline bool amdgpu_test_signaled(struct amdgpu_fence *fence)
        return test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags);
 }
 
+static inline bool amdgpu_test_signaled_any(struct amdgpu_fence **fences)
+{
+       int idx;
+       struct amdgpu_fence *fence;
+
+       idx = 0;
+       for (idx = 0; idx < AMDGPU_MAX_RINGS; ++idx) {
+               fence = fences[idx];
+               if (fence) {
+                       if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
+                               return true;
+               }
+       }
+       return false;
+}
+
 struct amdgpu_wait_cb {
        struct fence_cb base;
        struct task_struct *task;
@@ -1182,6 +1155,62 @@ static signed long amdgpu_fence_default_wait(struct fence *f, bool intr,
        return t;
 }
 
+/* wait until any fence in array signaled */
+signed long amdgpu_fence_wait_any(struct amdgpu_device *adev,
+                               struct amdgpu_fence **array, bool intr, signed long t)
+{
+       long idx = 0;
+       struct amdgpu_wait_cb cb[AMDGPU_MAX_RINGS];
+       struct amdgpu_fence *fence;
+
+       BUG_ON(!array);
+
+       for (idx = 0; idx < AMDGPU_MAX_RINGS; ++idx) {
+               fence = array[idx];
+               if (fence) {
+                       cb[idx].task = current;
+                       if (fence_add_callback(&fence->base,
+                                       &cb[idx].base, amdgpu_fence_wait_cb))
+                               return t; /* return if fence is already signaled */
+               }
+       }
+
+       while (t > 0) {
+               if (intr)
+                       set_current_state(TASK_INTERRUPTIBLE);
+               else
+                       set_current_state(TASK_UNINTERRUPTIBLE);
+
+               /*
+                * amdgpu_test_signaled_any must be called after
+                * set_current_state to prevent a race with wake_up_process
+                */
+               if (amdgpu_test_signaled_any(array))
+                       break;
+
+               if (adev->needs_reset) {
+                       t = -EDEADLK;
+                       break;
+               }
+
+               t = schedule_timeout(t);
+
+               if (t > 0 && intr && signal_pending(current))
+                       t = -ERESTARTSYS;
+       }
+
+       __set_current_state(TASK_RUNNING);
+
+       idx = 0;
+       for (idx = 0; idx < AMDGPU_MAX_RINGS; ++idx) {
+               fence = array[idx];
+               if (fence)
+                       fence_remove_callback(&fence->base, &cb[idx].base);
+       }
+
+       return t;
+}
+
 const struct fence_ops amdgpu_fence_ops = {
        .get_driver_name = amdgpu_fence_get_driver_name,
        .get_timeline_name = amdgpu_fence_get_timeline_name,
index eb20987ce18d7e80d97a9580777b6d45b61fc4e4..f4e20eaede826743b388a3f6eebc7bc47570ea24 100644 (file)
@@ -350,7 +350,8 @@ int amdgpu_sa_bo_new(struct amdgpu_device *adev,
                } while (amdgpu_sa_bo_next_hole(sa_manager, fences, tries));
 
                spin_unlock(&sa_manager->wq.lock);
-               r = amdgpu_fence_wait_any(adev, fences, false);
+               r = amdgpu_fence_wait_any(adev, fences, false, MAX_SCHEDULE_TIMEOUT);
+               r = (r > 0) ? 0 : r;
                spin_lock(&sa_manager->wq.lock);
                /* if we have nothing to wait for block */
                if (r == -ENOENT) {