amdgpu/cs: split out fence dependency checking (v2)
author Dave Airlie <airlied@redhat.com>
Thu, 9 Mar 2017 03:45:52 +0000 (03:45 +0000)
committer Alex Deucher <alexander.deucher@amd.com>
Fri, 16 Jun 2017 20:58:31 +0000 (16:58 -0400)
This just splits out the fence dependency checking into its
own function to make it easier to add semaphore dependencies.

v2: rebase onto other changes.

v1-Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
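
For readers skimming the diff below, here is a minimal standalone C sketch of the
resulting shape: the top-level chunk loop only dispatches on the chunk id, and all
per-dependency work lives in its own helper, so a future semaphore-dependency chunk
type can be handled by adding a sibling helper and one more branch. All type and
function names in the sketch are illustrative stand-ins, not the real amdgpu
definitions from amdgpu_cs.c.

    #include <stdio.h>

    /* Illustrative stand-ins for the parser state and chunk layout;
     * the real amdgpu structures carry much more than this. */
    enum chunk_id { CHUNK_ID_IB = 1, CHUNK_ID_DEPENDENCIES = 3 };

    struct cs_chunk {
            enum chunk_id chunk_id;
            unsigned num_deps;          /* derived from length_dw in the driver */
            const unsigned *dep_handles;
    };

    struct cs_parser {
            unsigned nchunks;
            struct cs_chunk *chunks;
    };

    /* Per-chunk helper: walk each dependency record and "sync" against it.
     * In the driver this is where the context/ring lookup and the
     * fence sync happen. */
    static int cs_process_fence_dep(struct cs_parser *p, struct cs_chunk *chunk)
    {
            unsigned i;

            (void)p;
            for (i = 0; i < chunk->num_deps; ++i)
                    printf("waiting on fence handle %u\n", chunk->dep_handles[i]);
            return 0;
    }

    /* Top-level loop: after the split it only selects dependency chunks
     * and delegates; other chunk kinds would get their own helpers. */
    static int cs_dependencies(struct cs_parser *p)
    {
            unsigned i;
            int r;

            for (i = 0; i < p->nchunks; ++i) {
                    struct cs_chunk *chunk = &p->chunks[i];

                    if (chunk->chunk_id == CHUNK_ID_DEPENDENCIES) {
                            r = cs_process_fence_dep(p, chunk);
                            if (r)
                                    return r;
                    }
            }
            return 0;
    }

    int main(void)
    {
            const unsigned handles[] = { 7, 9 };
            struct cs_chunk chunks[] = {
                    { CHUNK_ID_IB, 0, NULL },
                    { CHUNK_ID_DEPENDENCIES, 2, handles },
            };
            struct cs_parser p = { 2, chunks };

            return cs_dependencies(&p);
    }

The actual diff follows; the refactor moves the inner per-dependency loop into
amdgpu_cs_process_fence_dep() and leaves amdgpu_cs_dependencies() as the dispatcher.
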
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c

index a37bdf4f8e9b6ef6ef132f67cba64f65d283e152..29469e6b58b81e874d47facdf6c26392e8c9166c 100644
@@ -923,59 +923,68 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
        return 0;
 }
 
-static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
-                                 struct amdgpu_cs_parser *p)
+static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
+                                      struct amdgpu_cs_chunk *chunk)
 {
        struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
-       int i, j, r;
+       unsigned num_deps;
+       int i, r;
+       struct drm_amdgpu_cs_chunk_dep *deps;
 
-       for (i = 0; i < p->nchunks; ++i) {
-               struct drm_amdgpu_cs_chunk_dep *deps;
-               struct amdgpu_cs_chunk *chunk;
-               unsigned num_deps;
+       deps = (struct drm_amdgpu_cs_chunk_dep *)chunk->kdata;
+       num_deps = chunk->length_dw * 4 /
+               sizeof(struct drm_amdgpu_cs_chunk_dep);
 
-               chunk = &p->chunks[i];
+       for (i = 0; i < num_deps; ++i) {
+               struct amdgpu_ring *ring;
+               struct amdgpu_ctx *ctx;
+               struct dma_fence *fence;
 
-               if (chunk->chunk_id != AMDGPU_CHUNK_ID_DEPENDENCIES)
-                       continue;
+               ctx = amdgpu_ctx_get(fpriv, deps[i].ctx_id);
+               if (ctx == NULL)
+                       return -EINVAL;
 
-               deps = (struct drm_amdgpu_cs_chunk_dep *)chunk->kdata;
-               num_deps = chunk->length_dw * 4 /
-                       sizeof(struct drm_amdgpu_cs_chunk_dep);
+               r = amdgpu_queue_mgr_map(p->adev, &ctx->queue_mgr,
+                                        deps[i].ip_type,
+                                        deps[i].ip_instance,
+                                        deps[i].ring, &ring);
+               if (r) {
+                       amdgpu_ctx_put(ctx);
+                       return r;
+               }
 
-               for (j = 0; j < num_deps; ++j) {
-                       struct amdgpu_ring *ring;
-                       struct amdgpu_ctx *ctx;
-                       struct dma_fence *fence;
+               fence = amdgpu_ctx_get_fence(ctx, ring,
+                                            deps[i].handle);
+               if (IS_ERR(fence)) {
+                       r = PTR_ERR(fence);
+                       amdgpu_ctx_put(ctx);
+                       return r;
+               } else if (fence) {
+                       r = amdgpu_sync_fence(p->adev, &p->job->sync,
+                                             fence);
+                       dma_fence_put(fence);
+                       amdgpu_ctx_put(ctx);
+                       if (r)
+                               return r;
+               }
+       }
+       return 0;
+}
 
-                       ctx = amdgpu_ctx_get(fpriv, deps[j].ctx_id);
-                       if (ctx == NULL)
-                               return -EINVAL;
+static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
+                                 struct amdgpu_cs_parser *p)
+{
+       int i, r;
 
-                       r = amdgpu_queue_mgr_map(adev, &ctx->queue_mgr,
-                                                deps[j].ip_type,
-                                                deps[j].ip_instance,
-                                                deps[j].ring, &ring);
-                       if (r) {
-                               amdgpu_ctx_put(ctx);
-                               return r;
-                       }
+       for (i = 0; i < p->nchunks; ++i) {
+               struct amdgpu_cs_chunk *chunk;
 
-                       fence = amdgpu_ctx_get_fence(ctx, ring,
-                                                    deps[j].handle);
-                       if (IS_ERR(fence)) {
-                               r = PTR_ERR(fence);
-                               amdgpu_ctx_put(ctx);
-                               return r;
+               chunk = &p->chunks[i];
 
-                       } else if (fence) {
-                               r = amdgpu_sync_fence(adev, &p->job->sync,
-                                                     fence);
-                               dma_fence_put(fence);
-                               amdgpu_ctx_put(ctx);
-                               if (r)
-                                       return r;
-                       }
+               if (chunk->chunk_id == AMDGPU_CHUNK_ID_DEPENDENCIES) {
+                       r = amdgpu_cs_process_fence_dep(p, chunk);
+                       if (r)
+                               return r;
                }
        }