drm/scheduler: Rename cleanup functions v2.
author    Andrey Grodzovsky <andrey.grodzovsky@amd.com>
          Tue, 5 Jun 2018 16:43:23 +0000 (12:43 -0400)
committer Alex Deucher <alexander.deucher@amd.com>
          Thu, 5 Jul 2018 21:38:45 +0000 (16:38 -0500)
Everything in the flush code path (i.e. waiting for the SW queue
to become empty) is named *_flush(), and everything in the release
code path is named *_fini().

This patch also affects the amdgpu and etnaviv drivers, which
use those functions.

v2:
Also apply the change to v3d.

Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
Suggested-by: Christian König <christian.koenig@amd.com>
Acked-by: Lucas Stach <l.stach@pengutronix.de>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
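
As a quick illustration of the resulting API, the combined helper keeps
the old one-call teardown. A minimal sketch (the mydrv_* names are
hypothetical; the drm_sched_entity_* signatures are the ones declared in
the include/drm/gpu_scheduler.h change below):

	/*
	 * Hypothetical driver file-close path. drm_sched_entity_destroy()
	 * first flushes the entity's software queue (the *_flush() step,
	 * bounded by MAX_WAIT_SCHED_ENTITY_Q_EMPTY) and then releases the
	 * entity's resources (the *_fini() step).
	 */
	static void mydrv_postclose(struct drm_device *dev, struct drm_file *file)
	{
		struct mydrv_file_priv *priv = file->driver_priv;

		drm_sched_entity_destroy(&priv->gpu->sched, &priv->sched_entity);
		kfree(priv);
	}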
drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
drivers/gpu/drm/etnaviv/etnaviv_drv.c
drivers/gpu/drm/scheduler/gpu_scheduler.c
drivers/gpu/drm/v3d/v3d_drv.c
include/drm/gpu_scheduler.h

drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 64b3a1ed04dcacd1af40fc7db15961d6fc57af93..c0f06c02f2dea4df511fd283a4860b98ffbe8c59 100644 (file)
@@ -104,7 +104,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
 
 failed:
        for (j = 0; j < i; j++)
-               drm_sched_entity_fini(&adev->rings[j]->sched,
+               drm_sched_entity_destroy(&adev->rings[j]->sched,
                                      &ctx->rings[j].entity);
        kfree(ctx->fences);
        ctx->fences = NULL;
@@ -178,7 +178,7 @@ static void amdgpu_ctx_do_release(struct kref *ref)
                if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
                        continue;
 
-               drm_sched_entity_fini(&ctx->adev->rings[i]->sched,
+               drm_sched_entity_destroy(&ctx->adev->rings[i]->sched,
                        &ctx->rings[i].entity);
        }
 
@@ -466,7 +466,7 @@ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
                        if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
                                continue;
 
-                       max_wait = drm_sched_entity_do_release(&ctx->adev->rings[i]->sched,
+                       max_wait = drm_sched_entity_flush(&ctx->adev->rings[i]->sched,
                                          &ctx->rings[i].entity, max_wait);
                }
        }
@@ -492,7 +492,7 @@ void amdgpu_ctx_mgr_entity_cleanup(struct amdgpu_ctx_mgr *mgr)
                                continue;
 
                        if (kref_read(&ctx->refcount) == 1)
-                               drm_sched_entity_cleanup(&ctx->adev->rings[i]->sched,
+                               drm_sched_entity_fini(&ctx->adev->rings[i]->sched,
                                        &ctx->rings[i].entity);
                        else
                                DRM_ERROR("ctx %p is still alive\n", ctx);
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 0c084d3d086526e4453a0bebd87a41dba2a7ad6c..0246cb87d9e4e4b045c5076b64dac1b6c3ac99aa 100644 (file)
@@ -162,7 +162,7 @@ error_mem:
 static void amdgpu_ttm_global_fini(struct amdgpu_device *adev)
 {
        if (adev->mman.mem_global_referenced) {
-               drm_sched_entity_fini(adev->mman.entity.sched,
+               drm_sched_entity_destroy(adev->mman.entity.sched,
                                      &adev->mman.entity);
                mutex_destroy(&adev->mman.gtt_window_lock);
                drm_global_item_unref(&adev->mman.bo_global_ref.ref);
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index cc15d32304022c7f01d42ad38ffc94d67c9da767..0b46ea1c62907178c796cf91dc4225dca16aff84 100644 (file)
@@ -309,7 +309,7 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
        for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
                kfree(adev->uvd.inst[j].saved_bo);
 
-               drm_sched_entity_fini(&adev->uvd.inst[j].ring.sched, &adev->uvd.inst[j].entity);
+               drm_sched_entity_destroy(&adev->uvd.inst[j].ring.sched, &adev->uvd.inst[j].entity);
 
                amdgpu_bo_free_kernel(&adev->uvd.inst[j].vcpu_bo,
                                      &adev->uvd.inst[j].gpu_addr,
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 23d960ec1cf27947d8f5ccea419a92224b9ad112..b0dcdfd85f5b84ba7e0bdd5ffb9636a53ac18cb2 100644 (file)
@@ -222,7 +222,7 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
        if (adev->vce.vcpu_bo == NULL)
                return 0;
 
-       drm_sched_entity_fini(&adev->vce.ring[0].sched, &adev->vce.entity);
+       drm_sched_entity_destroy(&adev->vce.ring[0].sched, &adev->vce.entity);
 
        amdgpu_bo_free_kernel(&adev->vce.vcpu_bo, &adev->vce.gpu_addr,
                (void **)&adev->vce.cpu_addr);
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 590db78b8c72536d8168c181a27fcad9c431ac43..837066076ccf5144accd45fb0d7a911470ef882c 100644 (file)
@@ -2643,7 +2643,7 @@ error_free_root:
        vm->root.base.bo = NULL;
 
 error_free_sched_entity:
-       drm_sched_entity_fini(&ring->sched, &vm->entity);
+       drm_sched_entity_destroy(&ring->sched, &vm->entity);
 
        return r;
 }
@@ -2780,7 +2780,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
                spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
        }
 
-       drm_sched_entity_fini(vm->entity.sched, &vm->entity);
+       drm_sched_entity_destroy(vm->entity.sched, &vm->entity);
 
        if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
                dev_err(adev->dev, "still active bo inside vm\n");
drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index bfddf97dd13e6048ab009a5ef3f3655b72bad48d..1df1c6115341d693356067ac50c4a8a3fcc05c46 100644 (file)
@@ -470,7 +470,7 @@ static int uvd_v6_0_sw_fini(void *handle)
                return r;
 
        if (uvd_v6_0_enc_support(adev)) {
-               drm_sched_entity_fini(&adev->uvd.inst->ring_enc[0].sched, &adev->uvd.inst->entity_enc);
+               drm_sched_entity_destroy(&adev->uvd.inst->ring_enc[0].sched, &adev->uvd.inst->entity_enc);
 
                for (i = 0; i < adev->uvd.num_enc_rings; ++i)
                        amdgpu_ring_fini(&adev->uvd.inst->ring_enc[i]);
drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index 57d32f21b3a611e5bc5d009cb0dd7d32430720bb..ba244d3b74dbbe7ab8fffc8177822fc9c1eb4d1f 100644 (file)
@@ -491,7 +491,7 @@ static int uvd_v7_0_sw_fini(void *handle)
                return r;
 
        for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
-               drm_sched_entity_fini(&adev->uvd.inst[j].ring_enc[0].sched, &adev->uvd.inst[j].entity_enc);
+               drm_sched_entity_destroy(&adev->uvd.inst[j].ring_enc[0].sched, &adev->uvd.inst[j].entity_enc);
 
                for (i = 0; i < adev->uvd.num_enc_rings; ++i)
                        amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]);
drivers/gpu/drm/etnaviv/etnaviv_drv.c
index e5013a9991477eda57913a80f7c978a199727a62..45bfdf4cc1078465d6c630f572fd7e012e3dc8c7 100644 (file)
@@ -78,8 +78,8 @@ static void etnaviv_postclose(struct drm_device *dev, struct drm_file *file)
                                gpu->lastctx = NULL;
                        mutex_unlock(&gpu->lock);
 
-                       drm_sched_entity_fini(&gpu->sched,
-                                             &ctx->sched_entity[i]);
+                       drm_sched_entity_destroy(&gpu->sched,
+                                               &ctx->sched_entity[i]);
                }
        }
 
drivers/gpu/drm/scheduler/gpu_scheduler.c
index 6a316701da739a1e2e2d4fdc9dc743c86084c9af..7d2560699b84e7d9bc9cca21c2746307cff81923 100644 (file)
@@ -256,7 +256,7 @@ static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
 
 
 /**
- * drm_sched_entity_do_release - Destroy a context entity
+ * drm_sched_entity_flush - Flush a context entity
  *
  * @sched: scheduler instance
  * @entity: scheduler entity
@@ -267,7 +267,7 @@ static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
  *
  * Returns the remaining time in jiffies left from the input timeout
  */
-long drm_sched_entity_do_release(struct drm_gpu_scheduler *sched,
+long drm_sched_entity_flush(struct drm_gpu_scheduler *sched,
                           struct drm_sched_entity *entity, long timeout)
 {
        long ret = timeout;
@@ -294,7 +294,7 @@ long drm_sched_entity_do_release(struct drm_gpu_scheduler *sched,
 
        return ret;
 }
-EXPORT_SYMBOL(drm_sched_entity_do_release);
+EXPORT_SYMBOL(drm_sched_entity_flush);
 
 /**
  * drm_sched_entity_cleanup - Destroy a context entity
@@ -306,7 +306,7 @@ EXPORT_SYMBOL(drm_sched_entity_do_release);
  * entity and signals all jobs with an error code if the process was killed.
  *
  */
-void drm_sched_entity_cleanup(struct drm_gpu_scheduler *sched,
+void drm_sched_entity_fini(struct drm_gpu_scheduler *sched,
                           struct drm_sched_entity *entity)
 {
 
@@ -357,7 +357,7 @@ void drm_sched_entity_cleanup(struct drm_gpu_scheduler *sched,
        dma_fence_put(entity->last_scheduled);
        entity->last_scheduled = NULL;
 }
-EXPORT_SYMBOL(drm_sched_entity_cleanup);
+EXPORT_SYMBOL(drm_sched_entity_fini);
 
 /**
  * drm_sched_entity_fini - Destroy a context entity
@@ -367,13 +367,13 @@ EXPORT_SYMBOL(drm_sched_entity_cleanup);
  *
  * Calls drm_sched_entity_do_release() and drm_sched_entity_cleanup()
  */
-void drm_sched_entity_fini(struct drm_gpu_scheduler *sched,
+void drm_sched_entity_destroy(struct drm_gpu_scheduler *sched,
                                struct drm_sched_entity *entity)
 {
-       drm_sched_entity_do_release(sched, entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
-       drm_sched_entity_cleanup(sched, entity);
+       drm_sched_entity_flush(sched, entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
+       drm_sched_entity_fini(sched, entity);
 }
-EXPORT_SYMBOL(drm_sched_entity_fini);
+EXPORT_SYMBOL(drm_sched_entity_destroy);
 
 static void drm_sched_entity_wakeup(struct dma_fence *f, struct dma_fence_cb *cb)
 {
drivers/gpu/drm/v3d/v3d_drv.c
index cdb582043b4fc26cc63a64e04eadcd1bd559e752..567f7d46d912d4de6ea31ad5440f9b62b0f4859f 100644 (file)
@@ -151,7 +151,7 @@ v3d_postclose(struct drm_device *dev, struct drm_file *file)
        enum v3d_queue q;
 
        for (q = 0; q < V3D_MAX_QUEUES; q++) {
-               drm_sched_entity_fini(&v3d->queue[q].sched,
+               drm_sched_entity_destroy(&v3d->queue[q].sched,
                                      &v3d_priv->sched_entity[q]);
        }
 
include/drm/gpu_scheduler.h
index 7c2dfd6cc1afba57c9ec9fae8f949983191d2f75..4214ceb71c054d0748303e95f8155fe71c0ca403 100644 (file)
@@ -284,12 +284,12 @@ int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
                          struct drm_sched_entity *entity,
                          struct drm_sched_rq *rq,
                          atomic_t *guilty);
-long drm_sched_entity_do_release(struct drm_gpu_scheduler *sched,
+long drm_sched_entity_flush(struct drm_gpu_scheduler *sched,
                           struct drm_sched_entity *entity, long timeout);
-void drm_sched_entity_cleanup(struct drm_gpu_scheduler *sched,
-                          struct drm_sched_entity *entity);
 void drm_sched_entity_fini(struct drm_gpu_scheduler *sched,
                           struct drm_sched_entity *entity);
+void drm_sched_entity_destroy(struct drm_gpu_scheduler *sched,
+                          struct drm_sched_entity *entity);
 void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
                               struct drm_sched_entity *entity);
 void drm_sched_entity_set_rq(struct drm_sched_entity *entity,
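
Where a driver needs the bounded wait from process-exit context but wants
to defer the final release (as amdgpu_ctx_mgr_entity_fini()/_cleanup() do
above), the two halves can be called separately. A minimal sketch, assuming
the renamed API; the 100 ms bound, the DRM_WARN message and the mydrv_*
naming are hypothetical:

	/* Two-step teardown with the renamed helpers. */
	static void mydrv_entity_teardown(struct drm_gpu_scheduler *sched,
					  struct drm_sched_entity *entity)
	{
		long remaining;

		/*
		 * Step 1: wait (up to the given timeout, in jiffies) for the
		 * entity's software queue to drain; the unused part of the
		 * timeout is returned.
		 */
		remaining = drm_sched_entity_flush(sched, entity,
						   msecs_to_jiffies(100));
		if (!remaining)
			DRM_WARN("sched entity queue did not drain in time\n");

		/*
		 * Step 2: release the entity; if the owning process was
		 * killed, the remaining jobs are signalled with an error.
		 */
		drm_sched_entity_fini(sched, entity);
	}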