drm/scheduler: change entities rq even earlier
author     Christian König <christian.koenig@amd.com>
           Wed, 8 Aug 2018 11:07:11 +0000 (13:07 +0200)
committer  Alex Deucher <alexander.deucher@amd.com>
           Mon, 27 Aug 2018 16:10:07 +0000 (11:10 -0500)
For correct debugging we need to know the scheduler even earlier, so
move picking an rq for an entity into job creation.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Nayan Deshmukh <nayan26deshmukh@gmail.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/scheduler/gpu_scheduler.c
drivers/gpu/drm/scheduler/sched_fence.c
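
The practical effect for drivers: after this patch entity->rq, and with it
job->sched, is already settled when drm_sched_job_init() returns, so the
scheduler backing a job can be reported for debugging before the job is
pushed. A minimal sketch of that usage, assuming a hypothetical driver
entry point my_driver_submit() that is not part of this patch:

#include <drm/gpu_scheduler.h>
#include <linux/printk.h>

/* Hypothetical driver submit path, for illustration only. */
static int my_driver_submit(struct drm_sched_job *job,
                            struct drm_sched_entity *entity, void *owner)
{
        int r;

        r = drm_sched_job_init(job, entity, owner);
        if (r)
                return r;

        /* The rq was selected inside drm_sched_job_init(), so the
         * scheduler for this job is already known here. */
        pr_debug("job assigned to scheduler %s\n", job->sched->name);

        drm_sched_entity_push_job(job, entity);
        return 0;
}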

diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
index f40a504e3d68a53fe76ac5f2ce202c3fdf755574..f566405f49e3c3af9fad4fe3403b44d245a97add 100644
--- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
@@ -549,6 +549,34 @@ drm_sched_entity_pop_job(struct drm_sched_entity *entity)
        return sched_job;
 }
 
+/**
+ * drm_sched_entity_select_rq - select a new rq for the entity
+ *
+ * @entity: scheduler entity
+ *
+ * Check all prerequisites and select a new rq for the entity for load
+ * balancing.
+ */
+static void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
+{
+       struct dma_fence *fence;
+       struct drm_sched_rq *rq;
+
+       if (spsc_queue_count(&entity->job_queue) ||
+           entity->num_rq_list <= 1)
+               return;
+
+       fence = READ_ONCE(entity->last_scheduled);
+       if (fence && !dma_fence_is_signaled(fence))
+               return;
+
+       rq = drm_sched_entity_get_free_sched(entity);
+       spin_lock(&entity->rq_lock);
+       drm_sched_rq_remove_entity(entity->rq, entity);
+       entity->rq = rq;
+       spin_unlock(&entity->rq_lock);
+}
+
 /**
  * drm_sched_entity_push_job - Submit a job to the entity's job queue
  *
@@ -564,25 +592,8 @@ drm_sched_entity_pop_job(struct drm_sched_entity *entity)
 void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
                               struct drm_sched_entity *entity)
 {
-       struct drm_sched_rq *rq = entity->rq;
        bool first;
 
-       first = spsc_queue_count(&entity->job_queue) == 0;
-       if (first && (entity->num_rq_list > 1)) {
-               struct dma_fence *fence;
-
-               fence = READ_ONCE(entity->last_scheduled);
-               if (fence == NULL || dma_fence_is_signaled(fence)) {
-                       rq = drm_sched_entity_get_free_sched(entity);
-                       spin_lock(&entity->rq_lock);
-                       drm_sched_rq_remove_entity(entity->rq, entity);
-                       entity->rq = rq;
-                       spin_unlock(&entity->rq_lock);
-               }
-       }
-
-       sched_job->sched = entity->rq->sched;
-       sched_job->s_fence->sched = entity->rq->sched;
        trace_drm_sched_job(sched_job, entity);
        atomic_inc(&entity->rq->sched->num_jobs);
        WRITE_ONCE(entity->last_user, current->group_leader);
@@ -786,7 +797,10 @@ int drm_sched_job_init(struct drm_sched_job *job,
                       struct drm_sched_entity *entity,
                       void *owner)
 {
-       struct drm_gpu_scheduler *sched = entity->rq->sched;
+       struct drm_gpu_scheduler *sched;
+
+       drm_sched_entity_select_rq(entity);
+       sched = entity->rq->sched;
 
        job->sched = sched;
        job->entity = entity;
diff --git a/drivers/gpu/drm/scheduler/sched_fence.c b/drivers/gpu/drm/scheduler/sched_fence.c
index 20e4da377890a966d5b695acdde5016fd4cca0ad..d8d2dff9ea2f79d17479426db774a5b8049d90e7 100644
--- a/drivers/gpu/drm/scheduler/sched_fence.c
+++ b/drivers/gpu/drm/scheduler/sched_fence.c
@@ -161,7 +161,7 @@ struct drm_sched_fence *drm_sched_fence_create(struct drm_sched_entity *entity,
                return NULL;
 
        fence->owner = owner;
-       fence->sched = NULL;
+       fence->sched = entity->rq->sched;
        spin_lock_init(&fence->lock);
 
        seq = atomic_inc_return(&entity->fence_seq);
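
Taken together, the two files pin down the new ordering: the rq is
selected once in drm_sched_job_init(), and drm_sched_fence_create() can
therefore record the scheduler at fence creation instead of having it
patched in later by drm_sched_entity_push_job(), as the removed hunk used
to do. A simplified sketch of the resulting call flow (comments only, not
verbatim kernel code):

/*
 * drm_sched_job_init(job, entity, owner)
 *     drm_sched_entity_select_rq(entity)    // may move an idle entity to
 *                                           // another rq (load balancing)
 *     job->sched = entity->rq->sched        // scheduler fixed from here on
 *     drm_sched_fence_create(entity, owner)
 *         fence->sched = entity->rq->sched  // valid: rq already selected
 *
 * drm_sched_entity_push_job(job, entity)    // only queues the job; the
 *                                           // entity's rq no longer changes
 */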