drm/scheduler: improve job distribution with multiple queues
author    Nirmoy Das <nirmoy.aiemd@gmail.com>
          Thu, 25 Jun 2020 12:07:23 +0000 (14:07 +0200)
committer Christian König <christian.koenig@amd.com>
          Fri, 26 Jun 2020 12:16:29 +0000 (14:16 +0200)
This patch uses a per-scheduler score, instead of num_jobs, to select a
drm scheduler, which gives better load balancing between multiple drm
schedulers.
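To illustrate the idea outside the kernel, below is a minimal userspace C
sketch of the policy, not the kernel code itself; names such as fake_sched,
fake_sched_account and pick_best are hypothetical. Each scheduler keeps an
atomic score that is raised when an entity is attached or a job is pushed
and lowered again when they go away, and the load balancer picks the ready
scheduler with the lowest score, roughly mirroring drm_sched_pick_best().

#include <limits.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_sched {
	const char *name;
	bool ready;		/* underlying HW ring is usable */
	atomic_uint score;	/* attached entities + in-flight jobs */
};

/* Adjust the score when an entity or job is added to / removed from a scheduler. */
static void fake_sched_account(struct fake_sched *sched, int delta)
{
	if (delta > 0)
		atomic_fetch_add(&sched->score, delta);
	else
		atomic_fetch_sub(&sched->score, -delta);
}

/* Pick the ready scheduler with the smallest score. */
static struct fake_sched *pick_best(struct fake_sched *list, unsigned int num)
{
	struct fake_sched *picked = NULL;
	unsigned int min_score = UINT_MAX;

	for (unsigned int i = 0; i < num; i++) {
		unsigned int score;

		if (!list[i].ready)
			continue;

		score = atomic_load(&list[i].score);
		if (score < min_score) {
			min_score = score;
			picked = &list[i];
		}
	}
	return picked;
}

int main(void)
{
	struct fake_sched scheds[2] = {
		{ .name = "sdma0", .ready = true },
		{ .name = "sdma1", .ready = true },
	};

	fake_sched_account(&scheds[0], +1);	/* entity placed on sdma0 */
	fake_sched_account(&scheds[0], +1);	/* job pushed to sdma0 */

	/* sdma1 has the lower score, so the next entity lands there. */
	printf("picked %s\n", pick_best(scheds, 2)->name);
	return 0;
}

Because entities contribute to the score as well as in-flight jobs, a
scheduler that merely hosts many entities already looks busier, which is
what spreads work across sdma0 and sdma1 in the test results below.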

Below are test results after running amdgpu_test ~10 times.

Before this patch:

sched_name     number of times it got scheduled
=========      ==================================
sdma0          1463
sdma1          198
comp_1.0.1     280

After this patch:

sched_name     number of times it got scheduled
=========      ==================================
sdma0          925
sdma1          928
comp_1.0.1     177
comp_1.1.1     44
comp_1.2.1     43
comp_1.3.1     44

Signed-off-by: Nirmoy Das <nirmoy.das@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Link: https://patchwork.freedesktop.org/patch/373000/
Signed-off-by: Christian König <christian.koenig@amd.com>
drivers/gpu/drm/scheduler/sched_entity.c
drivers/gpu/drm/scheduler/sched_main.c
include/drm/gpu_scheduler.h

diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index c803e14eed91a867d63f4b3015992f7a08261f83..1463801189624a571c22c84370d80c4e33fc8246 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -486,7 +486,7 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
        bool first;
 
        trace_drm_sched_job(sched_job, entity);
-       atomic_inc(&entity->rq->sched->num_jobs);
+       atomic_inc(&entity->rq->sched->score);
        WRITE_ONCE(entity->last_user, current->group_leader);
        first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);
 
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 8e731ed0d9d9f81d4b04bfe5ecd4e139c13bfd51..25a9e691160218542a49c6181378aeb64a53ae3d 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -92,6 +92,7 @@ void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
        if (!list_empty(&entity->list))
                return;
        spin_lock(&rq->lock);
+       atomic_inc(&rq->sched->score);
        list_add_tail(&entity->list, &rq->entities);
        spin_unlock(&rq->lock);
 }
@@ -110,6 +111,7 @@ void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
        if (list_empty(&entity->list))
                return;
        spin_lock(&rq->lock);
+       atomic_dec(&rq->sched->score);
        list_del_init(&entity->list);
        if (rq->current_entity == entity)
                rq->current_entity = NULL;
@@ -647,7 +649,7 @@ static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
        struct drm_gpu_scheduler *sched = s_fence->sched;
 
        atomic_dec(&sched->hw_rq_count);
-       atomic_dec(&sched->num_jobs);
+       atomic_dec(&sched->score);
 
        trace_drm_sched_process_job(s_fence);
 
@@ -712,7 +714,7 @@ drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
 {
        struct drm_gpu_scheduler *sched, *picked_sched = NULL;
        int i;
-       unsigned int min_jobs = UINT_MAX, num_jobs;
+       unsigned int min_score = UINT_MAX, num_score;
 
        for (i = 0; i < num_sched_list; ++i) {
                sched = sched_list[i];
@@ -723,9 +725,9 @@ drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
                        continue;
                }
 
-               num_jobs = atomic_read(&sched->num_jobs);
-               if (num_jobs < min_jobs) {
-                       min_jobs = num_jobs;
+               num_score = atomic_read(&sched->score);
+               if (num_score < min_score) {
+                       min_score = num_score;
                        picked_sched = sched;
                }
        }
@@ -860,7 +862,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
        spin_lock_init(&sched->job_list_lock);
        atomic_set(&sched->hw_rq_count, 0);
        INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout);
-       atomic_set(&sched->num_jobs, 0);
+       atomic_set(&sched->score, 0);
        atomic64_set(&sched->job_id_count, 0);
 
        /* Each scheduler will run on a seperate kernel thread */
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index a21b3b92135a6ee4898a20fed3464967c463172f..b9780ae9dd26c140b43e806b3a59cc01afa5cc75 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -263,7 +263,7 @@ struct drm_sched_backend_ops {
  * @job_list_lock: lock to protect the ring_mirror_list.
  * @hang_limit: once the hangs by a job crosses this limit then it is marked
  *              guilty and it will be considered for scheduling further.
- * @num_jobs: the number of jobs in queue in the scheduler
+ * @score: score to help loadbalancer pick a idle sched
  * @ready: marks if the underlying HW is ready to work
  * @free_guilty: A hit to time out handler to free the guilty job.
  *
@@ -284,8 +284,8 @@ struct drm_gpu_scheduler {
        struct list_head                ring_mirror_list;
        spinlock_t                      job_list_lock;
        int                             hang_limit;
-       atomic_t                        num_jobs;
-       bool                    ready;
+       atomic_t                        score;
+       bool                            ready;
        bool                            free_guilty;
 };