/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#include <drm/drmP.h>
#include "gpu_scheduler.h"

#define CREATE_TRACE_POINTS
#include "gpu_sched_trace.h"

static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity);
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
static void amd_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb);

/* Initialize a given run queue struct */
static void amd_sched_rq_init(struct amd_sched_rq *rq)
{
	spin_lock_init(&rq->lock);
	INIT_LIST_HEAD(&rq->entities);
	rq->current_entity = NULL;
}

static void amd_sched_rq_add_entity(struct amd_sched_rq *rq,
				    struct amd_sched_entity *entity)
{
	if (!list_empty(&entity->list))
		return;
	spin_lock(&rq->lock);
	list_add_tail(&entity->list, &rq->entities);
	spin_unlock(&rq->lock);
}

static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq,
				       struct amd_sched_entity *entity)
{
	if (list_empty(&entity->list))
		return;
	spin_lock(&rq->lock);
	list_del_init(&entity->list);
	if (rq->current_entity == entity)
		rq->current_entity = NULL;
	spin_unlock(&rq->lock);
}

/**
 * Select an entity which could provide a job to run
 *
 * @rq The run queue to check.
 *
 * Try to find a ready entity, returns NULL if none found.
 */
static struct amd_sched_entity *
amd_sched_rq_select_entity(struct amd_sched_rq *rq)
{
	struct amd_sched_entity *entity;

	spin_lock(&rq->lock);

	entity = rq->current_entity;
	if (entity) {
		list_for_each_entry_continue(entity, &rq->entities, list) {
			if (amd_sched_entity_is_ready(entity)) {
				rq->current_entity = entity;
				spin_unlock(&rq->lock);
				return entity;
			}
		}
	}

	list_for_each_entry(entity, &rq->entities, list) {

		if (amd_sched_entity_is_ready(entity)) {
			rq->current_entity = entity;
			spin_unlock(&rq->lock);
			return entity;
		}

		if (entity == rq->current_entity)
			break;
	}

	spin_unlock(&rq->lock);

	return NULL;
}

/**
 * Init a context entity used by the scheduler when submitting to a HW ring.
 *
 * @sched The pointer to the scheduler
 * @entity The pointer to a valid amd_sched_entity
 * @rq The run queue this entity belongs to
 * @jobs The max number of jobs in the job queue
 *
 * return 0 on success, negative error code on failure
*/
int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
			  struct amd_sched_entity *entity,
			  struct amd_sched_rq *rq,
			  uint32_t jobs)
{
	int r;

	if (!(sched && entity && rq))
		return -EINVAL;

	memset(entity, 0, sizeof(struct amd_sched_entity));
	INIT_LIST_HEAD(&entity->list);
	entity->rq = rq;
	entity->sched = sched;

	spin_lock_init(&entity->queue_lock);
	r = kfifo_alloc(&entity->job_queue, jobs * sizeof(void *), GFP_KERNEL);
	if (r)
		return r;

	atomic_set(&entity->fence_seq, 0);
	entity->fence_context = dma_fence_context_alloc(2);

	return 0;
}
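
/*
 * Illustrative sketch (not part of the original file): how a driver would
 * typically bind an entity to one of the scheduler's run queues and tear it
 * down again.  The example_* names and the queue depth of 32 are
 * hypothetical; amd_sched_entity_fini() is declared in gpu_scheduler.h and
 * defined further below.
 */
static inline int example_entity_setup(struct amd_gpu_scheduler *sched,
				       struct amd_sched_entity *entity)
{
	/* Attach the entity to the lowest priority run queue, with room for
	 * up to 32 queued jobs. */
	return amd_sched_entity_init(sched, entity,
				     &sched->sched_rq[AMD_SCHED_PRIORITY_MIN],
				     32);
}

static inline void example_entity_teardown(struct amd_gpu_scheduler *sched,
					   struct amd_sched_entity *entity)
{
	/* Waits until the entity is idle, removes it from its run queue and
	 * frees its job queue. */
	amd_sched_entity_fini(sched, entity);
}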

/**
 * Query if entity is initialized
 *
 * @sched Pointer to scheduler instance
 * @entity The pointer to a valid scheduler entity
 *
 * return true if entity is initialized, false otherwise
*/
static bool amd_sched_entity_is_initialized(struct amd_gpu_scheduler *sched,
					    struct amd_sched_entity *entity)
{
	return entity->sched == sched &&
		entity->rq != NULL;
}

/**
 * Check if entity is idle
 *
 * @entity The pointer to a valid scheduler entity
 *
 * Return true if the entity doesn't have any unscheduled jobs.
 */
static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity)
{
	rmb();
	if (kfifo_is_empty(&entity->job_queue))
		return true;

	return false;
}

/**
 * Check if entity is ready
 *
 * @entity The pointer to a valid scheduler entity
 *
 * Return true if entity could provide a job.
 */
static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity)
{
	if (kfifo_is_empty(&entity->job_queue))
		return false;

	if (ACCESS_ONCE(entity->dependency))
		return false;

	return true;
}

/**
 * Destroy a context entity
 *
 * @sched Pointer to scheduler instance
 * @entity The pointer to a valid scheduler entity
 *
 * Cleanup and free the allocated resources.
 */
void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
			   struct amd_sched_entity *entity)
{
	struct amd_sched_rq *rq = entity->rq;

	if (!amd_sched_entity_is_initialized(sched, entity))
		return;

	/*
	 * The client will not queue more IBs during this fini; consume the
	 * existing queued IBs.
	 */
	wait_event(sched->job_scheduled, amd_sched_entity_is_idle(entity));

	amd_sched_rq_remove_entity(rq, entity);
	kfifo_free(&entity->job_queue);
}

static void amd_sched_entity_wakeup(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct amd_sched_entity *entity =
		container_of(cb, struct amd_sched_entity, cb);
	entity->dependency = NULL;
	dma_fence_put(f);
	amd_sched_wakeup(entity->sched);
}

static void amd_sched_entity_clear_dep(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct amd_sched_entity *entity =
		container_of(cb, struct amd_sched_entity, cb);
	entity->dependency = NULL;
	dma_fence_put(f);
}

static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
{
	struct amd_gpu_scheduler *sched = entity->sched;
	struct dma_fence *fence = entity->dependency;
	struct amd_sched_fence *s_fence;

	if (fence->context == entity->fence_context) {
		/* We can ignore fences from ourselves */
		dma_fence_put(entity->dependency);
		return false;
	}

	s_fence = to_amd_sched_fence(fence);
	if (s_fence && s_fence->sched == sched) {

		/*
		 * Fence is from the same scheduler, only need to wait for
		 * it to be scheduled
		 */
		fence = dma_fence_get(&s_fence->scheduled);
		dma_fence_put(entity->dependency);
		entity->dependency = fence;
		if (!dma_fence_add_callback(fence, &entity->cb,
					    amd_sched_entity_clear_dep))
			return true;

		/* Ignore it when it is already scheduled */
		dma_fence_put(fence);
		return false;
	}

	if (!dma_fence_add_callback(entity->dependency, &entity->cb,
				    amd_sched_entity_wakeup))
		return true;

	dma_fence_put(entity->dependency);
	return false;
}

static struct amd_sched_job *
amd_sched_entity_pop_job(struct amd_sched_entity *entity)
{
	struct amd_gpu_scheduler *sched = entity->sched;
	struct amd_sched_job *sched_job;

	if (!kfifo_out_peek(&entity->job_queue, &sched_job, sizeof(sched_job)))
		return NULL;

	while ((entity->dependency = sched->ops->dependency(sched_job)))
		if (amd_sched_entity_add_dependency_cb(entity))
			return NULL;

	return sched_job;
}

/**
 * Helper to submit a job to the job queue
 *
 * @sched_job The pointer to the job to submit
 *
 * Returns true if we could submit the job.
 */
static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
{
	struct amd_gpu_scheduler *sched = sched_job->sched;
	struct amd_sched_entity *entity = sched_job->s_entity;
	bool added, first = false;

	spin_lock(&entity->queue_lock);
	added = kfifo_in(&entity->job_queue, &sched_job,
			 sizeof(sched_job)) == sizeof(sched_job);

	if (added && kfifo_len(&entity->job_queue) == sizeof(sched_job))
		first = true;

	spin_unlock(&entity->queue_lock);

	/* first job wakes up scheduler */
	if (first) {
		/* Add the entity to the run queue */
		amd_sched_rq_add_entity(entity->rq, entity);
		amd_sched_wakeup(sched);
	}
	return added;
}

/* amd_sched_job_finish is called after the hw fence is signaled; it removes
 * the job from the ring_mirror_list, re-arms the TDR for the next pending
 * job if a timeout is configured, and finally frees the job.
 */
static void amd_sched_job_finish(struct work_struct *work)
{
	struct amd_sched_job *s_job = container_of(work, struct amd_sched_job,
						   finish_work);
	struct amd_gpu_scheduler *sched = s_job->sched;

	/* remove job from ring_mirror_list */
	spin_lock(&sched->job_list_lock);
	list_del_init(&s_job->node);
	if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
		struct amd_sched_job *next;

		spin_unlock(&sched->job_list_lock);
		cancel_delayed_work_sync(&s_job->work_tdr);
		spin_lock(&sched->job_list_lock);

		/* queue TDR for next job */
		next = list_first_entry_or_null(&sched->ring_mirror_list,
						struct amd_sched_job, node);

		if (next)
			schedule_delayed_work(&next->work_tdr, sched->timeout);
	}
	spin_unlock(&sched->job_list_lock);
	sched->ops->free_job(s_job);
}

static void amd_sched_job_finish_cb(struct dma_fence *f,
				    struct dma_fence_cb *cb)
{
	struct amd_sched_job *job = container_of(cb, struct amd_sched_job,
						 finish_cb);
	schedule_work(&job->finish_work);
}

static void amd_sched_job_begin(struct amd_sched_job *s_job)
{
	struct amd_gpu_scheduler *sched = s_job->sched;

	spin_lock(&sched->job_list_lock);
	list_add_tail(&s_job->node, &sched->ring_mirror_list);
	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
	    list_first_entry_or_null(&sched->ring_mirror_list,
				     struct amd_sched_job, node) == s_job)
		schedule_delayed_work(&s_job->work_tdr, sched->timeout);
	spin_unlock(&sched->job_list_lock);
}

static void amd_sched_job_timedout(struct work_struct *work)
{
	struct amd_sched_job *job = container_of(work, struct amd_sched_job,
						 work_tdr.work);

	job->sched->ops->timedout_job(job);
}

void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched)
{
	struct amd_sched_job *s_job;

	spin_lock(&sched->job_list_lock);
	list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) {
		if (s_job->s_fence->parent &&
		    dma_fence_remove_callback(s_job->s_fence->parent,
					      &s_job->s_fence->cb)) {
			dma_fence_put(s_job->s_fence->parent);
			s_job->s_fence->parent = NULL;
		}
	}
	atomic_set(&sched->hw_rq_count, 0);
	spin_unlock(&sched->job_list_lock);
}

void amd_sched_job_recovery(struct amd_gpu_scheduler *sched)
{
	struct amd_sched_job *s_job, *tmp;
	int r;

	spin_lock(&sched->job_list_lock);
	s_job = list_first_entry_or_null(&sched->ring_mirror_list,
					 struct amd_sched_job, node);
	if (s_job && sched->timeout != MAX_SCHEDULE_TIMEOUT)
		schedule_delayed_work(&s_job->work_tdr, sched->timeout);

	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
		struct amd_sched_fence *s_fence = s_job->s_fence;
		struct dma_fence *fence;

		spin_unlock(&sched->job_list_lock);
		fence = sched->ops->run_job(s_job);
		atomic_inc(&sched->hw_rq_count);
		if (fence) {
			s_fence->parent = dma_fence_get(fence);
			r = dma_fence_add_callback(fence, &s_fence->cb,
						   amd_sched_process_job);
			if (r == -ENOENT)
				amd_sched_process_job(fence, &s_fence->cb);
			else if (r)
				DRM_ERROR("fence add callback failed (%d)\n",
					  r);
			dma_fence_put(fence);
		} else {
			DRM_ERROR("Failed to run job!\n");
			amd_sched_process_job(NULL, &s_fence->cb);
		}
		spin_lock(&sched->job_list_lock);
	}
	spin_unlock(&sched->job_list_lock);
}
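
/*
 * Illustrative sketch (not part of the original file): the call sequence a
 * driver is expected to use around a GPU reset, based on the two helpers
 * above.  example_gpu_reset() and the asic_reset() hook are hypothetical;
 * only the amd_sched_* and kthread_* calls are real.
 */
static inline void example_gpu_reset(struct amd_gpu_scheduler *sched,
				     void (*asic_reset)(void *), void *ctx)
{
	/* Park the scheduler thread so no new jobs are picked up while the
	 * hardware is being reset (see amd_sched_blocked() below). */
	kthread_park(sched->thread);

	/* Detach the hw fence callbacks of all jobs still on the
	 * ring_mirror_list and reset the hw ring count. */
	amd_sched_hw_job_reset(sched);

	/* Hypothetical driver hook that actually resets the hardware. */
	asic_reset(ctx);

	/* Resubmit the jobs on the ring_mirror_list and re-arm the TDR. */
	amd_sched_job_recovery(sched);

	kthread_unpark(sched->thread);
}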

/**
 * Submit a job to the job queue
 *
 * @sched_job The pointer to the job to submit
 *
 * Blocks until the job fits into the entity's job queue.
 */
void amd_sched_entity_push_job(struct amd_sched_job *sched_job)
{
	struct amd_sched_entity *entity = sched_job->s_entity;

	trace_amd_sched_job(sched_job);
	dma_fence_add_callback(&sched_job->s_fence->finished, &sched_job->finish_cb,
			       amd_sched_job_finish_cb);
	wait_event(entity->sched->job_scheduled,
		   amd_sched_entity_in(sched_job));
}

/* init a sched_job with the basic fields */
int amd_sched_job_init(struct amd_sched_job *job,
		       struct amd_gpu_scheduler *sched,
		       struct amd_sched_entity *entity,
		       void *owner)
{
	job->sched = sched;
	job->s_entity = entity;
	job->s_fence = amd_sched_fence_create(entity, owner);
	job->id = atomic64_inc_return(&sched->job_id_count);
	if (!job->s_fence)
		return -ENOMEM;

	INIT_WORK(&job->finish_work, amd_sched_job_finish);
	INIT_LIST_HEAD(&job->node);
	INIT_DELAYED_WORK(&job->work_tdr, amd_sched_job_timedout);

	return 0;
}
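
/*
 * Illustrative sketch (not part of the original file): the usual submission
 * path built from amd_sched_job_init() and amd_sched_entity_push_job().
 * example_submit() is hypothetical; only the amd_sched_* calls are real.
 */
static inline int example_submit(struct amd_gpu_scheduler *sched,
				 struct amd_sched_entity *entity,
				 struct amd_sched_job *job, void *owner)
{
	int r;

	/* Set up the scheduler/entity back pointers, create the scheduler
	 * fence and initialize the finish and TDR work items. */
	r = amd_sched_job_init(job, sched, entity, owner);
	if (r)
		return r;

	/* Queue the job on the entity; this blocks until there is room in
	 * the entity's job queue.  The first queued job also adds the
	 * entity to its run queue and wakes the scheduler thread. */
	amd_sched_entity_push_job(job);
	return 0;
}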

/**
 * Return true if we can push more jobs to the hw.
 */
static bool amd_sched_ready(struct amd_gpu_scheduler *sched)
{
	return atomic_read(&sched->hw_rq_count) <
		sched->hw_submission_limit;
}

/**
 * Wake up the scheduler when it is ready
 */
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched)
{
	if (amd_sched_ready(sched))
		wake_up_interruptible(&sched->wake_up_worker);
}

/**
 * Select next entity to process
*/
static struct amd_sched_entity *
amd_sched_select_entity(struct amd_gpu_scheduler *sched)
{
	struct amd_sched_entity *entity;
	int i;

	if (!amd_sched_ready(sched))
		return NULL;

	/* Kernel run queue has higher priority than normal run queue*/
	for (i = AMD_SCHED_PRIORITY_MAX - 1; i >= AMD_SCHED_PRIORITY_MIN; i--) {
		entity = amd_sched_rq_select_entity(&sched->sched_rq[i]);
		if (entity)
			break;
	}

	return entity;
}

static void amd_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct amd_sched_fence *s_fence =
		container_of(cb, struct amd_sched_fence, cb);
	struct amd_gpu_scheduler *sched = s_fence->sched;

	atomic_dec(&sched->hw_rq_count);
	amd_sched_fence_finished(s_fence);

	trace_amd_sched_process_job(s_fence);
	dma_fence_put(&s_fence->finished);
	wake_up_interruptible(&sched->wake_up_worker);
}

static bool amd_sched_blocked(struct amd_gpu_scheduler *sched)
{
	if (kthread_should_park()) {
		kthread_parkme();
		return true;
	}

	return false;
}

static int amd_sched_main(void *param)
{
	struct sched_param sparam = {.sched_priority = 1};
	struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param;
	int r, count;

	sched_setscheduler(current, SCHED_FIFO, &sparam);

	while (!kthread_should_stop()) {
		struct amd_sched_entity *entity = NULL;
		struct amd_sched_fence *s_fence;
		struct amd_sched_job *sched_job;
		struct dma_fence *fence;

		wait_event_interruptible(sched->wake_up_worker,
					 (!amd_sched_blocked(sched) &&
					  (entity = amd_sched_select_entity(sched))) ||
					 kthread_should_stop());

		if (!entity)
			continue;

		sched_job = amd_sched_entity_pop_job(entity);
		if (!sched_job)
			continue;

		s_fence = sched_job->s_fence;

		atomic_inc(&sched->hw_rq_count);
		amd_sched_job_begin(sched_job);

		fence = sched->ops->run_job(sched_job);
		amd_sched_fence_scheduled(s_fence);
		if (fence) {
			s_fence->parent = dma_fence_get(fence);
			r = dma_fence_add_callback(fence, &s_fence->cb,
						   amd_sched_process_job);
			if (r == -ENOENT)
				amd_sched_process_job(fence, &s_fence->cb);
			else if (r)
				DRM_ERROR("fence add callback failed (%d)\n",
					  r);
			dma_fence_put(fence);
		} else {
			DRM_ERROR("Failed to run job!\n");
			amd_sched_process_job(NULL, &s_fence->cb);
		}

		count = kfifo_out(&entity->job_queue, &sched_job,
				  sizeof(sched_job));
		WARN_ON(count != sizeof(sched_job));
		wake_up(&sched->job_scheduled);
	}
	return 0;
}

/**
 * Init a gpu scheduler instance
 *
 * @sched The pointer to the scheduler
 * @ops The backend operations for this scheduler.
 * @hw_submission Number of hw submissions that can be in flight.
 * @timeout Job timeout in jiffies, or MAX_SCHEDULE_TIMEOUT to disable the TDR.
 * @name Name used for debugging
 *
 * Return 0 on success, otherwise error code.
*/
int amd_sched_init(struct amd_gpu_scheduler *sched,
		   const struct amd_sched_backend_ops *ops,
		   unsigned hw_submission, long timeout, const char *name)
{
	int i;
	sched->ops = ops;
	sched->hw_submission_limit = hw_submission;
	sched->name = name;
	sched->timeout = timeout;
	for (i = AMD_SCHED_PRIORITY_MIN; i < AMD_SCHED_PRIORITY_MAX; i++)
		amd_sched_rq_init(&sched->sched_rq[i]);

	init_waitqueue_head(&sched->wake_up_worker);
	init_waitqueue_head(&sched->job_scheduled);
	INIT_LIST_HEAD(&sched->ring_mirror_list);
	spin_lock_init(&sched->job_list_lock);
	atomic_set(&sched->hw_rq_count, 0);
	atomic64_set(&sched->job_id_count, 0);

	/* Each scheduler will run on a separate kernel thread */
	sched->thread = kthread_run(amd_sched_main, sched, sched->name);
	if (IS_ERR(sched->thread)) {
		DRM_ERROR("Failed to create scheduler for %s.\n", name);
		return PTR_ERR(sched->thread);
	}

	return 0;
}
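
/*
 * Illustrative sketch (not part of the original file): a minimal backend and
 * scheduler setup.  The example_* names are hypothetical and the callback
 * signatures are only inferred from how sched->ops is used in this file; see
 * gpu_scheduler.h for the authoritative struct amd_sched_backend_ops.
 */
static struct dma_fence *example_dependency(struct amd_sched_job *sched_job)
{
	/* No dependencies: the job is always ready to run. */
	return NULL;
}

static struct dma_fence *example_run_job(struct amd_sched_job *sched_job)
{
	/* A real backend would push the job to its hardware ring and return
	 * the hardware fence; returning NULL is treated as a failure. */
	return NULL;
}

static void example_timedout_job(struct amd_sched_job *sched_job)
{
	DRM_ERROR("example: job timed out\n");
}

static void example_free_job(struct amd_sched_job *sched_job)
{
	/* A real backend would free its job structure here. */
}

static const struct amd_sched_backend_ops example_sched_ops = {
	.dependency = example_dependency,
	.run_job = example_run_job,
	.timedout_job = example_timedout_job,
	.free_job = example_free_job,
};

static inline int example_sched_setup(struct amd_gpu_scheduler *sched)
{
	/* Allow two jobs in flight on the hardware and disable the TDR. */
	return amd_sched_init(sched, &example_sched_ops, 2,
			      MAX_SCHEDULE_TIMEOUT, "example");
}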

/**
 * Destroy a gpu scheduler
 *
 * @sched The pointer to the scheduler
 */
void amd_sched_fini(struct amd_gpu_scheduler *sched)
{
	if (sched->thread)
		kthread_stop(sched->thread);
}