/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#include <drm/drmP.h>
#include "gpu_scheduler.h"

#define CREATE_TRACE_POINTS
#include "gpu_sched_trace.h"

static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity);
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
static void amd_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb);

/* Initialize a given run queue struct */
static void amd_sched_rq_init(struct amd_sched_rq *rq)
{
        spin_lock_init(&rq->lock);
        INIT_LIST_HEAD(&rq->entities);
        rq->current_entity = NULL;
}

static void amd_sched_rq_add_entity(struct amd_sched_rq *rq,
                                    struct amd_sched_entity *entity)
{
        if (!list_empty(&entity->list))
                return;
        spin_lock(&rq->lock);
        list_add_tail(&entity->list, &rq->entities);
        spin_unlock(&rq->lock);
}

static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq,
                                       struct amd_sched_entity *entity)
{
        if (list_empty(&entity->list))
                return;
        spin_lock(&rq->lock);
        list_del_init(&entity->list);
        if (rq->current_entity == entity)
                rq->current_entity = NULL;
        spin_unlock(&rq->lock);
}

/**
 * Select an entity which could provide a job to run
 *
 * @rq		The run queue to check.
 *
 * Try to find a ready entity, returns NULL if none found.
 */
static struct amd_sched_entity *
amd_sched_rq_select_entity(struct amd_sched_rq *rq)
{
        struct amd_sched_entity *entity;

        spin_lock(&rq->lock);

        entity = rq->current_entity;
        if (entity) {
                list_for_each_entry_continue(entity, &rq->entities, list) {
                        if (amd_sched_entity_is_ready(entity)) {
                                rq->current_entity = entity;
                                spin_unlock(&rq->lock);
                                return entity;
                        }
                }
        }

        list_for_each_entry(entity, &rq->entities, list) {

                if (amd_sched_entity_is_ready(entity)) {
                        rq->current_entity = entity;
                        spin_unlock(&rq->lock);
                        return entity;
                }

                if (entity == rq->current_entity)
                        break;
        }

        spin_unlock(&rq->lock);

        return NULL;
}

/**
 * Init a context entity used by the scheduler when submitting to a HW ring.
 *
 * @sched	The pointer to the scheduler
 * @entity	The pointer to a valid amd_sched_entity
 * @rq		The run queue this entity belongs to
 * @jobs	The max number of jobs in the job queue
 *
 * Return 0 on success, negative error code on failure.
 */
int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
                          struct amd_sched_entity *entity,
                          struct amd_sched_rq *rq,
                          uint32_t jobs)
{
        int r;

        if (!(sched && entity && rq))
                return -EINVAL;

        memset(entity, 0, sizeof(struct amd_sched_entity));
        INIT_LIST_HEAD(&entity->list);
        entity->rq = rq;
        entity->sched = sched;

        spin_lock_init(&entity->queue_lock);
        r = kfifo_alloc(&entity->job_queue, jobs * sizeof(void *), GFP_KERNEL);
        if (r)
                return r;

        atomic_set(&entity->fence_seq, 0);
        entity->fence_context = dma_fence_context_alloc(2);

        return 0;
}
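
/*
 * Illustrative usage sketch (assumption, not part of this file): a driver
 * pairs amd_sched_entity_init() with amd_sched_entity_fini(). "sched",
 * "prio" and the queue depth of 32 are hypothetical placeholders; "prio"
 * must be a valid run queue index below AMD_SCHED_PRIORITY_MAX.
 *
 *	struct amd_sched_entity entity;
 *	struct amd_sched_rq *rq = &sched->sched_rq[prio];
 *	int r;
 *
 *	r = amd_sched_entity_init(sched, &entity, rq, 32);
 *	if (r)
 *		return r;
 *	// ... push jobs through this entity ...
 *	amd_sched_entity_fini(sched, &entity);
 */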

/**
 * Query if entity is initialized
 *
 * @sched	Pointer to scheduler instance
 * @entity	The pointer to a valid scheduler entity
 *
 * Return true if entity is initialized, false otherwise.
 */
static bool amd_sched_entity_is_initialized(struct amd_gpu_scheduler *sched,
                                            struct amd_sched_entity *entity)
{
        return entity->sched == sched &&
                entity->rq != NULL;
}

/**
 * Check if entity is idle
 *
 * @entity	The pointer to a valid scheduler entity
 *
 * Return true if the entity doesn't have any unscheduled jobs.
 */
static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity)
{
        rmb();
        if (kfifo_is_empty(&entity->job_queue))
                return true;

        return false;
}

/**
 * Check if entity is ready
 *
 * @entity	The pointer to a valid scheduler entity
 *
 * Return true if entity could provide a job.
 */
static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity)
{
        if (kfifo_is_empty(&entity->job_queue))
                return false;

        if (ACCESS_ONCE(entity->dependency))
                return false;

        return true;
}

/**
 * Destroy a context entity
 *
 * @sched	Pointer to scheduler instance
 * @entity	The pointer to a valid scheduler entity
 *
 * Cleanup and free the allocated resources.
 */
void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
                           struct amd_sched_entity *entity)
{
        struct amd_sched_rq *rq = entity->rq;

        if (!amd_sched_entity_is_initialized(sched, entity))
                return;

        /*
         * The client will not queue more IBs during this fini; consume the
         * existing queued IBs.
         */
        wait_event(sched->job_scheduled, amd_sched_entity_is_idle(entity));

        amd_sched_rq_remove_entity(rq, entity);
        kfifo_free(&entity->job_queue);
}

static void amd_sched_entity_wakeup(struct dma_fence *f, struct dma_fence_cb *cb)
{
        struct amd_sched_entity *entity =
                container_of(cb, struct amd_sched_entity, cb);
        entity->dependency = NULL;
        dma_fence_put(f);
        amd_sched_wakeup(entity->sched);
}

static void amd_sched_entity_clear_dep(struct dma_fence *f, struct dma_fence_cb *cb)
{
        struct amd_sched_entity *entity =
                container_of(cb, struct amd_sched_entity, cb);
        entity->dependency = NULL;
        dma_fence_put(f);
}

bool amd_sched_dependency_optimized(struct dma_fence *fence,
                                    struct amd_sched_entity *entity)
{
        struct amd_gpu_scheduler *sched = entity->sched;
        struct amd_sched_fence *s_fence;

        if (!fence || dma_fence_is_signaled(fence))
                return false;
        if (fence->context == entity->fence_context)
                return true;
        s_fence = to_amd_sched_fence(fence);
        if (s_fence && s_fence->sched == sched)
                return true;

        return false;
}
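
/*
 * Illustrative usage sketch (assumption, not part of this file): a caller can
 * use amd_sched_dependency_optimized() to decide whether extra
 * synchronization against "fence" is needed before submission.
 *
 *	if (amd_sched_dependency_optimized(fence, entity)) {
 *		// The scheduler already orders this entity's jobs behind
 *		// "fence"; no extra pipeline sync is required.
 *	}
 */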

static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
{
        struct amd_gpu_scheduler *sched = entity->sched;
        struct dma_fence *fence = entity->dependency;
        struct amd_sched_fence *s_fence;

        if (fence->context == entity->fence_context) {
                /* We can ignore fences from ourselves */
                dma_fence_put(entity->dependency);
                return false;
        }

        s_fence = to_amd_sched_fence(fence);
        if (s_fence && s_fence->sched == sched) {

                /*
                 * Fence is from the same scheduler, only need to wait for
                 * it to be scheduled
                 */
                fence = dma_fence_get(&s_fence->scheduled);
                dma_fence_put(entity->dependency);
                entity->dependency = fence;
                if (!dma_fence_add_callback(fence, &entity->cb,
                                            amd_sched_entity_clear_dep))
                        return true;

                /* Ignore it when it is already scheduled */
                dma_fence_put(fence);
                return false;
        }

        if (!dma_fence_add_callback(entity->dependency, &entity->cb,
                                    amd_sched_entity_wakeup))
                return true;

        dma_fence_put(entity->dependency);
        return false;
}

static struct amd_sched_job *
amd_sched_entity_pop_job(struct amd_sched_entity *entity)
{
        struct amd_gpu_scheduler *sched = entity->sched;
        struct amd_sched_job *sched_job;

        if (!kfifo_out_peek(&entity->job_queue, &sched_job, sizeof(sched_job)))
                return NULL;

        while ((entity->dependency = sched->ops->dependency(sched_job)))
                if (amd_sched_entity_add_dependency_cb(entity))
                        return NULL;

        return sched_job;
}

/**
 * Helper to submit a job to the job queue
 *
 * @sched_job	The pointer to the job to submit
 *
 * Returns true if we could submit the job.
 */
static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
{
        struct amd_gpu_scheduler *sched = sched_job->sched;
        struct amd_sched_entity *entity = sched_job->s_entity;
        bool added, first = false;

        spin_lock(&entity->queue_lock);
        added = kfifo_in(&entity->job_queue, &sched_job,
                         sizeof(sched_job)) == sizeof(sched_job);

        if (added && kfifo_len(&entity->job_queue) == sizeof(sched_job))
                first = true;

        spin_unlock(&entity->queue_lock);

        /* first job wakes up scheduler */
        if (first) {
                /* Add the entity to the run queue */
                amd_sched_rq_add_entity(entity->rq, entity);
                amd_sched_wakeup(sched);
        }
        return added;
}

/*
 * amd_sched_job_finish() is called after the hw fence signaled; it removes
 * the job from the ring_mirror_list and frees it.
 */
static void amd_sched_job_finish(struct work_struct *work)
{
        struct amd_sched_job *s_job = container_of(work, struct amd_sched_job,
                                                   finish_work);
        struct amd_gpu_scheduler *sched = s_job->sched;

        /* remove job from ring_mirror_list */
        spin_lock(&sched->job_list_lock);
        list_del_init(&s_job->node);
        if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
                struct amd_sched_job *next;

                spin_unlock(&sched->job_list_lock);
                cancel_delayed_work_sync(&s_job->work_tdr);
                spin_lock(&sched->job_list_lock);

                /* queue TDR for next job */
                next = list_first_entry_or_null(&sched->ring_mirror_list,
                                                struct amd_sched_job, node);

                if (next)
                        schedule_delayed_work(&next->work_tdr, sched->timeout);
        }
        spin_unlock(&sched->job_list_lock);
        sched->ops->free_job(s_job);
}

static void amd_sched_job_finish_cb(struct dma_fence *f,
                                    struct dma_fence_cb *cb)
{
        struct amd_sched_job *job = container_of(cb, struct amd_sched_job,
                                                 finish_cb);
        schedule_work(&job->finish_work);
}

static void amd_sched_job_begin(struct amd_sched_job *s_job)
{
        struct amd_gpu_scheduler *sched = s_job->sched;

        spin_lock(&sched->job_list_lock);
        list_add_tail(&s_job->node, &sched->ring_mirror_list);
        if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
            list_first_entry_or_null(&sched->ring_mirror_list,
                                     struct amd_sched_job, node) == s_job)
                schedule_delayed_work(&s_job->work_tdr, sched->timeout);
        spin_unlock(&sched->job_list_lock);
}

static void amd_sched_job_timedout(struct work_struct *work)
{
        struct amd_sched_job *job = container_of(work, struct amd_sched_job,
                                                 work_tdr.work);

        job->sched->ops->timedout_job(job);
}

void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched)
{
        struct amd_sched_job *s_job;

        spin_lock(&sched->job_list_lock);
        list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) {
                if (s_job->s_fence->parent &&
                    dma_fence_remove_callback(s_job->s_fence->parent,
                                              &s_job->s_fence->cb)) {
                        dma_fence_put(s_job->s_fence->parent);
                        s_job->s_fence->parent = NULL;
                        atomic_dec(&sched->hw_rq_count);
                }
        }
        spin_unlock(&sched->job_list_lock);
}

void amd_sched_job_kickout(struct amd_sched_job *s_job)
{
        struct amd_gpu_scheduler *sched = s_job->sched;

        spin_lock(&sched->job_list_lock);
        list_del_init(&s_job->node);
        spin_unlock(&sched->job_list_lock);
}

void amd_sched_job_recovery(struct amd_gpu_scheduler *sched)
{
        struct amd_sched_job *s_job, *tmp;
        int r;

        spin_lock(&sched->job_list_lock);
        s_job = list_first_entry_or_null(&sched->ring_mirror_list,
                                         struct amd_sched_job, node);
        if (s_job && sched->timeout != MAX_SCHEDULE_TIMEOUT)
                schedule_delayed_work(&s_job->work_tdr, sched->timeout);

        list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
                struct amd_sched_fence *s_fence = s_job->s_fence;
                struct dma_fence *fence;

                spin_unlock(&sched->job_list_lock);
                fence = sched->ops->run_job(s_job);
                atomic_inc(&sched->hw_rq_count);
                if (fence) {
                        s_fence->parent = dma_fence_get(fence);
                        r = dma_fence_add_callback(fence, &s_fence->cb,
                                                   amd_sched_process_job);
                        if (r == -ENOENT)
                                amd_sched_process_job(fence, &s_fence->cb);
                        else if (r)
                                DRM_ERROR("fence add callback failed (%d)\n",
                                          r);
                        dma_fence_put(fence);
                } else {
                        DRM_ERROR("Failed to run job!\n");
                        amd_sched_process_job(NULL, &s_fence->cb);
                }
                spin_lock(&sched->job_list_lock);
        }
        spin_unlock(&sched->job_list_lock);
}
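
/*
 * Illustrative recovery sketch (assumption, not part of this file): during a
 * GPU reset a driver would typically park the scheduler thread, detach the
 * hardware fences, optionally drop a guilty job, reset the hardware and then
 * replay the remaining jobs. "guilty_job" and reset_hw() are hypothetical
 * placeholders.
 *
 *	kthread_park(sched->thread);
 *	amd_sched_hw_job_reset(sched);
 *	amd_sched_job_kickout(guilty_job);
 *	reset_hw();
 *	amd_sched_job_recovery(sched);
 *	kthread_unpark(sched->thread);
 */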

/**
 * Submit a job to the job queue
 *
 * @sched_job	The pointer to the job to submit
 *
 * Blocks until the job can be queued on its entity, then wakes up the
 * scheduler.
 */
void amd_sched_entity_push_job(struct amd_sched_job *sched_job)
{
        struct amd_sched_entity *entity = sched_job->s_entity;

        trace_amd_sched_job(sched_job);
        dma_fence_add_callback(&sched_job->s_fence->finished, &sched_job->finish_cb,
                               amd_sched_job_finish_cb);
        wait_event(entity->sched->job_scheduled,
                   amd_sched_entity_in(sched_job));
}

/* init a sched_job with its basic fields */
int amd_sched_job_init(struct amd_sched_job *job,
                       struct amd_gpu_scheduler *sched,
                       struct amd_sched_entity *entity,
                       void *owner)
{
        job->sched = sched;
        job->s_entity = entity;
        job->s_fence = amd_sched_fence_create(entity, owner);
        if (!job->s_fence)
                return -ENOMEM;
        job->id = atomic64_inc_return(&sched->job_id_count);

        INIT_WORK(&job->finish_work, amd_sched_job_finish);
        INIT_LIST_HEAD(&job->node);
        INIT_DELAYED_WORK(&job->work_tdr, amd_sched_job_timedout);

        return 0;
}
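
/*
 * Illustrative submission sketch (assumption, not part of this file): a
 * driver-side job is initialized once and then pushed to its entity; the
 * scheduler thread later picks it up and calls ops->run_job(). "my_job" (a
 * driver structure embedding an amd_sched_job as "base") and "owner" are
 * hypothetical placeholders.
 *
 *	r = amd_sched_job_init(&my_job->base, sched, &entity, owner);
 *	if (r)
 *		return r;
 *	amd_sched_entity_push_job(&my_job->base);
 */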

/**
 * Return true if we can push more jobs to the hw.
 */
static bool amd_sched_ready(struct amd_gpu_scheduler *sched)
{
        return atomic_read(&sched->hw_rq_count) <
                sched->hw_submission_limit;
}

/**
 * Wake up the scheduler when it is ready
 */
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched)
{
        if (amd_sched_ready(sched))
                wake_up_interruptible(&sched->wake_up_worker);
}

/**
 * Select next entity to process
 */
static struct amd_sched_entity *
amd_sched_select_entity(struct amd_gpu_scheduler *sched)
{
        struct amd_sched_entity *entity;
        int i;

        if (!amd_sched_ready(sched))
                return NULL;

        /* Kernel run queue has higher priority than normal run queue */
        for (i = AMD_SCHED_PRIORITY_MAX - 1; i >= AMD_SCHED_PRIORITY_MIN; i--) {
                entity = amd_sched_rq_select_entity(&sched->sched_rq[i]);
                if (entity)
                        break;
        }

        return entity;
}

static void amd_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
{
        struct amd_sched_fence *s_fence =
                container_of(cb, struct amd_sched_fence, cb);
        struct amd_gpu_scheduler *sched = s_fence->sched;

        atomic_dec(&sched->hw_rq_count);
        amd_sched_fence_finished(s_fence);

        trace_amd_sched_process_job(s_fence);
        dma_fence_put(&s_fence->finished);
        wake_up_interruptible(&sched->wake_up_worker);
}

static bool amd_sched_blocked(struct amd_gpu_scheduler *sched)
{
        if (kthread_should_park()) {
                kthread_parkme();
                return true;
        }

        return false;
}

static int amd_sched_main(void *param)
{
        struct sched_param sparam = {.sched_priority = 1};
        struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param;
        int r, count;

        sched_setscheduler(current, SCHED_FIFO, &sparam);

        while (!kthread_should_stop()) {
                struct amd_sched_entity *entity = NULL;
                struct amd_sched_fence *s_fence;
                struct amd_sched_job *sched_job;
                struct dma_fence *fence;

                wait_event_interruptible(sched->wake_up_worker,
                                         (!amd_sched_blocked(sched) &&
                                          (entity = amd_sched_select_entity(sched))) ||
                                         kthread_should_stop());

                if (!entity)
                        continue;

                sched_job = amd_sched_entity_pop_job(entity);
                if (!sched_job)
                        continue;

                s_fence = sched_job->s_fence;

                atomic_inc(&sched->hw_rq_count);
                amd_sched_job_begin(sched_job);

                fence = sched->ops->run_job(sched_job);
                amd_sched_fence_scheduled(s_fence);
                if (fence) {
                        s_fence->parent = dma_fence_get(fence);
                        r = dma_fence_add_callback(fence, &s_fence->cb,
                                                   amd_sched_process_job);
                        if (r == -ENOENT)
                                amd_sched_process_job(fence, &s_fence->cb);
                        else if (r)
                                DRM_ERROR("fence add callback failed (%d)\n",
                                          r);
                        dma_fence_put(fence);
                } else {
                        DRM_ERROR("Failed to run job!\n");
                        amd_sched_process_job(NULL, &s_fence->cb);
                }

                count = kfifo_out(&entity->job_queue, &sched_job,
                                  sizeof(sched_job));
                WARN_ON(count != sizeof(sched_job));
                wake_up(&sched->job_scheduled);
        }
        return 0;
}

/**
 * Init a gpu scheduler instance
 *
 * @sched		The pointer to the scheduler
 * @ops			The backend operations for this scheduler.
 * @hw_submission	Number of hw submissions to do.
 * @timeout		Job timeout in jiffies, or MAX_SCHEDULE_TIMEOUT to disable it.
 * @name		Name used for debugging
 *
 * Return 0 on success, otherwise error code.
 */
int amd_sched_init(struct amd_gpu_scheduler *sched,
                   const struct amd_sched_backend_ops *ops,
                   unsigned hw_submission, long timeout, const char *name)
{
        int i;

        sched->ops = ops;
        sched->hw_submission_limit = hw_submission;
        sched->name = name;
        sched->timeout = timeout;
        for (i = AMD_SCHED_PRIORITY_MIN; i < AMD_SCHED_PRIORITY_MAX; i++)
                amd_sched_rq_init(&sched->sched_rq[i]);

        init_waitqueue_head(&sched->wake_up_worker);
        init_waitqueue_head(&sched->job_scheduled);
        INIT_LIST_HEAD(&sched->ring_mirror_list);
        spin_lock_init(&sched->job_list_lock);
        atomic_set(&sched->hw_rq_count, 0);
        atomic64_set(&sched->job_id_count, 0);

        /* Each scheduler will run on a separate kernel thread */
        sched->thread = kthread_run(amd_sched_main, sched, sched->name);
        if (IS_ERR(sched->thread)) {
                DRM_ERROR("Failed to create scheduler for %s.\n", name);
                return PTR_ERR(sched->thread);
        }

        return 0;
}
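
/*
 * Illustrative setup sketch (assumption, not part of this file): a driver
 * fills in an amd_sched_backend_ops table and brings the scheduler up once
 * per hardware ring. The callback names, the submission depth of 16 and the
 * "ring" structure are hypothetical placeholders.
 *
 *	static const struct amd_sched_backend_ops my_ops = {
 *		.dependency	= my_dependency,
 *		.run_job	= my_run_job,
 *		.timedout_job	= my_timedout_job,
 *		.free_job	= my_free_job,
 *	};
 *
 *	r = amd_sched_init(&ring->sched, &my_ops, 16,
 *			   msecs_to_jiffies(10000), ring->name);
 */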

/**
 * Destroy a gpu scheduler
 *
 * @sched	The pointer to the scheduler
 */
void amd_sched_fini(struct amd_gpu_scheduler *sched)
{
        if (sched->thread)
                kthread_stop(sched->thread);
}