/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#include <drm/drmP.h>
#include "gpu_scheduler.h"

#define CREATE_TRACE_POINTS
#include "gpu_sched_trace.h"

static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity);
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
static void amd_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb);

/* Initialize a given run queue struct */
static void amd_sched_rq_init(struct amd_sched_rq *rq)
{
	spin_lock_init(&rq->lock);
	INIT_LIST_HEAD(&rq->entities);
	rq->current_entity = NULL;
}

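/**
 * Add an entity to the run queue
 *
 * @rq The run queue to add to.
 * @entity The entity to add.
 *
 * Does nothing if the entity is already on a run queue.
 */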
static void amd_sched_rq_add_entity(struct amd_sched_rq *rq,
				    struct amd_sched_entity *entity)
{
	if (!list_empty(&entity->list))
		return;
	spin_lock(&rq->lock);
	list_add_tail(&entity->list, &rq->entities);
	spin_unlock(&rq->lock);
}

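/**
 * Remove an entity from the run queue
 *
 * @rq The run queue to remove from.
 * @entity The entity to remove.
 *
 * Clears rq->current_entity if it points to the removed entity.
 */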
static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq,
				       struct amd_sched_entity *entity)
{
	if (list_empty(&entity->list))
		return;
	spin_lock(&rq->lock);
	list_del_init(&entity->list);
	if (rq->current_entity == entity)
		rq->current_entity = NULL;
	spin_unlock(&rq->lock);
}

/**
 * Select an entity which could provide a job to run
 *
 * @rq The run queue to check.
 *
 * Try to find a ready entity; returns NULL if none is found.
 */
static struct amd_sched_entity *
amd_sched_rq_select_entity(struct amd_sched_rq *rq)
{
	struct amd_sched_entity *entity;

	spin_lock(&rq->lock);

	entity = rq->current_entity;
	if (entity) {
		list_for_each_entry_continue(entity, &rq->entities, list) {
			if (amd_sched_entity_is_ready(entity)) {
				rq->current_entity = entity;
				spin_unlock(&rq->lock);
				return entity;
			}
		}
	}

	list_for_each_entry(entity, &rq->entities, list) {

		if (amd_sched_entity_is_ready(entity)) {
			rq->current_entity = entity;
			spin_unlock(&rq->lock);
			return entity;
		}

		if (entity == rq->current_entity)
			break;
	}

	spin_unlock(&rq->lock);

	return NULL;
}

/**
 * Init a context entity used by the scheduler when submitting to a HW ring.
 *
 * @sched The pointer to the scheduler
 * @entity The pointer to a valid amd_sched_entity
 * @rq The run queue this entity belongs to
 * @jobs The max number of jobs in the job queue
 *
 * Return 0 on success, negative error code on failure.
 */
int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
			  struct amd_sched_entity *entity,
			  struct amd_sched_rq *rq,
			  uint32_t jobs)
{
	int r;

	if (!(sched && entity && rq))
		return -EINVAL;

	memset(entity, 0, sizeof(struct amd_sched_entity));
	INIT_LIST_HEAD(&entity->list);
	entity->rq = rq;
	entity->sched = sched;

	spin_lock_init(&entity->queue_lock);
	r = kfifo_alloc(&entity->job_queue, jobs * sizeof(void *), GFP_KERNEL);
	if (r)
		return r;

	atomic_set(&entity->fence_seq, 0);
	entity->fence_context = dma_fence_context_alloc(2);

	return 0;
}

/**
 * Query if entity is initialized
 *
 * @sched Pointer to scheduler instance
 * @entity The pointer to a valid scheduler entity
 *
 * Return true if entity is initialized, false otherwise.
 */
static bool amd_sched_entity_is_initialized(struct amd_gpu_scheduler *sched,
					    struct amd_sched_entity *entity)
{
	return entity->sched == sched &&
		entity->rq != NULL;
}

/**
 * Check if entity is idle
 *
 * @entity The pointer to a valid scheduler entity
 *
 * Return true if the entity doesn't have any unscheduled jobs.
 */
static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity)
{
	rmb();
	if (kfifo_is_empty(&entity->job_queue))
		return true;

	return false;
}

/**
 * Check if entity is ready
 *
 * @entity The pointer to a valid scheduler entity
 *
 * Return true if the entity could provide a job.
 */
static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity)
{
	if (kfifo_is_empty(&entity->job_queue))
		return false;

	if (ACCESS_ONCE(entity->dependency))
		return false;

	return true;
}

/**
 * Destroy a context entity
 *
 * @sched Pointer to scheduler instance
 * @entity The pointer to a valid scheduler entity
 *
 * Cleanup and free the allocated resources.
 */
void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
			   struct amd_sched_entity *entity)
{
	struct amd_sched_rq *rq = entity->rq;

	if (!amd_sched_entity_is_initialized(sched, entity))
		return;

	/*
	 * The client will not queue more IBs during this fini; consume the
	 * existing queued IBs.
	 */
	wait_event(sched->job_scheduled, amd_sched_entity_is_idle(entity));

	amd_sched_rq_remove_entity(rq, entity);
	kfifo_free(&entity->job_queue);
}

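/* Callback for the dependency fence: clear the dependency and wake up the
 * scheduler so the entity can be considered for selection again.
 */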
static void amd_sched_entity_wakeup(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct amd_sched_entity *entity =
		container_of(cb, struct amd_sched_entity, cb);
	entity->dependency = NULL;
	dma_fence_put(f);
	amd_sched_wakeup(entity->sched);
}

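/* Callback for a scheduled fence from the same scheduler: just clear the
 * dependency without waking up the worker.
 */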
static void amd_sched_entity_clear_dep(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct amd_sched_entity *entity =
		container_of(cb, struct amd_sched_entity, cb);
	entity->dependency = NULL;
	dma_fence_put(f);
}

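/**
 * Check whether waiting on a dependency fence can be optimized
 *
 * @fence The dependency fence to check
 * @entity The entity which would wait on the fence
 *
 * Return true if the unsignaled fence comes from the same fence context or
 * the same scheduler, so a full wait on it is not necessary.
 */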
bool amd_sched_dependency_optimized(struct dma_fence *fence,
				    struct amd_sched_entity *entity)
{
	struct amd_gpu_scheduler *sched = entity->sched;
	struct amd_sched_fence *s_fence;

	if (!fence || dma_fence_is_signaled(fence))
		return false;
	if (fence->context == entity->fence_context)
		return true;
	s_fence = to_amd_sched_fence(fence);
	if (s_fence && s_fence->sched == sched)
		return true;

	return false;
}

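/**
 * Install a callback on the entity's current dependency fence
 *
 * @entity The entity with a pending dependency
 *
 * Return true if a callback was installed and the entity has to wait,
 * false if the dependency could be resolved or ignored immediately.
 */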
static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
{
	struct amd_gpu_scheduler *sched = entity->sched;
	struct dma_fence *fence = entity->dependency;
	struct amd_sched_fence *s_fence;

	if (fence->context == entity->fence_context) {
		/* We can ignore fences from ourselves */
		dma_fence_put(entity->dependency);
		return false;
	}

	s_fence = to_amd_sched_fence(fence);
	if (s_fence && s_fence->sched == sched) {

		/*
		 * Fence is from the same scheduler, we only need to wait for
		 * it to be scheduled.
		 */
		fence = dma_fence_get(&s_fence->scheduled);
		dma_fence_put(entity->dependency);
		entity->dependency = fence;
		if (!dma_fence_add_callback(fence, &entity->cb,
					    amd_sched_entity_clear_dep))
			return true;

		/* Ignore it when it is already scheduled */
		dma_fence_put(fence);
		return false;
	}

	if (!dma_fence_add_callback(entity->dependency, &entity->cb,
				    amd_sched_entity_wakeup))
		return true;

	dma_fence_put(entity->dependency);
	return false;
}

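/**
 * Peek at the next job of an entity without removing it from the queue
 *
 * @entity The entity to get a job from
 *
 * Return the next job, or NULL if the queue is empty or the job still has
 * an unresolved dependency.
 */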
static struct amd_sched_job *
amd_sched_entity_pop_job(struct amd_sched_entity *entity)
{
	struct amd_gpu_scheduler *sched = entity->sched;
	struct amd_sched_job *sched_job;

	if (!kfifo_out_peek(&entity->job_queue, &sched_job, sizeof(sched_job)))
		return NULL;

	while ((entity->dependency = sched->ops->dependency(sched_job)))
		if (amd_sched_entity_add_dependency_cb(entity))
			return NULL;

	return sched_job;
}

/**
 * Helper to submit a job to the job queue
 *
 * @sched_job The pointer to the job to submit
 *
 * Returns true if we could submit the job.
 */
static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
{
	struct amd_gpu_scheduler *sched = sched_job->sched;
	struct amd_sched_entity *entity = sched_job->s_entity;
	bool added, first = false;

	spin_lock(&entity->queue_lock);
	added = kfifo_in(&entity->job_queue, &sched_job,
			 sizeof(sched_job)) == sizeof(sched_job);

	if (added && kfifo_len(&entity->job_queue) == sizeof(sched_job))
		first = true;

	spin_unlock(&entity->queue_lock);

	/* first job wakes up scheduler */
	if (first) {
		/* Add the entity to the run queue */
		amd_sched_rq_add_entity(entity->rq, entity);
		amd_sched_wakeup(sched);
	}
	return added;
}

/* amd_sched_job_finish is called after the hw fence is signaled; it removes
 * the job from the ring_mirror_list and frees it.
 */
static void amd_sched_job_finish(struct work_struct *work)
{
	struct amd_sched_job *s_job = container_of(work, struct amd_sched_job,
						   finish_work);
	struct amd_gpu_scheduler *sched = s_job->sched;

	/* remove job from ring_mirror_list */
	spin_lock(&sched->job_list_lock);
	list_del_init(&s_job->node);
	if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
		struct amd_sched_job *next;

		spin_unlock(&sched->job_list_lock);
		cancel_delayed_work_sync(&s_job->work_tdr);
		spin_lock(&sched->job_list_lock);

		/* queue TDR for next job */
		next = list_first_entry_or_null(&sched->ring_mirror_list,
						struct amd_sched_job, node);

		if (next)
			schedule_delayed_work(&next->work_tdr, sched->timeout);
	}
	spin_unlock(&sched->job_list_lock);
	sched->ops->free_job(s_job);
}

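/* Callback for the finished fence: defer the actual cleanup to a work item */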
static void amd_sched_job_finish_cb(struct dma_fence *f,
				    struct dma_fence_cb *cb)
{
	struct amd_sched_job *job = container_of(cb, struct amd_sched_job,
						 finish_cb);
	schedule_work(&job->finish_work);
}

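/* Track the job on the ring_mirror_list and, if timeouts are enabled and it
 * is the first pending job, arm the timeout (TDR) handler for it.
 */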
static void amd_sched_job_begin(struct amd_sched_job *s_job)
{
	struct amd_gpu_scheduler *sched = s_job->sched;

	spin_lock(&sched->job_list_lock);
	list_add_tail(&s_job->node, &sched->ring_mirror_list);
	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
	    list_first_entry_or_null(&sched->ring_mirror_list,
				     struct amd_sched_job, node) == s_job)
		schedule_delayed_work(&s_job->work_tdr, sched->timeout);
	spin_unlock(&sched->job_list_lock);
}

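/* TDR work handler: forward the timeout to the driver backend */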
static void amd_sched_job_timedout(struct work_struct *work)
{
	struct amd_sched_job *job = container_of(work, struct amd_sched_job,
						 work_tdr.work);

	job->sched->ops->timedout_job(job);
}

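/**
 * Detach all jobs on the mirror list from their hw fences
 *
 * @sched The scheduler to reset
 *
 * Removes the parent fence callbacks so the jobs can later be resubmitted
 * by amd_sched_job_recovery().
 */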
void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched)
{
	struct amd_sched_job *s_job;

	spin_lock(&sched->job_list_lock);
	list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) {
		if (s_job->s_fence->parent &&
		    dma_fence_remove_callback(s_job->s_fence->parent,
					      &s_job->s_fence->cb)) {
			dma_fence_put(s_job->s_fence->parent);
			s_job->s_fence->parent = NULL;
			atomic_dec(&sched->hw_rq_count);
		}
	}
	spin_unlock(&sched->job_list_lock);
}

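/* Remove a single job from the scheduler's ring_mirror_list */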
void amd_sched_job_kickout(struct amd_sched_job *s_job)
{
	struct amd_gpu_scheduler *sched = s_job->sched;

	spin_lock(&sched->job_list_lock);
	list_del_init(&s_job->node);
	spin_unlock(&sched->job_list_lock);
}

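/**
 * Resubmit all jobs on the mirror list to the hardware
 *
 * @sched The scheduler to recover
 *
 * Counterpart to amd_sched_hw_job_reset(): runs every job on the
 * ring_mirror_list again and re-installs the hw fence callbacks.
 */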
void amd_sched_job_recovery(struct amd_gpu_scheduler *sched)
{
	struct amd_sched_job *s_job, *tmp;
	int r;

	spin_lock(&sched->job_list_lock);
	s_job = list_first_entry_or_null(&sched->ring_mirror_list,
					 struct amd_sched_job, node);
	if (s_job && sched->timeout != MAX_SCHEDULE_TIMEOUT)
		schedule_delayed_work(&s_job->work_tdr, sched->timeout);

	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
		struct amd_sched_fence *s_fence = s_job->s_fence;
		struct dma_fence *fence;

		spin_unlock(&sched->job_list_lock);
		fence = sched->ops->run_job(s_job);
		atomic_inc(&sched->hw_rq_count);
		if (fence) {
			s_fence->parent = dma_fence_get(fence);
			r = dma_fence_add_callback(fence, &s_fence->cb,
						   amd_sched_process_job);
			if (r == -ENOENT)
				amd_sched_process_job(fence, &s_fence->cb);
			else if (r)
				DRM_ERROR("fence add callback failed (%d)\n",
					  r);
			dma_fence_put(fence);
		} else {
			DRM_ERROR("Failed to run job!\n");
			amd_sched_process_job(NULL, &s_fence->cb);
		}
		spin_lock(&sched->job_list_lock);
	}
	spin_unlock(&sched->job_list_lock);
}

/**
 * Submit a job to the job queue
 *
 * @sched_job The pointer to the job to submit
 *
 * Blocks until the job could be added to the entity's job queue.
 */
void amd_sched_entity_push_job(struct amd_sched_job *sched_job)
{
	struct amd_sched_entity *entity = sched_job->s_entity;

	trace_amd_sched_job(sched_job);
	dma_fence_add_callback(&sched_job->s_fence->finished, &sched_job->finish_cb,
			       amd_sched_job_finish_cb);
	wait_event(entity->sched->job_scheduled,
		   amd_sched_entity_in(sched_job));
}

/* init a sched_job with basic fields */
int amd_sched_job_init(struct amd_sched_job *job,
		       struct amd_gpu_scheduler *sched,
		       struct amd_sched_entity *entity,
		       void *owner)
{
	job->sched = sched;
	job->s_entity = entity;
	job->s_fence = amd_sched_fence_create(entity, owner);
	if (!job->s_fence)
		return -ENOMEM;
	job->id = atomic64_inc_return(&sched->job_id_count);

	INIT_WORK(&job->finish_work, amd_sched_job_finish);
	INIT_LIST_HEAD(&job->node);
	INIT_DELAYED_WORK(&job->work_tdr, amd_sched_job_timedout);

	return 0;
}

/**
 * Return true if we can push more jobs to the hw.
 */
static bool amd_sched_ready(struct amd_gpu_scheduler *sched)
{
	return atomic_read(&sched->hw_rq_count) <
		sched->hw_submission_limit;
}

/**
 * Wake up the scheduler when it is ready
 */
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched)
{
	if (amd_sched_ready(sched))
		wake_up_interruptible(&sched->wake_up_worker);
}

/**
 * Select next entity to process
 */
static struct amd_sched_entity *
amd_sched_select_entity(struct amd_gpu_scheduler *sched)
{
	struct amd_sched_entity *entity;
	int i;

	if (!amd_sched_ready(sched))
		return NULL;

	/* Kernel run queue has higher priority than normal run queue */
	for (i = AMD_SCHED_PRIORITY_MAX - 1; i >= AMD_SCHED_PRIORITY_MIN; i--) {
		entity = amd_sched_rq_select_entity(&sched->sched_rq[i]);
		if (entity)
			break;
	}

	return entity;
}

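/* Callback for the hw fence: drop the hw queue count, signal the finished
 * fence and wake up the worker thread.
 */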
static void amd_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct amd_sched_fence *s_fence =
		container_of(cb, struct amd_sched_fence, cb);
	struct amd_gpu_scheduler *sched = s_fence->sched;

	atomic_dec(&sched->hw_rq_count);
	amd_sched_fence_finished(s_fence);

	trace_amd_sched_process_job(s_fence);
	dma_fence_put(&s_fence->finished);
	wake_up_interruptible(&sched->wake_up_worker);
}

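/* Park the worker thread if requested; return true if we parked */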
static bool amd_sched_blocked(struct amd_gpu_scheduler *sched)
{
	if (kthread_should_park()) {
		kthread_parkme();
		return true;
	}

	return false;
}

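/**
 * The main scheduler thread
 *
 * @param Pointer to the amd_gpu_scheduler this thread serves
 *
 * Waits for work, picks a ready entity, pops its next job, runs it on the
 * hardware and installs amd_sched_process_job() on the resulting hw fence.
 */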
static int amd_sched_main(void *param)
{
	struct sched_param sparam = {.sched_priority = 1};
	struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param;
	int r, count;

	sched_setscheduler(current, SCHED_FIFO, &sparam);

	while (!kthread_should_stop()) {
		struct amd_sched_entity *entity = NULL;
		struct amd_sched_fence *s_fence;
		struct amd_sched_job *sched_job;
		struct dma_fence *fence;

		wait_event_interruptible(sched->wake_up_worker,
					 (!amd_sched_blocked(sched) &&
					  (entity = amd_sched_select_entity(sched))) ||
					 kthread_should_stop());

		if (!entity)
			continue;

		sched_job = amd_sched_entity_pop_job(entity);
		if (!sched_job)
			continue;

		s_fence = sched_job->s_fence;

		atomic_inc(&sched->hw_rq_count);
		amd_sched_job_begin(sched_job);

		fence = sched->ops->run_job(sched_job);
		amd_sched_fence_scheduled(s_fence);
		if (fence) {
			s_fence->parent = dma_fence_get(fence);
			r = dma_fence_add_callback(fence, &s_fence->cb,
						   amd_sched_process_job);
			if (r == -ENOENT)
				amd_sched_process_job(fence, &s_fence->cb);
			else if (r)
				DRM_ERROR("fence add callback failed (%d)\n",
					  r);
			dma_fence_put(fence);
		} else {
			DRM_ERROR("Failed to run job!\n");
			amd_sched_process_job(NULL, &s_fence->cb);
		}

		count = kfifo_out(&entity->job_queue, &sched_job,
				  sizeof(sched_job));
		WARN_ON(count != sizeof(sched_job));
		wake_up(&sched->job_scheduled);
	}
	return 0;
}

/**
 * Init a gpu scheduler instance
 *
 * @sched The pointer to the scheduler
 * @ops The backend operations for this scheduler.
 * @hw_submission The max number of in-flight hw submissions.
 * @timeout Job timeout in jiffies, or MAX_SCHEDULE_TIMEOUT to disable it.
 * @name Name used for debugging
 *
 * Return 0 on success, otherwise error code.
 */
int amd_sched_init(struct amd_gpu_scheduler *sched,
		   const struct amd_sched_backend_ops *ops,
		   unsigned hw_submission, long timeout, const char *name)
{
	int i;
	sched->ops = ops;
	sched->hw_submission_limit = hw_submission;
	sched->name = name;
	sched->timeout = timeout;
	for (i = AMD_SCHED_PRIORITY_MIN; i < AMD_SCHED_PRIORITY_MAX; i++)
		amd_sched_rq_init(&sched->sched_rq[i]);

	init_waitqueue_head(&sched->wake_up_worker);
	init_waitqueue_head(&sched->job_scheduled);
	INIT_LIST_HEAD(&sched->ring_mirror_list);
	spin_lock_init(&sched->job_list_lock);
	atomic_set(&sched->hw_rq_count, 0);
	atomic64_set(&sched->job_id_count, 0);

	/* Each scheduler will run on a separate kernel thread */
	sched->thread = kthread_run(amd_sched_main, sched, sched->name);
	if (IS_ERR(sched->thread)) {
		DRM_ERROR("Failed to create scheduler for %s.\n", name);
		return PTR_ERR(sched->thread);
	}

	return 0;
}

/**
 * Destroy a gpu scheduler
 *
 * @sched The pointer to the scheduler
 */
void amd_sched_fini(struct amd_gpu_scheduler *sched)
{
	if (sched->thread)
		kthread_stop(sched->thread);
}