/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <drm/drmP.h>
#include "gpu_scheduler.h"

#define CREATE_TRACE_POINTS
#include "gpu_sched_trace.h"
static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity);
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
struct kmem_cache *sched_fence_slab;
atomic_t sched_fence_slab_ref = ATOMIC_INIT(0);
/* Initialize a given run queue struct */
static void amd_sched_rq_init(struct amd_sched_rq *rq)
{
	spin_lock_init(&rq->lock);
	INIT_LIST_HEAD(&rq->entities);
	rq->current_entity = NULL;
}
static void amd_sched_rq_add_entity(struct amd_sched_rq *rq,
				    struct amd_sched_entity *entity)
{
	if (!list_empty(&entity->list))
		return;
	spin_lock(&rq->lock);
	list_add_tail(&entity->list, &rq->entities);
	spin_unlock(&rq->lock);
}
static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq,
				       struct amd_sched_entity *entity)
{
	if (list_empty(&entity->list))
		return;
	spin_lock(&rq->lock);
	list_del_init(&entity->list);
	if (rq->current_entity == entity)
		rq->current_entity = NULL;
	spin_unlock(&rq->lock);
}
/**
 * Select an entity which could provide a job to run
 *
 * @rq		The run queue to check.
 *
 * Try to find a ready entity, returns NULL if none found.
 */
static struct amd_sched_entity *
amd_sched_rq_select_entity(struct amd_sched_rq *rq)
{
	struct amd_sched_entity *entity;

	spin_lock(&rq->lock);

	entity = rq->current_entity;
	if (entity) {
		list_for_each_entry_continue(entity, &rq->entities, list) {
			if (amd_sched_entity_is_ready(entity)) {
				rq->current_entity = entity;
				spin_unlock(&rq->lock);
				return entity;
			}
		}
	}

	list_for_each_entry(entity, &rq->entities, list) {
		if (amd_sched_entity_is_ready(entity)) {
			rq->current_entity = entity;
			spin_unlock(&rq->lock);
			return entity;
		}

		if (entity == rq->current_entity)
			break;
	}

	spin_unlock(&rq->lock);

	return NULL;
}
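/*
 * Note: selection round-robins over the run queue. The scan continues from
 * the entity picked last time (rq->current_entity), wraps around to the list
 * head, and stops once it reaches the previous pick again, so ready entities
 * are served fairly rather than always from the front of the list.
 */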
/**
 * Init a context entity used by the scheduler when submitting to a HW ring.
 *
 * @sched	The pointer to the scheduler
 * @entity	The pointer to a valid amd_sched_entity
 * @rq		The run queue this entity belongs to
 * @jobs	The max number of jobs in the job queue
 *
 * Returns 0 on success, a negative error code on failure.
 */
int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
			  struct amd_sched_entity *entity,
			  struct amd_sched_rq *rq,
			  uint32_t jobs)
{
	int r;

	if (!(sched && entity && rq))
		return -EINVAL;

	memset(entity, 0, sizeof(struct amd_sched_entity));
	INIT_LIST_HEAD(&entity->list);
	entity->rq = rq;
	entity->sched = sched;

	spin_lock_init(&entity->queue_lock);
	r = kfifo_alloc(&entity->job_queue, jobs * sizeof(void *), GFP_KERNEL);
	if (r)
		return r;

	atomic_set(&entity->fence_seq, 0);
	entity->fence_context = fence_context_alloc(1);

	return 0;
}
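/*
 * Illustrative entity life cycle (hypothetical caller and values, based only
 * on the API in this file):
 *
 *	r = amd_sched_entity_init(sched, &entity, &sched->sched_rq[prio], 32);
 *	...
 *	r = amd_sched_job_init(job, sched, &entity, owner, &fence);
 *	amd_sched_entity_push_job(job);
 *	...
 *	amd_sched_entity_fini(sched, &entity);
 */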
/**
 * Query if entity is initialized
 *
 * @sched	Pointer to scheduler instance
 * @entity	The pointer to a valid scheduler entity
 *
 * Returns true if the entity is initialized, false otherwise.
 */
static bool amd_sched_entity_is_initialized(struct amd_gpu_scheduler *sched,
					    struct amd_sched_entity *entity)
{
	return entity->sched == sched &&
		entity->rq != NULL;
}
/**
 * Check if entity is idle
 *
 * @entity	The pointer to a valid scheduler entity
 *
 * Return true if the entity doesn't have any unscheduled jobs.
 */
static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity)
{
	if (kfifo_is_empty(&entity->job_queue))
		return true;

	return false;
}
/**
 * Check if entity is ready
 *
 * @entity	The pointer to a valid scheduler entity
 *
 * Return true if the entity could provide a job.
 */
static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity)
{
	if (kfifo_is_empty(&entity->job_queue))
		return false;

	if (ACCESS_ONCE(entity->dependency))
		return false;

	return true;
}
/**
 * Destroy a context entity
 *
 * @sched	Pointer to scheduler instance
 * @entity	The pointer to a valid scheduler entity
 *
 * Cleanup and free the allocated resources.
 */
void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
			   struct amd_sched_entity *entity)
{
	struct amd_sched_rq *rq = entity->rq;

	if (!amd_sched_entity_is_initialized(sched, entity))
		return;

	/*
	 * The client will not queue more IBs during this fini, consume
	 * existing queued IBs.
	 */
	wait_event(sched->job_scheduled, amd_sched_entity_is_idle(entity));

	amd_sched_rq_remove_entity(rq, entity);
	kfifo_free(&entity->job_queue);
}
static void amd_sched_entity_wakeup(struct fence *f, struct fence_cb *cb)
{
	struct amd_sched_entity *entity =
		container_of(cb, struct amd_sched_entity, cb);
	entity->dependency = NULL;
	fence_put(f);
	amd_sched_wakeup(entity->sched);
}
static void amd_sched_entity_clear_dep(struct fence *f, struct fence_cb *cb)
{
	struct amd_sched_entity *entity =
		container_of(cb, struct amd_sched_entity, cb);
	entity->dependency = NULL;
	fence_put(f);
}
static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
{
	struct amd_gpu_scheduler *sched = entity->sched;
	struct fence *fence = entity->dependency;
	struct amd_sched_fence *s_fence;

	if (fence->context == entity->fence_context) {
		/* We can ignore fences from ourself */
		fence_put(entity->dependency);
		return false;
	}

	s_fence = to_amd_sched_fence(fence);
	if (s_fence && s_fence->sched == sched) {
		/* Fence is from the same scheduler */
		if (test_bit(AMD_SCHED_FENCE_SCHEDULED_BIT, &fence->flags)) {
			/* Ignore it when it is already scheduled */
			fence_put(entity->dependency);
			return false;
		}

		/* Wait for fence to be scheduled */
		entity->cb.func = amd_sched_entity_clear_dep;
		list_add_tail(&entity->cb.node, &s_fence->scheduled_cb);
		return true;
	}

	if (!fence_add_callback(entity->dependency, &entity->cb,
				amd_sched_entity_wakeup))
		return true;

	fence_put(entity->dependency);
	return false;
}
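/*
 * Summary of the cases handled above: a dependency from the entity's own
 * fence context is dropped immediately; a dependency produced by the same
 * scheduler only has to wait until it is *scheduled*
 * (amd_sched_entity_clear_dep), not finished; any other fence gets a regular
 * completion callback (amd_sched_entity_wakeup) and the entity stays blocked
 * until that fence signals.
 */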
static struct amd_sched_job *
amd_sched_entity_pop_job(struct amd_sched_entity *entity)
{
	struct amd_gpu_scheduler *sched = entity->sched;
	struct amd_sched_job *sched_job;

	if (!kfifo_out_peek(&entity->job_queue, &sched_job, sizeof(sched_job)))
		return NULL;

	while ((entity->dependency = sched->ops->dependency(sched_job)))
		if (amd_sched_entity_add_dependency_cb(entity))
			return NULL;

	return sched_job;
}
/**
 * Helper to submit a job to the job queue
 *
 * @sched_job	The pointer to the job required to submit
 *
 * Returns true if we could submit the job.
 */
static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
{
	struct amd_gpu_scheduler *sched = sched_job->sched;
	struct amd_sched_entity *entity = sched_job->s_entity;
	bool added, first = false;

	spin_lock(&entity->queue_lock);
	added = kfifo_in(&entity->job_queue, &sched_job,
			 sizeof(sched_job)) == sizeof(sched_job);

	if (added && kfifo_len(&entity->job_queue) == sizeof(sched_job))
		first = true;

	spin_unlock(&entity->queue_lock);

	/* first job wakes up scheduler */
	if (first) {
		/* Add the entity to the run queue */
		amd_sched_rq_add_entity(entity->rq, entity);
		amd_sched_wakeup(sched);
	}
	return added;
}
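/*
 * Design note: an entity is only linked into its run queue when its job
 * queue goes from empty to non-empty (the "first" case above), so idle
 * entities cost the scheduler thread nothing when it scans the run queues.
 */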
/**
 * Submit a job to the job queue
 *
 * @sched_job	The pointer to the job required to submit
 *
 * Blocks until the job fits into the entity's job queue.
 */
void amd_sched_entity_push_job(struct amd_sched_job *sched_job)
{
	struct amd_sched_entity *entity = sched_job->s_entity;

	trace_amd_sched_job(sched_job);
	wait_event(entity->sched->job_scheduled,
		   amd_sched_entity_in(sched_job));
}
/* init a sched_job with the basic fields */
int amd_sched_job_init(struct amd_sched_job *job,
		       struct amd_gpu_scheduler *sched,
		       struct amd_sched_entity *entity,
		       void *owner, struct fence **fence)
{
	job->sched = sched;
	job->s_entity = entity;
	job->s_fence = amd_sched_fence_create(entity, owner);
	if (!job->s_fence)
		return -ENOMEM;

	if (fence)
		*fence = &job->s_fence->base;
	return 0;
}
/**
 * Return true if we can push more jobs to the hw.
 */
static bool amd_sched_ready(struct amd_gpu_scheduler *sched)
{
	return atomic_read(&sched->hw_rq_count) <
		sched->hw_submission_limit;
}
/**
 * Wake up the scheduler when it is ready
 */
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched)
{
	if (amd_sched_ready(sched))
		wake_up_interruptible(&sched->wake_up_worker);
}
/**
 * Select next entity to process
 */
static struct amd_sched_entity *
amd_sched_select_entity(struct amd_gpu_scheduler *sched)
{
	struct amd_sched_entity *entity;
	int i;

	if (!amd_sched_ready(sched))
		return NULL;

	/* Kernel run queue has higher priority than normal run queue */
	for (i = 0; i < AMD_SCHED_MAX_PRIORITY; i++) {
		entity = amd_sched_rq_select_entity(&sched->sched_rq[i]);
		if (entity)
			break;
	}

	return entity;
}
static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
{
	struct amd_sched_fence *s_fence =
		container_of(cb, struct amd_sched_fence, cb);
	struct amd_gpu_scheduler *sched = s_fence->sched;
	unsigned long flags;

	atomic_dec(&sched->hw_rq_count);
	amd_sched_fence_signal(s_fence);
	if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
		cancel_delayed_work(&s_fence->dwork);
		spin_lock_irqsave(&sched->fence_list_lock, flags);
		list_del_init(&s_fence->list);
		spin_unlock_irqrestore(&sched->fence_list_lock, flags);
	}
	trace_amd_sched_process_job(s_fence);
	fence_put(&s_fence->base);
	wake_up_interruptible(&sched->wake_up_worker);
}
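/*
 * hw_rq_count is the scheduler's flow control: it is incremented when a job
 * is handed to the HW ring (see amd_sched_main) and decremented here when
 * the HW fence signals, and amd_sched_ready() compares it against
 * hw_submission_limit before more work is pulled from the run queues.
 */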
static void amd_sched_fence_work_func(struct work_struct *work)
{
	struct amd_sched_fence *s_fence =
		container_of(work, struct amd_sched_fence, dwork.work);
	struct amd_gpu_scheduler *sched = s_fence->sched;
	struct amd_sched_fence *entity, *tmp;
	unsigned long flags;

	DRM_ERROR("[%s] scheduler timed out!\n", sched->name);

	/* Clean all pending fences */
	spin_lock_irqsave(&sched->fence_list_lock, flags);
	list_for_each_entry_safe(entity, tmp, &sched->fence_list, list) {
		DRM_ERROR("  fence no %d\n", entity->base.seqno);
		cancel_delayed_work(&entity->dwork);
		list_del_init(&entity->list);
		fence_put(&entity->base);
	}
	spin_unlock_irqrestore(&sched->fence_list_lock, flags);
}
static int amd_sched_main(void *param)
{
	struct sched_param sparam = {.sched_priority = 1};
	struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param;
	int r, count;

	spin_lock_init(&sched->fence_list_lock);
	INIT_LIST_HEAD(&sched->fence_list);

	sched_setscheduler(current, SCHED_FIFO, &sparam);

	while (!kthread_should_stop()) {
		struct amd_sched_entity *entity;
		struct amd_sched_fence *s_fence;
		struct amd_sched_job *sched_job;
		struct fence *fence;
		unsigned long flags;

		wait_event_interruptible(sched->wake_up_worker,
			(entity = amd_sched_select_entity(sched)) ||
			kthread_should_stop());

		if (!entity)
			continue;

		sched_job = amd_sched_entity_pop_job(entity);
		if (!sched_job)
			continue;

		s_fence = sched_job->s_fence;

		if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
			INIT_DELAYED_WORK(&s_fence->dwork, amd_sched_fence_work_func);
			schedule_delayed_work(&s_fence->dwork, sched->timeout);
			spin_lock_irqsave(&sched->fence_list_lock, flags);
			list_add_tail(&s_fence->list, &sched->fence_list);
			spin_unlock_irqrestore(&sched->fence_list_lock, flags);
		}

		atomic_inc(&sched->hw_rq_count);
		fence = sched->ops->run_job(sched_job);
		amd_sched_fence_scheduled(s_fence);
		if (fence) {
			r = fence_add_callback(fence, &s_fence->cb,
					       amd_sched_process_job);
			if (r == -ENOENT)
				amd_sched_process_job(fence, &s_fence->cb);
			else if (r)
				DRM_ERROR("fence add callback failed (%d)\n", r);
			fence_put(fence);
		} else {
			DRM_ERROR("Failed to run job!\n");
			amd_sched_process_job(NULL, &s_fence->cb);
		}

		count = kfifo_out(&entity->job_queue, &sched_job,
				  sizeof(sched_job));
		WARN_ON(count != sizeof(sched_job));
		wake_up(&sched->job_scheduled);
	}
	return 0;
}
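/*
 * Per-iteration flow of the scheduler thread above: wait until a ready
 * entity exists, peek its next job (resolving dependencies first), arm the
 * optional timeout work, hand the job to the backend via ops->run_job(),
 * hook amd_sched_process_job() onto the returned HW fence, and only then
 * pop the job from the entity's kfifo and wake waiters in push_job/fini.
 */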
/**
 * Init a gpu scheduler instance
 *
 * @sched		The pointer to the scheduler
 * @ops			The backend operations for this scheduler.
 * @hw_submission	Number of hw submissions to do.
 * @timeout		Job timeout in jiffies, or MAX_SCHEDULE_TIMEOUT for none.
 * @name		Name used for debugging
 *
 * Return 0 on success, otherwise error code.
 */
int amd_sched_init(struct amd_gpu_scheduler *sched,
		   struct amd_sched_backend_ops *ops,
		   unsigned hw_submission, long timeout, const char *name)
{
	int i;

	sched->ops = ops;
	sched->hw_submission_limit = hw_submission;
	sched->name = name;
	sched->timeout = timeout;
	for (i = 0; i < AMD_SCHED_MAX_PRIORITY; i++)
		amd_sched_rq_init(&sched->sched_rq[i]);

	init_waitqueue_head(&sched->wake_up_worker);
	init_waitqueue_head(&sched->job_scheduled);
	atomic_set(&sched->hw_rq_count, 0);
	if (atomic_inc_return(&sched_fence_slab_ref) == 1) {
		sched_fence_slab = kmem_cache_create(
			"amd_sched_fence", sizeof(struct amd_sched_fence), 0,
			SLAB_HWCACHE_ALIGN, NULL);
		if (!sched_fence_slab)
			return -ENOMEM;
	}

	/* Each scheduler will run on a separate kernel thread */
	sched->thread = kthread_run(amd_sched_main, sched, sched->name);
	if (IS_ERR(sched->thread)) {
		DRM_ERROR("Failed to create scheduler for %s.\n", name);
		return PTR_ERR(sched->thread);
	}

	return 0;
}
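/*
 * Illustrative backend hookup (hypothetical names, based only on how
 * sched->ops is used in this file): a backend provides dependency() and
 * run_job() callbacks and creates one scheduler per ring, e.g.
 *
 *	static struct amd_sched_backend_ops my_ring_ops = {
 *		.dependency = my_ring_dependency,
 *		.run_job    = my_ring_run_job,
 *	};
 *
 *	r = amd_sched_init(&ring->sched, &my_ring_ops, hw_submission,
 *			   timeout, ring->name);
 */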
/**
 * Destroy a gpu scheduler
 *
 * @sched	The pointer to the scheduler
 */
void amd_sched_fini(struct amd_gpu_scheduler *sched)
{
	if (sched->thread)
		kthread_stop(sched->thread);
	if (atomic_dec_and_test(&sched_fence_slab_ref))
		kmem_cache_destroy(sched_fence_slab);
}