/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <drm/drmP.h>
28 #include "gpu_scheduler.h"
30 #define CREATE_TRACE_POINTS
31 #include "gpu_sched_trace.h"
static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity);
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
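
/*
 * The fence slab below is shared by every scheduler instance;
 * sched_fence_slab_ref counts its users, so the cache is created by the
 * first amd_sched_init() call and destroyed again by the last
 * amd_sched_fini().
 */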
struct kmem_cache *sched_fence_slab;
atomic_t sched_fence_slab_ref = ATOMIC_INIT(0);
/* Initialize a given run queue struct */
static void amd_sched_rq_init(struct amd_sched_rq *rq)
{
	spin_lock_init(&rq->lock);
	INIT_LIST_HEAD(&rq->entities);
	rq->current_entity = NULL;
}
static void amd_sched_rq_add_entity(struct amd_sched_rq *rq,
				    struct amd_sched_entity *entity)
{
	spin_lock(&rq->lock);
	list_add_tail(&entity->list, &rq->entities);
	spin_unlock(&rq->lock);
}
static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq,
				       struct amd_sched_entity *entity)
{
	spin_lock(&rq->lock);
	list_del_init(&entity->list);
	if (rq->current_entity == entity)
		rq->current_entity = NULL;
	spin_unlock(&rq->lock);
}
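
/*
 * Note: rq->lock protects both the entities list and rq->current_entity, so
 * every reader and writer of a run queue has to take it, including the
 * selection loop below.
 */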
/**
 * Select an entity which could provide a job to run
 *
 * @rq		The run queue to check.
 *
 * Try to find a ready entity; returns NULL if none is found.
 */
static struct amd_sched_entity *
amd_sched_rq_select_entity(struct amd_sched_rq *rq)
{
	struct amd_sched_entity *entity;

	spin_lock(&rq->lock);

	entity = rq->current_entity;
	if (entity) {
		list_for_each_entry_continue(entity, &rq->entities, list) {
			if (amd_sched_entity_is_ready(entity)) {
				rq->current_entity = entity;
				spin_unlock(&rq->lock);
				return entity;
			}
		}
	}

	list_for_each_entry(entity, &rq->entities, list) {
		if (amd_sched_entity_is_ready(entity)) {
			rq->current_entity = entity;
			spin_unlock(&rq->lock);
			return entity;
		}

		if (entity == rq->current_entity)
			break;
	}

	spin_unlock(&rq->lock);

	return NULL;
}
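
/*
 * Because the first scan above resumes right after rq->current_entity and the
 * second one wraps around from the head, ready entities in the same run queue
 * are serviced round-robin rather than in plain list order.
 */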
/**
 * Init a context entity which will be used for submissions to a HW ring.
 *
 * @sched	The pointer to the scheduler
 * @entity	The pointer to a valid amd_sched_entity
 * @rq		The run queue this entity belongs to
 * @jobs	The maximum number of jobs in the job queue
 *
 * Return 0 on success, negative error code on failure.
 */
int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
			  struct amd_sched_entity *entity,
			  struct amd_sched_rq *rq,
			  uint32_t jobs)
{
	int r;

	if (!(sched && entity && rq))
		return -EINVAL;

	memset(entity, 0, sizeof(struct amd_sched_entity));
	INIT_LIST_HEAD(&entity->list);
	entity->rq = rq;
	entity->sched = sched;

	spin_lock_init(&entity->queue_lock);
	r = kfifo_alloc(&entity->job_queue, jobs * sizeof(void *), GFP_KERNEL);
	if (r)
		return r;

	atomic_set(&entity->fence_seq, 0);
	entity->fence_context = fence_context_alloc(1);

	/* Add the entity to the run queue */
	amd_sched_rq_add_entity(rq, entity);

	return 0;
}
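
/*
 * The job queue is a kfifo of amd_sched_job pointers, so the @jobs argument
 * above bounds how many submissions an entity can have queued before
 * amd_sched_entity_push_job() starts to block.
 */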
/**
 * Query if entity is initialized
 *
 * @sched	Pointer to scheduler instance
 * @entity	The pointer to a valid scheduler entity
 *
 * Return true if entity is initialized, false otherwise.
 */
static bool amd_sched_entity_is_initialized(struct amd_gpu_scheduler *sched,
					    struct amd_sched_entity *entity)
{
	return entity->sched == sched &&
		entity->rq != NULL;
}
/**
 * Check if entity is idle
 *
 * @entity	The pointer to a valid scheduler entity
 *
 * Return true if the entity has no unscheduled jobs left.
 */
static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity)
{
	if (kfifo_is_empty(&entity->job_queue))
		return true;

	return false;
}
/**
 * Check if entity is ready
 *
 * @entity	The pointer to a valid scheduler entity
 *
 * Return true if the entity could provide a job.
 */
static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity)
{
	if (kfifo_is_empty(&entity->job_queue))
		return false;

	if (ACCESS_ONCE(entity->dependency))
		return false;

	return true;
}
/**
 * Destroy a context entity
 *
 * @sched	Pointer to scheduler instance
 * @entity	The pointer to a valid scheduler entity
 *
 * Cleanup and free the allocated resources.
 */
void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
			   struct amd_sched_entity *entity)
{
	struct amd_sched_rq *rq = entity->rq;

	if (!amd_sched_entity_is_initialized(sched, entity))
		return;

	/*
	 * The client will not queue more IBs during this fini; consume the
	 * existing queued IBs.
	 */
	wait_event(sched->job_scheduled, amd_sched_entity_is_idle(entity));

	amd_sched_rq_remove_entity(rq, entity);
	kfifo_free(&entity->job_queue);
}
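
/*
 * Note that amd_sched_entity_fini() only waits until every queued job has
 * been handed to the hardware (the entity is idle); it does not wait for the
 * jobs themselves to finish executing.
 */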
static void amd_sched_entity_wakeup(struct fence *f, struct fence_cb *cb)
{
	struct amd_sched_entity *entity =
		container_of(cb, struct amd_sched_entity, cb);

	entity->dependency = NULL;
	/* drop the reference held on the dependency fence */
	fence_put(f);
	amd_sched_wakeup(entity->sched);
}
static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
{
	struct amd_gpu_scheduler *sched = entity->sched;
	struct fence *fence = entity->dependency;
	struct amd_sched_fence *s_fence;

	if (fence->context == entity->fence_context) {
		/* We can ignore fences from ourself */
		fence_put(entity->dependency);
		return false;
	}

	s_fence = to_amd_sched_fence(fence);
	if (s_fence && s_fence->sched == sched) {
		/* Fence is from the same scheduler */
		if (test_bit(AMD_SCHED_FENCE_SCHEDULED_BIT, &fence->flags)) {
			/* Ignore it when it is already scheduled */
			fence_put(entity->dependency);
			return false;
		}

		/* Wait for fence to be scheduled */
		entity->cb.func = amd_sched_entity_wakeup;
		list_add_tail(&entity->cb.node, &s_fence->scheduled_cb);
		return true;
	}

	if (!fence_add_callback(entity->dependency, &entity->cb,
				amd_sched_entity_wakeup))
		return true;

	fence_put(entity->dependency);
	return false;
}
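
/*
 * Return value convention: true means a wakeup callback is now armed on the
 * dependency and the caller should stop and wait; false means the dependency
 * is already resolved (and its reference dropped), so the caller can move on
 * to the next one.
 */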
static struct amd_sched_job *
amd_sched_entity_pop_job(struct amd_sched_entity *entity)
{
	struct amd_gpu_scheduler *sched = entity->sched;
	struct amd_sched_job *sched_job;

	if (!kfifo_out_peek(&entity->job_queue, &sched_job, sizeof(sched_job)))
		return NULL;

	while ((entity->dependency = sched->ops->dependency(sched_job)))
		if (amd_sched_entity_add_dependency_cb(entity))
			return NULL;

	return sched_job;
}
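
/*
 * The job is only peeked here, not removed from the kfifo; amd_sched_main()
 * dequeues it with kfifo_out() after ops->run_job() has been called, which is
 * also what keeps amd_sched_entity_is_idle() accurate for entity teardown.
 */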
/**
 * Helper to submit a job to the job queue
 *
 * @sched_job		The pointer to the job required to submit
 *
 * Returns true if we could submit the job.
 */
static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
{
	struct amd_sched_entity *entity = sched_job->s_entity;
	bool added, first = false;

	spin_lock(&entity->queue_lock);
	added = kfifo_in(&entity->job_queue, &sched_job,
			 sizeof(sched_job)) == sizeof(sched_job);

	if (added && kfifo_len(&entity->job_queue) == sizeof(sched_job))
		first = true;

	spin_unlock(&entity->queue_lock);

	/* first job wakes up scheduler */
	if (first)
		amd_sched_wakeup(sched_job->sched);

	return added;
}
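
/*
 * The wakeup above only fires when the queue goes from empty to one entry;
 * the worker re-evaluates amd_sched_select_entity() on every loop iteration,
 * so further submissions to an already non-empty queue need no extra poke.
 */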
/**
 * Submit a job to the job queue
 *
 * @sched_job		The pointer to the job required to submit
 *
 * Blocks until the job fits into the entity's job queue.
 */
void amd_sched_entity_push_job(struct amd_sched_job *sched_job)
{
	struct amd_sched_entity *entity = sched_job->s_entity;

	wait_event(entity->sched->job_scheduled,
		   amd_sched_entity_in(sched_job));
	trace_amd_sched_job(sched_job);
}
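
/*
 * Callers are expected to have filled in sched_job->s_entity and
 * sched_job->s_fence (typically via amd_sched_fence_create()) before pushing;
 * the scheduler thread later consumes the job through ops->run_job() and
 * drops its s_fence reference in amd_sched_process_job().
 */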
/**
 * Return true if we can push more jobs to the hw.
 */
static bool amd_sched_ready(struct amd_gpu_scheduler *sched)
{
	return atomic_read(&sched->hw_rq_count) <
		sched->hw_submission_limit;
}
/**
 * Wake up the scheduler when it is ready
 */
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched)
{
	if (amd_sched_ready(sched))
		wake_up_interruptible(&sched->wake_up_worker);
}
/**
 * Select the next entity to process
 */
static struct amd_sched_entity *
amd_sched_select_entity(struct amd_gpu_scheduler *sched)
{
	struct amd_sched_entity *entity;

	if (!amd_sched_ready(sched))
		return NULL;

	/* Kernel run queue has higher priority than normal run queue */
	entity = amd_sched_rq_select_entity(&sched->kernel_rq);
	if (entity == NULL)
		entity = amd_sched_rq_select_entity(&sched->sched_rq);

	return entity;
}
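
/*
 * This returns NULL both when no entity has a runnable job and when the
 * hardware queue is already full (hw_rq_count >= hw_submission_limit), which
 * is how back-pressure towards the hardware is applied.
 */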
static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
{
	struct amd_sched_fence *s_fence =
		container_of(cb, struct amd_sched_fence, cb);
	struct amd_gpu_scheduler *sched = s_fence->sched;
	unsigned long flags;

	atomic_dec(&sched->hw_rq_count);
	amd_sched_fence_signal(s_fence);
	if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
		cancel_delayed_work(&s_fence->dwork);
		spin_lock_irqsave(&sched->fence_list_lock, flags);
		list_del_init(&s_fence->list);
		spin_unlock_irqrestore(&sched->fence_list_lock, flags);
	}
	trace_amd_sched_process_job(s_fence);
	fence_put(&s_fence->base);
	wake_up_interruptible(&sched->wake_up_worker);
}
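
/*
 * amd_sched_process_job() normally runs from the hardware fence callback
 * (possibly in interrupt context, hence the irqsave locking) and is also
 * called directly from amd_sched_main() on error paths. It signals the
 * scheduler fence, releases one hw submission slot and wakes the worker so
 * the next job can go out.
 */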
static void amd_sched_fence_work_func(struct work_struct *work)
{
	struct amd_sched_fence *s_fence =
		container_of(work, struct amd_sched_fence, dwork.work);
	struct amd_gpu_scheduler *sched = s_fence->sched;
	struct amd_sched_fence *entity, *tmp;
	unsigned long flags;

	DRM_ERROR("[%s] scheduler timed out!\n", sched->name);

	/* Clean all pending fences */
	spin_lock_irqsave(&sched->fence_list_lock, flags);
	list_for_each_entry_safe(entity, tmp, &sched->fence_list, list) {
		DRM_ERROR("  fence no %d\n", entity->base.seqno);
		cancel_delayed_work(&entity->dwork);
		list_del_init(&entity->list);
		fence_put(&entity->base);
	}
	spin_unlock_irqrestore(&sched->fence_list_lock, flags);
}
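
/*
 * The scheduler worker: waits for an entity with a runnable job, peeks the
 * job, optionally arms the timeout work, hands the job to the backend via
 * ops->run_job(), hooks completion handling onto the returned fence, then
 * dequeues the job and wakes anyone waiting in amd_sched_entity_push_job().
 */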
static int amd_sched_main(void *param)
{
	struct sched_param sparam = {.sched_priority = 1};
	struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param;
	int r, count;

	spin_lock_init(&sched->fence_list_lock);
	INIT_LIST_HEAD(&sched->fence_list);

	sched_setscheduler(current, SCHED_FIFO, &sparam);

	while (!kthread_should_stop()) {
		struct amd_sched_entity *entity;
		struct amd_sched_fence *s_fence;
		struct amd_sched_job *sched_job;
		struct fence *fence;
		unsigned long flags;

		wait_event_interruptible(sched->wake_up_worker,
			(entity = amd_sched_select_entity(sched)) ||
			kthread_should_stop());

		if (!entity)
			continue;

		sched_job = amd_sched_entity_pop_job(entity);
		if (!sched_job)
			continue;

		s_fence = sched_job->s_fence;

		if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
			INIT_DELAYED_WORK(&s_fence->dwork, amd_sched_fence_work_func);
			schedule_delayed_work(&s_fence->dwork, sched->timeout);
			spin_lock_irqsave(&sched->fence_list_lock, flags);
			list_add_tail(&s_fence->list, &sched->fence_list);
			spin_unlock_irqrestore(&sched->fence_list_lock, flags);
		}

		atomic_inc(&sched->hw_rq_count);
		fence = sched->ops->run_job(sched_job);
		amd_sched_fence_scheduled(s_fence);
		if (fence) {
			r = fence_add_callback(fence, &s_fence->cb,
					       amd_sched_process_job);
			if (r == -ENOENT)
				amd_sched_process_job(fence, &s_fence->cb);
			else if (r)
				DRM_ERROR("fence add callback failed (%d)\n", r);
			fence_put(fence);
		} else {
			DRM_ERROR("Failed to run job!\n");
			amd_sched_process_job(NULL, &s_fence->cb);
		}

		count = kfifo_out(&entity->job_queue, &sched_job,
				  sizeof(sched_job));
		WARN_ON(count != sizeof(sched_job));
		wake_up(&sched->job_scheduled);
	}
	return 0;
}
/**
 * Init a gpu scheduler instance
 *
 * @sched		The pointer to the scheduler
 * @ops			The backend operations for this scheduler.
 * @hw_submission	Number of hw submissions that can be in flight.
 * @timeout		Job timeout in jiffies, or MAX_SCHEDULE_TIMEOUT to disable it.
 * @name		Name used for debugging
 *
 * Return 0 on success, otherwise error code.
 */
int amd_sched_init(struct amd_gpu_scheduler *sched,
		   struct amd_sched_backend_ops *ops,
		   unsigned hw_submission, long timeout, const char *name)
{
	sched->ops = ops;
	sched->hw_submission_limit = hw_submission;
	sched->name = name;
	sched->timeout = timeout;
	amd_sched_rq_init(&sched->sched_rq);
	amd_sched_rq_init(&sched->kernel_rq);

	init_waitqueue_head(&sched->wake_up_worker);
	init_waitqueue_head(&sched->job_scheduled);
	atomic_set(&sched->hw_rq_count, 0);
	if (atomic_inc_return(&sched_fence_slab_ref) == 1) {
		sched_fence_slab = kmem_cache_create(
			"amd_sched_fence", sizeof(struct amd_sched_fence), 0,
			SLAB_HWCACHE_ALIGN, NULL);
		if (!sched_fence_slab)
			return -ENOMEM;
	}

	/* Each scheduler will run on a separate kernel thread */
	sched->thread = kthread_run(amd_sched_main, sched, sched->name);
	if (IS_ERR(sched->thread)) {
		DRM_ERROR("Failed to create scheduler for %s.\n", name);
		return PTR_ERR(sched->thread);
	}

	return 0;
}
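
/*
 * Minimal driver-side setup sketch (names other than the scheduler API are
 * hypothetical):
 *
 *	r = amd_sched_init(&ring->sched, &my_sched_ops, 16,
 *			   msecs_to_jiffies(2000), ring->name);
 *	if (r)
 *		return r;
 *	r = amd_sched_entity_init(&ring->sched, &ctx->entity,
 *				  &ring->sched.sched_rq, 32);
 *
 * The backend supplies at least the dependency() and run_job() callbacks in
 * amd_sched_backend_ops; those are the only ones used in this file.
 */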
/**
 * Destroy a gpu scheduler
 *
 * @sched	The pointer to the scheduler
 */
void amd_sched_fini(struct amd_gpu_scheduler *sched)
{
	if (sched->thread)
		kthread_stop(sched->thread);
	if (atomic_dec_and_test(&sched_fence_slab_ref))
		kmem_cache_destroy(sched_fence_slab);
}