/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <drm/drmP.h>
#include "gpu_scheduler.h"

#define CREATE_TRACE_POINTS
#include "gpu_sched_trace.h"

static struct amd_sched_job *
amd_sched_entity_pop_job(struct amd_sched_entity *entity);
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);

/* Initialize a given run queue struct */
static void amd_sched_rq_init(struct amd_sched_rq *rq)
{
	spin_lock_init(&rq->lock);
	INIT_LIST_HEAD(&rq->entities);
	rq->current_entity = NULL;
}

static void amd_sched_rq_add_entity(struct amd_sched_rq *rq,
				    struct amd_sched_entity *entity)
{
	spin_lock(&rq->lock);
	list_add_tail(&entity->list, &rq->entities);
	spin_unlock(&rq->lock);
}

static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq,
				       struct amd_sched_entity *entity)
{
	spin_lock(&rq->lock);
	list_del_init(&entity->list);
	if (rq->current_entity == entity)
		rq->current_entity = NULL;
	spin_unlock(&rq->lock);
}

/**
 * Select the next job from a specified run queue with round robin policy.
 * Return NULL if nothing is available.
 */
static struct amd_sched_job *
amd_sched_rq_select_job(struct amd_sched_rq *rq)
{
	struct amd_sched_entity *entity;
	struct amd_sched_job *sched_job;

	spin_lock(&rq->lock);

	entity = rq->current_entity;
	if (entity) {
		list_for_each_entry_continue(entity, &rq->entities, list) {
			sched_job = amd_sched_entity_pop_job(entity);
			if (sched_job) {
				rq->current_entity = entity;
				spin_unlock(&rq->lock);
				return sched_job;
			}
		}
	}

	list_for_each_entry(entity, &rq->entities, list) {
		sched_job = amd_sched_entity_pop_job(entity);
		if (sched_job) {
			rq->current_entity = entity;
			spin_unlock(&rq->lock);
			return sched_job;
		}

		if (entity == rq->current_entity)
			break;
	}

	spin_unlock(&rq->lock);

	return NULL;
}

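/*
 * Round robin in practice (illustrative note, not part of the original
 * file): with entities A, B and C on the run queue and
 * rq->current_entity == B, the first loop checks C; if C has no job,
 * the second loop checks A, then B, and stops after B because we are
 * back at the entity we started from. Whichever entity first yields a
 * job becomes the new current_entity, so the next selection starts
 * just after it.
 */
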
/**
 * Init a context entity used by the scheduler when submitting to a HW ring.
 *
 * @sched	The pointer to the scheduler
 * @entity	The pointer to a valid amd_sched_entity
 * @rq		The run queue this entity belongs to
 * @jobs	The max number of jobs in the job queue
 *
 * Return 0 if succeeded, negative error code on failure.
 */
int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
			  struct amd_sched_entity *entity,
			  struct amd_sched_rq *rq,
			  uint32_t jobs)
{
	int r;

	if (!(sched && entity && rq))
		return -EINVAL;

	memset(entity, 0, sizeof(struct amd_sched_entity));
	INIT_LIST_HEAD(&entity->list);
	entity->rq = rq;
	entity->sched = sched;

	spin_lock_init(&entity->queue_lock);
	r = kfifo_alloc(&entity->job_queue, jobs * sizeof(void *), GFP_KERNEL);
	if (r)
		return r;

	atomic_set(&entity->fence_seq, 0);
	entity->fence_context = fence_context_alloc(1);

	/* Add the entity to the run queue */
	amd_sched_rq_add_entity(rq, entity);

	return 0;
}

/**
 * Query if entity is initialized
 *
 * @sched	Pointer to scheduler instance
 * @entity	The pointer to a valid scheduler entity
 *
 * Return true if entity is initialized, false otherwise.
 */
static bool amd_sched_entity_is_initialized(struct amd_gpu_scheduler *sched,
					    struct amd_sched_entity *entity)
{
	return entity->sched == sched &&
		entity->rq != NULL;
}

/**
 * Check if entity is idle
 *
 * @entity	The pointer to a valid scheduler entity
 *
 * Return true if the entity doesn't have any unscheduled jobs.
 */
static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity)
{
	rmb();
	if (kfifo_is_empty(&entity->job_queue))
		return true;

	return false;
}

/**
 * Destroy a context entity
 *
 * @sched	Pointer to scheduler instance
 * @entity	The pointer to a valid scheduler entity
 *
 * Cleanup and free the allocated resources.
 */
void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
			   struct amd_sched_entity *entity)
{
	struct amd_sched_rq *rq = entity->rq;

	if (!amd_sched_entity_is_initialized(sched, entity))
		return;

	/*
	 * The client will not queue more IBs during this fini, consume
	 * existing queued IBs.
	 */
	wait_event(sched->job_scheduled, amd_sched_entity_is_idle(entity));

	amd_sched_rq_remove_entity(rq, entity);
	kfifo_free(&entity->job_queue);
}

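/*
 * Hypothetical usage sketch (not part of the original file): how a
 * caller might drive the entity lifecycle above. "my_sched" and
 * MY_JOB_QUEUE_DEPTH are assumed placeholders; guarded out with #if 0
 * so it does not affect the build.
 */
#if 0
static int my_context_init(struct amd_gpu_scheduler *my_sched,
			   struct amd_sched_entity *entity)
{
	/* Normal work goes to sched_rq; kernel_rq is for kernel jobs. */
	int r = amd_sched_entity_init(my_sched, entity,
				      &my_sched->sched_rq,
				      MY_JOB_QUEUE_DEPTH);
	if (r)
		return r;

	/* ... push jobs against the entity ... */

	/* Blocks until all queued jobs of the entity were scheduled. */
	amd_sched_entity_fini(my_sched, entity);
	return 0;
}
#endif
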
static void amd_sched_entity_wakeup(struct fence *f, struct fence_cb *cb)
{
	struct amd_sched_entity *entity =
		container_of(cb, struct amd_sched_entity, cb);
	entity->dependency = NULL;
	fence_put(f);
	amd_sched_wakeup(entity->sched);
}

static struct amd_sched_job *
amd_sched_entity_pop_job(struct amd_sched_entity *entity)
{
	struct amd_gpu_scheduler *sched = entity->sched;
	struct amd_sched_job *sched_job;

	if (ACCESS_ONCE(entity->dependency))
		return NULL;

	if (!kfifo_out_peek(&entity->job_queue, &sched_job, sizeof(sched_job)))
		return NULL;

	while ((entity->dependency = sched->ops->dependency(sched_job))) {
		if (fence_add_callback(entity->dependency, &entity->cb,
				       amd_sched_entity_wakeup))
			fence_put(entity->dependency);
		else
			return NULL;
	}

	return sched_job;
}

/**
 * Helper to submit a job to the job queue
 *
 * @sched_job	The pointer to the job to submit
 *
 * Returns true if we could submit the job.
 */
static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
{
	struct amd_sched_entity *entity = sched_job->s_entity;
	bool added, first = false;

	spin_lock(&entity->queue_lock);
	added = kfifo_in(&entity->job_queue, &sched_job,
			 sizeof(sched_job)) == sizeof(sched_job);

	if (added && kfifo_len(&entity->job_queue) == sizeof(sched_job))
		first = true;

	spin_unlock(&entity->queue_lock);

	/* first job wakes up scheduler */
	if (first)
		amd_sched_wakeup(sched_job->sched);

	return added;
}

/**
 * Submit a job to the job queue
 *
 * @sched_job	The pointer to the job to submit
 *
 * Returns 0 for success, negative error code otherwise.
 */
int amd_sched_entity_push_job(struct amd_sched_job *sched_job)
{
	struct amd_sched_entity *entity = sched_job->s_entity;
	struct amd_sched_fence *fence = amd_sched_fence_create(
		entity, sched_job->owner);

	if (!fence)
		return -ENOMEM;

	fence_get(&fence->base);
	sched_job->s_fence = fence;

	wait_event(entity->sched->job_scheduled,
		   amd_sched_entity_in(sched_job));
	trace_amd_sched_job(sched_job);
	return 0;
}

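/*
 * Hypothetical submission sketch (not part of the original file): a
 * backend would typically embed struct amd_sched_job in its own job
 * structure, fill in sched, s_entity and owner, and then call
 * amd_sched_entity_push_job(). The field names follow their use in
 * this file; "my_submit" is an assumed placeholder. Guarded out of
 * the build.
 */
#if 0
static int my_submit(struct amd_gpu_scheduler *sched,
		     struct amd_sched_entity *entity,
		     struct amd_sched_job *sched_job, void *owner)
{
	sched_job->sched = sched;
	sched_job->s_entity = entity;
	sched_job->owner = owner;

	/* May block until there is room in the entity's job queue. */
	return amd_sched_entity_push_job(sched_job);
}
#endif
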
/**
 * Return true if we can push more jobs to the hw.
 */
static bool amd_sched_ready(struct amd_gpu_scheduler *sched)
{
	return atomic_read(&sched->hw_rq_count) <
		sched->hw_submission_limit;
}

/**
 * Wake up the scheduler when it is ready
 */
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched)
{
	if (amd_sched_ready(sched))
		wake_up_interruptible(&sched->wake_up_worker);
}

/**
 * Select the next job to run
 */
static struct amd_sched_job *
amd_sched_select_job(struct amd_gpu_scheduler *sched)
{
	struct amd_sched_job *sched_job;

	if (!amd_sched_ready(sched))
		return NULL;

	/* Kernel run queue has higher priority than normal run queue */
	sched_job = amd_sched_rq_select_job(&sched->kernel_rq);
	if (sched_job == NULL)
		sched_job = amd_sched_rq_select_job(&sched->sched_rq);

	return sched_job;
}

static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
{
	struct amd_sched_fence *s_fence =
		container_of(cb, struct amd_sched_fence, cb);
	struct amd_gpu_scheduler *sched = s_fence->sched;

	atomic_dec(&sched->hw_rq_count);
	amd_sched_fence_signal(s_fence);
	fence_put(&s_fence->base);
	wake_up_interruptible(&sched->wake_up_worker);
}

static int amd_sched_main(void *param)
{
	struct sched_param sparam = {.sched_priority = 1};
	struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param;
	int r, count;

	sched_setscheduler(current, SCHED_FIFO, &sparam);

	while (!kthread_should_stop()) {
		struct amd_sched_entity *entity;
		struct amd_sched_fence *s_fence;
		struct amd_sched_job *sched_job;
		struct fence *fence;

		wait_event_interruptible(sched->wake_up_worker,
			kthread_should_stop() ||
			(sched_job = amd_sched_select_job(sched)));

		if (!sched_job)
			continue;

		entity = sched_job->s_entity;
		s_fence = sched_job->s_fence;
		atomic_inc(&sched->hw_rq_count);
		fence = sched->ops->run_job(sched_job);
		if (fence) {
			r = fence_add_callback(fence, &s_fence->cb,
					       amd_sched_process_job);
			if (r == -ENOENT)
				amd_sched_process_job(fence, &s_fence->cb);
			else if (r)
				DRM_ERROR("fence add callback failed (%d)\n", r);
			fence_put(fence);
		} else {
			DRM_ERROR("Failed to run job!\n");
			amd_sched_process_job(NULL, &s_fence->cb);
		}

		count = kfifo_out(&entity->job_queue, &sched_job,
				  sizeof(sched_job));
		WARN_ON(count != sizeof(sched_job));
		wake_up(&sched->job_scheduled);
	}
	return 0;
}

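/*
 * Hypothetical backend sketch (not part of the original file): the
 * main loop above only dereferences two callbacks, so a minimal
 * backend could look like this. The actual layout of struct
 * amd_sched_backend_ops lives in gpu_scheduler.h; the member names
 * here are inferred from the sched->ops->dependency()/run_job() calls
 * above. Guarded out of the build.
 */
#if 0
static struct fence *my_dependency(struct amd_sched_job *sched_job)
{
	/* Return a fence the job still waits on, or NULL if runnable. */
	return NULL;
}

static struct fence *my_run_job(struct amd_sched_job *sched_job)
{
	/*
	 * Push the job to the hardware ring and return the hardware
	 * fence; amd_sched_process_job() runs when it signals.
	 */
	return NULL;
}

static struct amd_sched_backend_ops my_ops = {
	.dependency = my_dependency,
	.run_job = my_run_job,
};
#endif
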
/**
 * Create a gpu scheduler
 *
 * @ops	The backend operations for this scheduler.
 * @ring	The ring id for the scheduler.
 * @hw_submission	The max number of in-flight hw submissions.
 * @priv	Private data pointer, stored in the scheduler.
 *
 * Return the pointer to the scheduler on success, otherwise return NULL.
 */
struct amd_gpu_scheduler *amd_sched_create(struct amd_sched_backend_ops *ops,
					   unsigned ring, unsigned hw_submission,
					   void *priv)
{
	struct amd_gpu_scheduler *sched;

	sched = kzalloc(sizeof(struct amd_gpu_scheduler), GFP_KERNEL);
	if (!sched)
		return NULL;

	sched->ops = ops;
	sched->ring_id = ring;
	sched->hw_submission_limit = hw_submission;
	sched->priv = priv;
	snprintf(sched->name, sizeof(sched->name), "amdgpu[%d]", ring);
	amd_sched_rq_init(&sched->sched_rq);
	amd_sched_rq_init(&sched->kernel_rq);

	init_waitqueue_head(&sched->wake_up_worker);
	init_waitqueue_head(&sched->job_scheduled);
	atomic_set(&sched->hw_rq_count, 0);
	/* Each scheduler will run on a separate kernel thread */
	sched->thread = kthread_run(amd_sched_main, sched, sched->name);
	if (IS_ERR(sched->thread)) {
		DRM_ERROR("Failed to create scheduler for id %d.\n", ring);
		kfree(sched);
		return NULL;
	}

	return sched;
}

/**
 * Destroy a gpu scheduler
 *
 * @sched	The pointer to the scheduler
 *
 * Return 0 if succeeded, -1 on failure.
 */
int amd_sched_destroy(struct amd_gpu_scheduler *sched)
{
	kthread_stop(sched->thread);
	kfree(sched);
	return 0;
}
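
/*
 * Hypothetical setup sketch (not part of the original file): creating
 * and tearing down one scheduler per hardware ring. "my_ops",
 * MY_RING_ID and MY_HW_SUBMISSION_LIMIT are assumed placeholders.
 * Guarded out of the build.
 */
#if 0
static struct amd_gpu_scheduler *my_sched_setup(void)
{
	/* hw_submission bounds jobs in flight, see amd_sched_ready(). */
	struct amd_gpu_scheduler *sched =
		amd_sched_create(&my_ops, MY_RING_ID,
				 MY_HW_SUBMISSION_LIMIT, NULL);
	if (!sched)
		return NULL;

	/* ... init entities, push jobs ... */
	/* Later: amd_sched_destroy(sched); */
	return sched;
}
#endif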