/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <drm/drmP.h>
#include "gpu_scheduler.h"

/* Initialize a given run queue struct */
static void amd_sched_rq_init(struct amd_sched_rq *rq)
{
	spin_lock_init(&rq->lock);
	INIT_LIST_HEAD(&rq->entities);
	rq->current_entity = NULL;
}

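/**
 * Add an entity to a run queue
 *
 * Appends the entity to the tail of the queue's entity list under the
 * run queue lock, making it eligible for round robin selection.
 */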
static void amd_sched_rq_add_entity(struct amd_sched_rq *rq,
				    struct amd_sched_entity *entity)
{
	spin_lock(&rq->lock);
	list_add_tail(&entity->list, &rq->entities);
	spin_unlock(&rq->lock);
}

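/**
 * Remove an entity from a run queue
 *
 * Unlinks the entity under the run queue lock and resets the round robin
 * cursor if it currently points at the removed entity.
 */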
static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq,
				       struct amd_sched_entity *entity)
{
	spin_lock(&rq->lock);
	list_del_init(&entity->list);
	if (rq->current_entity == entity)
		rq->current_entity = NULL;
	spin_unlock(&rq->lock);
}

/**
 * Select the next entity from a run queue with round robin policy.
 * May return the same entity as the current one if it is the only
 * available one in the queue. Returns NULL if nothing is available.
 */
static struct amd_sched_entity *
amd_sched_rq_select_entity(struct amd_sched_rq *rq)
{
	struct amd_sched_entity *entity;

	spin_lock(&rq->lock);

	/* Resume the scan after the last selected entity */
	entity = rq->current_entity;
	if (entity) {
		list_for_each_entry_continue(entity, &rq->entities, list) {
			if (!kfifo_is_empty(&entity->job_queue)) {
				rq->current_entity = entity;
				spin_unlock(&rq->lock);
				return rq->current_entity;
			}
		}
	}

	/* Wrap around and scan from the head up to the current entity */
	list_for_each_entry(entity, &rq->entities, list) {

		if (!kfifo_is_empty(&entity->job_queue)) {
			rq->current_entity = entity;
			spin_unlock(&rq->lock);
			return rq->current_entity;
		}

		if (entity == rq->current_entity)
			break;
	}

	spin_unlock(&rq->lock);

	return NULL;
}

/**
 * Init a context entity used by the scheduler when submitting to a HW ring.
 *
 * @sched	The pointer to the scheduler
 * @entity	The pointer to a valid amd_sched_entity
 * @rq		The run queue this entity belongs to
 * @jobs	The max number of jobs in the job queue
 *
 * return 0 if succeeded, negative error code on failure
 */
int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
			  struct amd_sched_entity *entity,
			  struct amd_sched_rq *rq,
			  uint32_t jobs)
{
	char name[20];

	if (!(sched && entity && rq))
		return -EINVAL;

	memset(entity, 0, sizeof(struct amd_sched_entity));
	entity->belongto_rq = rq;
	entity->scheduler = sched;
	init_waitqueue_head(&entity->wait_queue);
	entity->fence_context = fence_context_alloc(1);
	snprintf(name, sizeof(name), "c_entity[%llu]", entity->fence_context);
	memcpy(entity->name, name, sizeof(name));
	entity->need_wakeup = false;
	if (kfifo_alloc(&entity->job_queue,
			jobs * sizeof(void *),
			GFP_KERNEL))
		return -EINVAL;

	spin_lock_init(&entity->queue_lock);
	atomic_set(&entity->fence_seq, 0);

	/* Add the entity to the run queue */
	amd_sched_rq_add_entity(rq, entity);
	return 0;
}

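/*
 * Usage sketch (illustrative only, not part of this file): a driver
 * typically creates one scheduler per ring and one entity per context,
 * then pushes jobs through the entity. The ops table, ring id and
 * queue depths below are hypothetical values:
 *
 *	sched = amd_sched_create(&my_ops, ring_id, 16);
 *	amd_sched_entity_init(sched, &entity, &sched->sched_rq, 32);
 *	...
 *	sched_job->s_entity = &entity;
 *	sched_job->sched = sched;
 *	amd_sched_push_job(sched_job);
 */
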
/**
 * Query if the entity is initialized
 *
 * @sched	Pointer to the scheduler instance
 * @entity	The pointer to a valid scheduler entity
 *
 * return true if the entity is initialized, false otherwise
 */
static bool amd_sched_entity_is_initialized(struct amd_gpu_scheduler *sched,
					    struct amd_sched_entity *entity)
{
	return entity->scheduler == sched &&
		entity->belongto_rq != NULL;
}

/**
 * Check if the entity is idle
 *
 * @entity	The pointer to a valid scheduler entity
 *
 * Return true if the entity has no unscheduled jobs left.
 */
static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity)
{
	/* make sure we see the most recent job_queue state */
	rmb();
	if (kfifo_is_empty(&entity->job_queue))
		return true;

	return false;
}

/**
 * Destroy a context entity
 *
 * @sched	Pointer to the scheduler instance
 * @entity	The pointer to a valid scheduler entity
 *
 * Waits for any queued jobs to be consumed before removing the entity
 * from its run queue. Returns the result of that wait, or 0 if the
 * entity was never initialized.
 */
int amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
			  struct amd_sched_entity *entity)
{
	struct amd_sched_rq *rq = entity->belongto_rq;
	long r;

	if (!amd_sched_entity_is_initialized(sched, entity))
		return 0;
	entity->need_wakeup = true;
	/*
	 * The client will not queue more IBs during this fini, consume the
	 * existing queued IBs
	 */
	r = wait_event_timeout(entity->wait_queue,
			       amd_sched_entity_is_idle(entity),
			       msecs_to_jiffies(AMD_GPU_WAIT_IDLE_TIMEOUT_IN_MS));

	if (r <= 0)
		DRM_INFO("Entity %p is in waiting state during fini\n",
			 entity);

	amd_sched_rq_remove_entity(rq, entity);
	kfifo_free(&entity->job_queue);
	return r;
}

/**
 * Submit a normal job to the job queue
 *
 * @sched_job	The pointer to the job required to submit
 *
 * return 0 on success, -EINVAL if no scheduler fence could be created.
 * Blocks while the entity's job queue is full, until the scheduler has
 * consumed some of the queued commands.
 */
int amd_sched_push_job(struct amd_sched_job *sched_job)
{
	struct amd_sched_fence *fence =
		amd_sched_fence_create(sched_job->s_entity);
	if (!fence)
		return -EINVAL;
	fence_get(&fence->base);
	sched_job->s_fence = fence;
	while (kfifo_in_spinlocked(&sched_job->s_entity->job_queue,
				   &sched_job, sizeof(void *),
				   &sched_job->s_entity->queue_lock) !=
	       sizeof(void *)) {
		/*
		 * The current context used up all its IB slots, wait
		 * here (or check whether the GPU is hung)
		 */
		schedule();
	}
	/* the first job wakes up the scheduler */
	if ((kfifo_len(&sched_job->s_entity->job_queue) / sizeof(void *)) == 1)
		wake_up_interruptible(&sched_job->sched->wait_queue);
	return 0;
}

/**
 * Return true if we can push more jobs to the hw.
 */
static bool amd_sched_ready(struct amd_gpu_scheduler *sched)
{
	return atomic_read(&sched->hw_rq_count) <
		sched->hw_submission_limit;
}

/**
 * Select the next entity containing real IB submissions
 */
static struct amd_sched_entity *
amd_sched_select_context(struct amd_gpu_scheduler *sched)
{
	struct amd_sched_entity *tmp;

	if (!amd_sched_ready(sched))
		return NULL;

	/* The kernel run queue has higher priority than the normal run queue */
	tmp = amd_sched_rq_select_entity(&sched->kernel_rq);
	if (tmp == NULL)
		tmp = amd_sched_rq_select_entity(&sched->sched_rq);

	return tmp;
}

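/**
 * Fence callback invoked once a scheduled hardware job completes: it
 * signals the scheduler fence, releases the hardware slot, lets the
 * backend process the finished job and wakes up the scheduler thread.
 */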
static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
{
	struct amd_sched_job *sched_job =
		container_of(cb, struct amd_sched_job, cb);
	struct amd_gpu_scheduler *sched;

	sched = sched_job->sched;
	amd_sched_fence_signal(sched_job->s_fence);
	atomic_dec(&sched->hw_rq_count);
	fence_put(&sched_job->s_fence->base);
	sched->ops->process_job(sched, sched_job);
	wake_up_interruptible(&sched->wait_queue);
}

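/**
 * The main scheduler thread: runs as a SCHED_FIFO kernel thread, waits
 * until the hardware has a free slot and some entity has queued jobs,
 * then pops one job, hands it to the backend's run_job and installs
 * amd_sched_process_job as the completion callback.
 */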
static int amd_sched_main(void *param)
{
	struct sched_param sparam = {.sched_priority = 1};
	struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param;
	int r;

	sched_setscheduler(current, SCHED_FIFO, &sparam);

	while (!kthread_should_stop()) {
		struct amd_sched_entity *c_entity = NULL;
		struct amd_sched_job *job;
		struct fence *fence;

		wait_event_interruptible(sched->wait_queue,
					 kthread_should_stop() ||
					 (c_entity = amd_sched_select_context(sched)));

		if (!c_entity)
			continue;

		r = kfifo_out(&c_entity->job_queue, &job, sizeof(void *));
		if (r != sizeof(void *))
			continue;
		atomic_inc(&sched->hw_rq_count);

		fence = sched->ops->run_job(sched, c_entity, job);
		if (fence) {
			r = fence_add_callback(fence, &job->cb,
					       amd_sched_process_job);
			if (r == -ENOENT)
				amd_sched_process_job(fence, &job->cb);
			else if (r)
				DRM_ERROR("fence add callback failed (%d)\n", r);
			fence_put(fence);
		}

		/* wake up clients sleeping in entity_fini */
		if (c_entity->need_wakeup) {
			c_entity->need_wakeup = false;
			wake_up(&c_entity->wait_queue);
		}
	}
	return 0;
}

/**
 * Create a gpu scheduler
 *
 * @ops		The backend operations for this scheduler.
 * @ring	The ring id for the scheduler.
 * @hw_submission	Number of hw submissions to do.
 *
 * Return the pointer to the scheduler on success, otherwise return NULL
 */
struct amd_gpu_scheduler *amd_sched_create(struct amd_sched_backend_ops *ops,
					   unsigned ring, unsigned hw_submission)
{
	struct amd_gpu_scheduler *sched;
	char name[20];

	sched = kzalloc(sizeof(struct amd_gpu_scheduler), GFP_KERNEL);
	if (!sched)
		return NULL;

	sched->ops = ops;
	sched->ring_id = ring;
	sched->hw_submission_limit = hw_submission;
	snprintf(name, sizeof(name), "gpu_sched[%d]", ring);
	amd_sched_rq_init(&sched->sched_rq);
	amd_sched_rq_init(&sched->kernel_rq);

	init_waitqueue_head(&sched->wait_queue);
	atomic_set(&sched->hw_rq_count, 0);
	/* Each scheduler will run on a separate kernel thread */
	sched->thread = kthread_run(amd_sched_main, sched, name);
	if (IS_ERR(sched->thread)) {
		DRM_ERROR("Failed to create scheduler for id %d.\n", ring);
		kfree(sched);
		return NULL;
	}

	return sched;
}

/**
 * Destroy a gpu scheduler
 *
 * @sched	The pointer to the scheduler
 *
 * return 0; destruction cannot fail
 */
int amd_sched_destroy(struct amd_gpu_scheduler *sched)
{
	kthread_stop(sched->thread);
	kfree(sched);
	return 0;
}