/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#include <drm/drmP.h>
#include "gpu_scheduler.h"

#define CREATE_TRACE_POINTS
#include "gpu_sched_trace.h"

static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity);
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
static void amd_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb);

/* Initialize a given run queue struct */
static void amd_sched_rq_init(struct amd_sched_rq *rq)
{
	spin_lock_init(&rq->lock);
	INIT_LIST_HEAD(&rq->entities);
	rq->current_entity = NULL;
}

static void amd_sched_rq_add_entity(struct amd_sched_rq *rq,
				    struct amd_sched_entity *entity)
{
	if (!list_empty(&entity->list))
		return;
	spin_lock(&rq->lock);
	list_add_tail(&entity->list, &rq->entities);
	spin_unlock(&rq->lock);
}

static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq,
				       struct amd_sched_entity *entity)
{
	if (list_empty(&entity->list))
		return;
	spin_lock(&rq->lock);
	list_del_init(&entity->list);
	if (rq->current_entity == entity)
		rq->current_entity = NULL;
	spin_unlock(&rq->lock);
}

/**
 * Select an entity which could provide a job to run
 *
 * @rq The run queue to check.
 *
 * Try to find a ready entity, returns NULL if none is found.
 */
static struct amd_sched_entity *
amd_sched_rq_select_entity(struct amd_sched_rq *rq)
{
	struct amd_sched_entity *entity;

	spin_lock(&rq->lock);

	entity = rq->current_entity;
	if (entity) {
		/* Continue the round robin behind the last scheduled entity */
		list_for_each_entry_continue(entity, &rq->entities, list) {
			if (amd_sched_entity_is_ready(entity)) {
				rq->current_entity = entity;
				spin_unlock(&rq->lock);
				return entity;
			}
		}
	}

	/* Wrap around and retry from the start of the list */
	list_for_each_entry(entity, &rq->entities, list) {

		if (amd_sched_entity_is_ready(entity)) {
			rq->current_entity = entity;
			spin_unlock(&rq->lock);
			return entity;
		}

		/* We made a full round trip without finding anything */
		if (entity == rq->current_entity)
			break;
	}

	spin_unlock(&rq->lock);

	return NULL;
}

/**
 * Init a context entity which is used by the scheduler when submitting
 * jobs to the HW ring.
 *
 * @sched The pointer to the scheduler
 * @entity The pointer to a valid amd_sched_entity
 * @rq The run queue this entity belongs to
 * @jobs The max number of jobs in the job queue
 *
 * Return 0 on success, negative error code on failure.
 */
int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
			  struct amd_sched_entity *entity,
			  struct amd_sched_rq *rq,
			  uint32_t jobs)
{
	int r;

	if (!(sched && entity && rq))
		return -EINVAL;

	memset(entity, 0, sizeof(struct amd_sched_entity));
	INIT_LIST_HEAD(&entity->list);
	entity->rq = rq;
	entity->sched = sched;

	spin_lock_init(&entity->queue_lock);
	r = kfifo_alloc(&entity->job_queue, jobs * sizeof(void *), GFP_KERNEL);
	if (r)
		return r;

	atomic_set(&entity->fence_seq, 0);
	entity->fence_context = dma_fence_context_alloc(2);

	return 0;
}

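/*
 * Illustrative sketch (an assumption, not part of the original file): how a
 * driver could pair per-context entity setup and teardown. The run-queue
 * choice AMD_SCHED_PRIORITY_NORMAL, the queue depth of 32 and the my_ctx_*
 * helper names are placeholders.
 */
static int my_ctx_entity_init(struct amd_gpu_scheduler *sched,
			      struct amd_sched_entity *entity)
{
	struct amd_sched_rq *rq = &sched->sched_rq[AMD_SCHED_PRIORITY_NORMAL];

	/* The entity's software queue gets room for 32 jobs. */
	return amd_sched_entity_init(sched, entity, rq, 32);
}

static void my_ctx_entity_fini(struct amd_gpu_scheduler *sched,
			       struct amd_sched_entity *entity)
{
	/* Waits for queued jobs to drain, or discards them on SIGKILL. */
	amd_sched_entity_fini(sched, entity);
}
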
/**
 * Query if entity is initialized
 *
 * @sched Pointer to scheduler instance
 * @entity The pointer to a valid scheduler entity
 *
 * Return true if entity is initialized, false otherwise.
 */
static bool amd_sched_entity_is_initialized(struct amd_gpu_scheduler *sched,
					    struct amd_sched_entity *entity)
{
	return entity->sched == sched &&
		entity->rq != NULL;
}

/**
 * Check if entity is idle
 *
 * @entity The pointer to a valid scheduler entity
 *
 * Return true if the entity doesn't have any unscheduled jobs.
 */
static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity)
{
	rmb();
	if (kfifo_is_empty(&entity->job_queue))
		return true;

	return false;
}

/**
 * Check if entity is ready
 *
 * @entity The pointer to a valid scheduler entity
 *
 * Return true if the entity could provide a job.
 */
static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity)
{
	if (kfifo_is_empty(&entity->job_queue))
		return false;

	if (ACCESS_ONCE(entity->dependency))
		return false;

	return true;
}

/**
 * Destroy a context entity
 *
 * @sched Pointer to scheduler instance
 * @entity The pointer to a valid scheduler entity
 *
 * Cleanup and free the allocated resources.
 */
void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
			   struct amd_sched_entity *entity)
{
	struct amd_sched_rq *rq = entity->rq;
	int r;

	if (!amd_sched_entity_is_initialized(sched, entity))
		return;

	/*
	 * The client will not queue more IBs during this fini, consume
	 * existing queued IBs or discard them on SIGKILL.
	 */
	if ((current->flags & PF_SIGNALED) && current->exit_code == SIGKILL)
		r = -ERESTARTSYS;
	else
		r = wait_event_killable(sched->job_scheduled,
					amd_sched_entity_is_idle(entity));
	amd_sched_rq_remove_entity(rq, entity);
	if (r) {
		struct amd_sched_job *job;

		/* Park the scheduler thread for a moment to make sure it
		 * isn't processing our entity.
		 */
		kthread_park(sched->thread);
		kthread_unpark(sched->thread);
		while (kfifo_out(&entity->job_queue, &job, sizeof(job)))
			sched->ops->free_job(job);

	}
	kfifo_free(&entity->job_queue);
}

static void amd_sched_entity_wakeup(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct amd_sched_entity *entity =
		container_of(cb, struct amd_sched_entity, cb);
	entity->dependency = NULL;
	dma_fence_put(f);
	amd_sched_wakeup(entity->sched);
}

static void amd_sched_entity_clear_dep(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct amd_sched_entity *entity =
		container_of(cb, struct amd_sched_entity, cb);
	entity->dependency = NULL;
	dma_fence_put(f);
}

/**
 * Check whether waiting for @fence can be optimized
 *
 * @fence The fence a job would have to wait for
 * @entity The entity that wants to run the job
 *
 * Returns true if @fence is still unsignaled but was emitted from @entity
 * itself or from a job on the same scheduler.
 */
bool amd_sched_dependency_optimized(struct dma_fence *fence,
				    struct amd_sched_entity *entity)
{
	struct amd_gpu_scheduler *sched = entity->sched;
	struct amd_sched_fence *s_fence;

	if (!fence || dma_fence_is_signaled(fence))
		return false;
	if (fence->context == entity->fence_context)
		return true;
	s_fence = to_amd_sched_fence(fence);
	if (s_fence && s_fence->sched == sched)
		return true;

	return false;
}

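/*
 * Illustrative sketch (an assumption, not part of the original file): a
 * driver-side helper deciding whether an explicit sync against @fence is
 * still needed before submission; my_need_pipeline_sync is hypothetical.
 */
static bool my_need_pipeline_sync(struct dma_fence *fence,
				  struct amd_sched_entity *entity)
{
	/* Same-entity or same-scheduler fences are already covered by the
	 * scheduler's own dependency tracking. */
	return fence && !amd_sched_dependency_optimized(fence, entity);
}
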
static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
{
	struct amd_gpu_scheduler *sched = entity->sched;
	struct dma_fence *fence = entity->dependency;
	struct amd_sched_fence *s_fence;

	if (fence->context == entity->fence_context) {
		/* We can ignore fences from ourself */
		dma_fence_put(entity->dependency);
		return false;
	}

	s_fence = to_amd_sched_fence(fence);
	if (s_fence && s_fence->sched == sched) {

		/*
		 * Fence is from the same scheduler, only need to wait for
		 * it to be scheduled
		 */
		fence = dma_fence_get(&s_fence->scheduled);
		dma_fence_put(entity->dependency);
		entity->dependency = fence;
		if (!dma_fence_add_callback(fence, &entity->cb,
					    amd_sched_entity_clear_dep))
			return true;

		/* Ignore it when it is already scheduled */
		dma_fence_put(fence);
		return false;
	}

	if (!dma_fence_add_callback(entity->dependency, &entity->cb,
				    amd_sched_entity_wakeup))
		return true;

	dma_fence_put(entity->dependency);
	return false;
}

static struct amd_sched_job *
amd_sched_entity_pop_job(struct amd_sched_entity *entity)
{
	struct amd_gpu_scheduler *sched = entity->sched;
	struct amd_sched_job *sched_job;

	if (!kfifo_out_peek(&entity->job_queue, &sched_job, sizeof(sched_job)))
		return NULL;

	while ((entity->dependency = sched->ops->dependency(sched_job)))
		if (amd_sched_entity_add_dependency_cb(entity))
			return NULL;

	return sched_job;
}

/**
 * Helper to submit a job to the job queue
 *
 * @sched_job The job to submit
 *
 * Returns true if we could submit the job.
 */
static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
{
	struct amd_gpu_scheduler *sched = sched_job->sched;
	struct amd_sched_entity *entity = sched_job->s_entity;
	bool added, first = false;

	spin_lock(&entity->queue_lock);
	added = kfifo_in(&entity->job_queue, &sched_job,
			 sizeof(sched_job)) == sizeof(sched_job);

	if (added && kfifo_len(&entity->job_queue) == sizeof(sched_job))
		first = true;

	spin_unlock(&entity->queue_lock);

	/* first job wakes up scheduler */
	if (first) {
		/* Add the entity to the run queue */
		amd_sched_rq_add_entity(entity->rq, entity);
		amd_sched_wakeup(sched);
	}
	return added;
}

/* amd_sched_job_finish is called after the hw fence signaled: it removes
 * the job from the ring_mirror_list, re-arms the timeout handler for the
 * next job and finally frees the job.
 */
static void amd_sched_job_finish(struct work_struct *work)
{
	struct amd_sched_job *s_job = container_of(work, struct amd_sched_job,
						   finish_work);
	struct amd_gpu_scheduler *sched = s_job->sched;

	/* remove job from ring_mirror_list */
	spin_lock(&sched->job_list_lock);
	list_del_init(&s_job->node);
	if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
		struct amd_sched_job *next;

		spin_unlock(&sched->job_list_lock);
		cancel_delayed_work_sync(&s_job->work_tdr);
		spin_lock(&sched->job_list_lock);

		/* queue TDR for next job */
		next = list_first_entry_or_null(&sched->ring_mirror_list,
						struct amd_sched_job, node);

		if (next)
			schedule_delayed_work(&next->work_tdr, sched->timeout);
	}
	spin_unlock(&sched->job_list_lock);
	sched->ops->free_job(s_job);
}

static void amd_sched_job_finish_cb(struct dma_fence *f,
				    struct dma_fence_cb *cb)
{
	struct amd_sched_job *job = container_of(cb, struct amd_sched_job,
						 finish_cb);
	schedule_work(&job->finish_work);
}

static void amd_sched_job_begin(struct amd_sched_job *s_job)
{
	struct amd_gpu_scheduler *sched = s_job->sched;

	spin_lock(&sched->job_list_lock);
	list_add_tail(&s_job->node, &sched->ring_mirror_list);
	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
	    list_first_entry_or_null(&sched->ring_mirror_list,
				     struct amd_sched_job, node) == s_job)
		schedule_delayed_work(&s_job->work_tdr, sched->timeout);
	spin_unlock(&sched->job_list_lock);
}

static void amd_sched_job_timedout(struct work_struct *work)
{
	struct amd_sched_job *job = container_of(work, struct amd_sched_job,
						 work_tdr.work);

	job->sched->ops->timedout_job(job);
}

void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched)
{
	struct amd_sched_job *s_job;

	spin_lock(&sched->job_list_lock);
	list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) {
		if (s_job->s_fence->parent &&
		    dma_fence_remove_callback(s_job->s_fence->parent,
					      &s_job->s_fence->cb)) {
			dma_fence_put(s_job->s_fence->parent);
			s_job->s_fence->parent = NULL;
			atomic_dec(&sched->hw_rq_count);
		}
	}
	spin_unlock(&sched->job_list_lock);
}

void amd_sched_job_kickout(struct amd_sched_job *s_job)
{
	struct amd_gpu_scheduler *sched = s_job->sched;

	spin_lock(&sched->job_list_lock);
	list_del_init(&s_job->node);
	spin_unlock(&sched->job_list_lock);
}

void amd_sched_job_recovery(struct amd_gpu_scheduler *sched)
{
	struct amd_sched_job *s_job, *tmp;
	int r;

	spin_lock(&sched->job_list_lock);
	s_job = list_first_entry_or_null(&sched->ring_mirror_list,
					 struct amd_sched_job, node);
	if (s_job && sched->timeout != MAX_SCHEDULE_TIMEOUT)
		schedule_delayed_work(&s_job->work_tdr, sched->timeout);

	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
		struct amd_sched_fence *s_fence = s_job->s_fence;
		struct dma_fence *fence;

		spin_unlock(&sched->job_list_lock);
		fence = sched->ops->run_job(s_job);
		atomic_inc(&sched->hw_rq_count);
		if (fence) {
			s_fence->parent = dma_fence_get(fence);
			r = dma_fence_add_callback(fence, &s_fence->cb,
						   amd_sched_process_job);
			if (r == -ENOENT)
				amd_sched_process_job(fence, &s_fence->cb);
			else if (r)
				DRM_ERROR("fence add callback failed (%d)\n",
					  r);
			dma_fence_put(fence);
		} else {
			DRM_ERROR("Failed to run job!\n");
			amd_sched_process_job(NULL, &s_fence->cb);
		}
		spin_lock(&sched->job_list_lock);
	}
	spin_unlock(&sched->job_list_lock);
}

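/*
 * Illustrative sketch (an assumption, not part of the original file): how a
 * driver's GPU-reset path would typically combine the helpers above, modeled
 * on how amdgpu uses them. The function name and the bad_job handling are
 * placeholders; the HW reset itself is driver-specific.
 */
static void my_driver_gpu_reset(struct amd_gpu_scheduler *sched,
				struct amd_sched_job *bad_job)
{
	/* Stop the scheduler thread so no new jobs reach the HW. */
	kthread_park(sched->thread);

	/* Detach the HW-fence callbacks from all in-flight jobs. */
	amd_sched_hw_job_reset(sched);

	/* Optionally drop the job that caused the hang so it isn't replayed. */
	amd_sched_job_kickout(bad_job);

	/* ... driver-specific HW reset would happen here ... */

	/* Re-submit the mirrored jobs and restart the timeout handling. */
	amd_sched_job_recovery(sched);
	kthread_unpark(sched->thread);
}
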
/**
 * Submit a job to the job queue
 *
 * @sched_job The job to submit
 *
 * Blocks until the entity has room in its job queue.
 */
void amd_sched_entity_push_job(struct amd_sched_job *sched_job)
{
	struct amd_sched_entity *entity = sched_job->s_entity;

	trace_amd_sched_job(sched_job);
	dma_fence_add_callback(&sched_job->s_fence->finished, &sched_job->finish_cb,
			       amd_sched_job_finish_cb);
	wait_event(entity->sched->job_scheduled,
		   amd_sched_entity_in(sched_job));
}

/* init a sched_job with basic fields */
int amd_sched_job_init(struct amd_sched_job *job,
		       struct amd_gpu_scheduler *sched,
		       struct amd_sched_entity *entity,
		       void *owner)
{
	job->sched = sched;
	job->s_entity = entity;
	job->s_fence = amd_sched_fence_create(entity, owner);
	if (!job->s_fence)
		return -ENOMEM;
	job->id = atomic64_inc_return(&sched->job_id_count);

	INIT_WORK(&job->finish_work, amd_sched_job_finish);
	INIT_LIST_HEAD(&job->node);
	INIT_DELAYED_WORK(&job->work_tdr, amd_sched_job_timedout);

	return 0;
}

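/*
 * Illustrative sketch (an assumption, not part of the original file): the
 * usual submission path built on the two helpers above. my_driver_submit is
 * hypothetical and the meaning of @owner is driver-specific.
 */
static int my_driver_submit(struct amd_gpu_scheduler *sched,
			    struct amd_sched_entity *entity,
			    struct amd_sched_job *job, void *owner)
{
	int r;

	/* Creates the scheduler fences and hooks up finish/timeout work. */
	r = amd_sched_job_init(job, sched, entity, owner);
	if (r)
		return r;

	/* Queues the job; the scheduler thread runs it once all fences
	 * returned by ops->dependency() have signaled. */
	amd_sched_entity_push_job(job);
	return 0;
}
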
/**
 * Return true if we can push more jobs to the hw.
 */
static bool amd_sched_ready(struct amd_gpu_scheduler *sched)
{
	return atomic_read(&sched->hw_rq_count) <
		sched->hw_submission_limit;
}

/**
 * Wake up the scheduler when it is ready
 */
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched)
{
	if (amd_sched_ready(sched))
		wake_up_interruptible(&sched->wake_up_worker);
}

/**
 * Select the next entity to process
 */
static struct amd_sched_entity *
amd_sched_select_entity(struct amd_gpu_scheduler *sched)
{
	struct amd_sched_entity *entity;
	int i;

	if (!amd_sched_ready(sched))
		return NULL;

	/* Kernel run queue has higher priority than normal run queue */
	for (i = AMD_SCHED_PRIORITY_MAX - 1; i >= AMD_SCHED_PRIORITY_MIN; i--) {
		entity = amd_sched_rq_select_entity(&sched->sched_rq[i]);
		if (entity)
			break;
	}

	return entity;
}

static void amd_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct amd_sched_fence *s_fence =
		container_of(cb, struct amd_sched_fence, cb);
	struct amd_gpu_scheduler *sched = s_fence->sched;

	atomic_dec(&sched->hw_rq_count);
	amd_sched_fence_finished(s_fence);

	trace_amd_sched_process_job(s_fence);
	dma_fence_put(&s_fence->finished);
	wake_up_interruptible(&sched->wake_up_worker);
}

static bool amd_sched_blocked(struct amd_gpu_scheduler *sched)
{
	if (kthread_should_park()) {
		kthread_parkme();
		return true;
	}

	return false;
}

static int amd_sched_main(void *param)
{
	struct sched_param sparam = {.sched_priority = 1};
	struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param;
	int r, count;

	sched_setscheduler(current, SCHED_FIFO, &sparam);

	while (!kthread_should_stop()) {
		struct amd_sched_entity *entity = NULL;
		struct amd_sched_fence *s_fence;
		struct amd_sched_job *sched_job;
		struct dma_fence *fence;

		wait_event_interruptible(sched->wake_up_worker,
					 (!amd_sched_blocked(sched) &&
					  (entity = amd_sched_select_entity(sched))) ||
					 kthread_should_stop());

		if (!entity)
			continue;

		sched_job = amd_sched_entity_pop_job(entity);
		if (!sched_job)
			continue;

		s_fence = sched_job->s_fence;

		atomic_inc(&sched->hw_rq_count);
		amd_sched_job_begin(sched_job);

		fence = sched->ops->run_job(sched_job);
		amd_sched_fence_scheduled(s_fence);
		if (fence) {
			s_fence->parent = dma_fence_get(fence);
			r = dma_fence_add_callback(fence, &s_fence->cb,
						   amd_sched_process_job);
			if (r == -ENOENT)
				amd_sched_process_job(fence, &s_fence->cb);
			else if (r)
				DRM_ERROR("fence add callback failed (%d)\n",
					  r);
			dma_fence_put(fence);
		} else {
			DRM_ERROR("Failed to run job!\n");
			amd_sched_process_job(NULL, &s_fence->cb);
		}

		count = kfifo_out(&entity->job_queue, &sched_job,
				  sizeof(sched_job));
		WARN_ON(count != sizeof(sched_job));
		wake_up(&sched->job_scheduled);
	}
	return 0;
}

/**
 * Init a gpu scheduler instance
 *
 * @sched The pointer to the scheduler
 * @ops The backend operations for this scheduler.
 * @hw_submission Number of hw submissions that can be in flight.
 * @timeout Timeout for a job in jiffies, or MAX_SCHEDULE_TIMEOUT.
 * @name Name used for debugging
 *
 * Return 0 on success, otherwise error code.
 */
int amd_sched_init(struct amd_gpu_scheduler *sched,
		   const struct amd_sched_backend_ops *ops,
		   unsigned hw_submission, long timeout, const char *name)
{
	int i;

	sched->ops = ops;
	sched->hw_submission_limit = hw_submission;
	sched->name = name;
	sched->timeout = timeout;
	for (i = AMD_SCHED_PRIORITY_MIN; i < AMD_SCHED_PRIORITY_MAX; i++)
		amd_sched_rq_init(&sched->sched_rq[i]);

	init_waitqueue_head(&sched->wake_up_worker);
	init_waitqueue_head(&sched->job_scheduled);
	INIT_LIST_HEAD(&sched->ring_mirror_list);
	spin_lock_init(&sched->job_list_lock);
	atomic_set(&sched->hw_rq_count, 0);
	atomic64_set(&sched->job_id_count, 0);

	/* Each scheduler will run on a separate kernel thread */
	sched->thread = kthread_run(amd_sched_main, sched, sched->name);
	if (IS_ERR(sched->thread)) {
		DRM_ERROR("Failed to create scheduler for %s.\n", name);
		return PTR_ERR(sched->thread);
	}

	return 0;
}

/**
 * Destroy a gpu scheduler
 *
 * @sched The pointer to the scheduler
 */
void amd_sched_fini(struct amd_gpu_scheduler *sched)
{
	if (sched->thread)
		kthread_stop(sched->thread);
}
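
/*
 * Illustrative sketch (an assumption, not part of the original file): the
 * minimal backend a driver has to provide before calling amd_sched_init().
 * The callback signatures match how this file invokes ops->dependency,
 * ops->run_job, ops->timedout_job and ops->free_job; all my_* names, the
 * queue depth of 16 and the 10 second timeout are placeholders.
 */
struct my_job {
	struct amd_sched_job base;
	/* driver-specific payload would live here */
};

static struct dma_fence *my_dependency(struct amd_sched_job *sched_job)
{
	return NULL;	/* no extra dependencies in this sketch */
}

static struct dma_fence *my_run_job(struct amd_sched_job *sched_job)
{
	/* A real driver pushes the job to its HW ring here and returns the
	 * HW fence; returning NULL is treated as a submission failure. */
	return NULL;
}

static void my_timedout_job(struct amd_sched_job *sched_job)
{
	/* A real driver would start its GPU reset/recovery sequence here. */
}

static void my_free_job(struct amd_sched_job *sched_job)
{
	kfree(container_of(sched_job, struct my_job, base));
}

static const struct amd_sched_backend_ops my_sched_ops = {
	.dependency	= my_dependency,
	.run_job	= my_run_job,
	.timedout_job	= my_timedout_job,
	.free_job	= my_free_job,
};

static int my_ring_sched_init(struct amd_gpu_scheduler *sched)
{
	/* Up to 16 jobs in flight on the HW, 10 second timeout per job. */
	return amd_sched_init(sched, &my_sched_ops, 16,
			      msecs_to_jiffies(10 * 1000), "my_ring");
}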