/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <drm/drmP.h>
#include "gpu_scheduler.h"

#define CREATE_TRACE_POINTS
#include "gpu_sched_trace.h"

static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity);
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);

struct kmem_cache *sched_fence_slab;
atomic_t sched_fence_slab_ref = ATOMIC_INIT(0);

/* Initialize a given run queue struct */
static void amd_sched_rq_init(struct amd_sched_rq *rq)
{
        spin_lock_init(&rq->lock);
        INIT_LIST_HEAD(&rq->entities);
        rq->current_entity = NULL;
}

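/**
 * Add an entity to the run queue
 *
 * @rq The run queue to add to.
 * @entity The entity to add.
 *
 * Does nothing if the entity is already queued.
 */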
static void amd_sched_rq_add_entity(struct amd_sched_rq *rq,
                                    struct amd_sched_entity *entity)
{
        if (!list_empty(&entity->list))
                return;
        spin_lock(&rq->lock);
        list_add_tail(&entity->list, &rq->entities);
        spin_unlock(&rq->lock);
}

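/**
 * Remove an entity from the run queue
 *
 * @rq The run queue to remove from.
 * @entity The entity to remove.
 *
 * Also clears rq->current_entity when it points at this entity, so the
 * round-robin selection below can never resume from a removed element.
 */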
static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq,
                                       struct amd_sched_entity *entity)
{
        if (list_empty(&entity->list))
                return;
        spin_lock(&rq->lock);
        list_del_init(&entity->list);
        if (rq->current_entity == entity)
                rq->current_entity = NULL;
        spin_unlock(&rq->lock);
}

/**
 * Select an entity which could provide a job to run
 *
 * @rq The run queue to check.
 *
 * Try to find a ready entity; returns NULL if none is found.
 */
static struct amd_sched_entity *
amd_sched_rq_select_entity(struct amd_sched_rq *rq)
{
        struct amd_sched_entity *entity;

        spin_lock(&rq->lock);

        entity = rq->current_entity;
        if (entity) {
                list_for_each_entry_continue(entity, &rq->entities, list) {
                        if (amd_sched_entity_is_ready(entity)) {
                                rq->current_entity = entity;
                                spin_unlock(&rq->lock);
                                return entity;
                        }
                }
        }

        list_for_each_entry(entity, &rq->entities, list) {

                if (amd_sched_entity_is_ready(entity)) {
                        rq->current_entity = entity;
                        spin_unlock(&rq->lock);
                        return entity;
                }

                if (entity == rq->current_entity)
                        break;
        }

        spin_unlock(&rq->lock);

        return NULL;
}
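
/*
 * The two loops above implement a round-robin policy: scanning resumes at
 * the entity after rq->current_entity, wraps around to the list head, and
 * stops once it has come back to current_entity.  The first ready entity
 * found along the way becomes the new current_entity, so one busy entity
 * cannot starve the others in the same run queue.
 */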

/**
 * Init a context entity used by the scheduler when submitting to a HW ring.
 *
 * @sched The pointer to the scheduler
 * @entity The pointer to a valid amd_sched_entity
 * @rq The run queue this entity belongs to
 * @jobs The max number of jobs in the job queue
 *
 * Return 0 on success, negative error code on failure.
 */
int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
                          struct amd_sched_entity *entity,
                          struct amd_sched_rq *rq,
                          uint32_t jobs)
{
        int r;

        if (!(sched && entity && rq))
                return -EINVAL;

        memset(entity, 0, sizeof(struct amd_sched_entity));
        INIT_LIST_HEAD(&entity->list);
        entity->rq = rq;
        entity->sched = sched;

        spin_lock_init(&entity->queue_lock);
        r = kfifo_alloc(&entity->job_queue, jobs * sizeof(void *), GFP_KERNEL);
        if (r)
                return r;

        atomic_set(&entity->fence_seq, 0);
        entity->fence_context = fence_context_alloc(2);

        return 0;
}
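
/*
 * Typical usage, as a rough sketch: a driver embeds an amd_sched_entity in
 * its per-context state and initializes it against one of the scheduler's
 * run queues.  The names "ctx", "ring" and the run queue index below are
 * hypothetical driver-side examples, not part of this file:
 *
 *      r = amd_sched_entity_init(&ring->sched, &ctx->entity,
 *                                &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL],
 *                                num_jobs);
 *      if (r)
 *              return r;
 *
 * The entity is torn down again with amd_sched_entity_fini() once no more
 * jobs will be submitted to it.
 */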

/**
 * Query if entity is initialized
 *
 * @sched Pointer to scheduler instance
 * @entity The pointer to a valid scheduler entity
 *
 * Return true if entity is initialized, false otherwise.
 */
static bool amd_sched_entity_is_initialized(struct amd_gpu_scheduler *sched,
                                            struct amd_sched_entity *entity)
{
        return entity->sched == sched &&
                entity->rq != NULL;
}

/**
 * Check if entity is idle
 *
 * @entity The pointer to a valid scheduler entity
 *
 * Return true if the entity doesn't have any unscheduled jobs.
 */
static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity)
{
        rmb();
        if (kfifo_is_empty(&entity->job_queue))
                return true;

        return false;
}

/**
 * Check if entity is ready
 *
 * @entity The pointer to a valid scheduler entity
 *
 * Return true if the entity could provide a job.
 */
static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity)
{
        if (kfifo_is_empty(&entity->job_queue))
                return false;

        if (ACCESS_ONCE(entity->dependency))
                return false;

        return true;
}

/**
 * Destroy a context entity
 *
 * @sched Pointer to scheduler instance
 * @entity The pointer to a valid scheduler entity
 *
 * Cleanup and free the allocated resources.
 */
void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
                           struct amd_sched_entity *entity)
{
        struct amd_sched_rq *rq = entity->rq;

        if (!amd_sched_entity_is_initialized(sched, entity))
                return;

        /*
         * The client will not queue more IBs during this fini, so consume
         * the existing queued IBs.
         */
        wait_event(sched->job_scheduled, amd_sched_entity_is_idle(entity));

        amd_sched_rq_remove_entity(rq, entity);
        kfifo_free(&entity->job_queue);
}

static void amd_sched_entity_wakeup(struct fence *f, struct fence_cb *cb)
{
        struct amd_sched_entity *entity =
                container_of(cb, struct amd_sched_entity, cb);
        entity->dependency = NULL;
        fence_put(f);
        amd_sched_wakeup(entity->sched);
}

static void amd_sched_entity_clear_dep(struct fence *f, struct fence_cb *cb)
{
        struct amd_sched_entity *entity =
                container_of(cb, struct amd_sched_entity, cb);
        entity->dependency = NULL;
        fence_put(f);
}

static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
{
        struct amd_gpu_scheduler *sched = entity->sched;
        struct fence *fence = entity->dependency;
        struct amd_sched_fence *s_fence;

        if (fence->context == entity->fence_context) {
                /* We can ignore fences from ourselves */
                fence_put(entity->dependency);
                return false;
        }

        s_fence = to_amd_sched_fence(fence);
        if (s_fence && s_fence->sched == sched) {

                /*
                 * Fence is from the same scheduler, only need to wait for
                 * it to be scheduled
                 */
                fence = fence_get(&s_fence->scheduled);
                fence_put(entity->dependency);
                entity->dependency = fence;
                if (!fence_add_callback(fence, &entity->cb,
                                        amd_sched_entity_clear_dep))
                        return true;

                /* Ignore it when it is already scheduled */
                fence_put(fence);
                return false;
        }

        if (!fence_add_callback(entity->dependency, &entity->cb,
                                amd_sched_entity_wakeup))
                return true;

        fence_put(entity->dependency);
        return false;
}
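
/*
 * To summarize the three dependency cases handled above: a fence from the
 * entity's own fence context is dropped outright; a fence from another
 * entity on the same scheduler only blocks until that job is *scheduled*
 * (its "scheduled" fence), since the hardware ring then orders the two
 * jobs anyway; any other fence blocks the entity until it signals
 * completion, at which point amd_sched_entity_wakeup() kicks the
 * scheduler thread again.
 */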

static struct amd_sched_job *
amd_sched_entity_pop_job(struct amd_sched_entity *entity)
{
        struct amd_gpu_scheduler *sched = entity->sched;
        struct amd_sched_job *sched_job;

        if (!kfifo_out_peek(&entity->job_queue, &sched_job, sizeof(sched_job)))
                return NULL;

        while ((entity->dependency = sched->ops->dependency(sched_job)))
                if (amd_sched_entity_add_dependency_cb(entity))
                        return NULL;

        return sched_job;
}

/**
 * Helper to submit a job to the job queue
 *
 * @sched_job The pointer to the job required to submit
 *
 * Returns true if we could submit the job.
 */
static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
{
        struct amd_gpu_scheduler *sched = sched_job->sched;
        struct amd_sched_entity *entity = sched_job->s_entity;
        bool added, first = false;

        spin_lock(&entity->queue_lock);
        added = kfifo_in(&entity->job_queue, &sched_job,
                         sizeof(sched_job)) == sizeof(sched_job);

        if (added && kfifo_len(&entity->job_queue) == sizeof(sched_job))
                first = true;

        spin_unlock(&entity->queue_lock);

        /* first job wakes up scheduler */
        if (first) {
                /* Add the entity to the run queue */
                amd_sched_rq_add_entity(entity->rq, entity);
                amd_sched_wakeup(sched);
        }
        return added;
}

/*
 * amd_sched_job_finish is run from finish_work after the hw fence has
 * signaled; it removes the job from the ring_mirror_list and frees it.
 */
static void amd_sched_job_finish(struct work_struct *work)
{
        struct amd_sched_job *s_job = container_of(work, struct amd_sched_job,
                                                   finish_work);
        struct amd_gpu_scheduler *sched = s_job->sched;

        /* remove job from ring_mirror_list */
        spin_lock(&sched->job_list_lock);
        list_del_init(&s_job->node);
        if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
                struct amd_sched_job *next;

                spin_unlock(&sched->job_list_lock);
                cancel_delayed_work_sync(&s_job->work_tdr);
                spin_lock(&sched->job_list_lock);

                /* queue TDR for next job */
                next = list_first_entry_or_null(&sched->ring_mirror_list,
                                                struct amd_sched_job, node);

                if (next)
                        schedule_delayed_work(&next->work_tdr, sched->timeout);
        }
        spin_unlock(&sched->job_list_lock);
        sched->ops->free_job(s_job);
}

static void amd_sched_job_finish_cb(struct fence *f, struct fence_cb *cb)
{
        struct amd_sched_job *job = container_of(cb, struct amd_sched_job,
                                                 finish_cb);
        schedule_work(&job->finish_work);
}
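
/*
 * Note on the locking in amd_sched_job_finish() above:
 * cancel_delayed_work_sync() may have to wait for a running timeout
 * handler, and that handler can itself try to take job_list_lock, so the
 * lock is dropped around the cancel and re-taken before the next job's
 * TDR work is queued.
 */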

static void amd_sched_job_begin(struct amd_sched_job *s_job)
{
        struct amd_gpu_scheduler *sched = s_job->sched;

        spin_lock(&sched->job_list_lock);
        list_add_tail(&s_job->node, &sched->ring_mirror_list);
        if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
            list_first_entry_or_null(&sched->ring_mirror_list,
                                     struct amd_sched_job, node) == s_job)
                schedule_delayed_work(&s_job->work_tdr, sched->timeout);
        spin_unlock(&sched->job_list_lock);
}

static void amd_sched_job_timedout(struct work_struct *work)
{
        struct amd_sched_job *job = container_of(work, struct amd_sched_job,
                                                 work_tdr.work);

        job->sched->ops->timedout_job(job);
}

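/**
 * Detach the hardware fences from all jobs on the mirror list
 *
 * @sched The scheduler being reset.
 *
 * Removes the amd_sched_process_job() callback from each in-flight job's
 * parent fence and drops the fence reference, so that a hardware reset
 * cannot complete jobs behind the scheduler's back.
 */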
void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched)
{
        struct amd_sched_job *s_job;

        spin_lock(&sched->job_list_lock);
        list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) {
                if (fence_remove_callback(s_job->s_fence->parent,
                                          &s_job->s_fence->cb)) {
                        fence_put(s_job->s_fence->parent);
                        s_job->s_fence->parent = NULL;
                }
        }
        spin_unlock(&sched->job_list_lock);
}

/**
 * Submit a job to the job queue
 *
 * @sched_job The pointer to the job required to submit
 *
 * Blocks until the job queue has room, then queues the job and wakes up
 * the scheduler if this was the entity's first job.
 */
void amd_sched_entity_push_job(struct amd_sched_job *sched_job)
{
        struct amd_sched_entity *entity = sched_job->s_entity;

        trace_amd_sched_job(sched_job);
        fence_add_callback(&sched_job->s_fence->finished, &sched_job->finish_cb,
                           amd_sched_job_finish_cb);
        wait_event(entity->sched->job_scheduled,
                   amd_sched_entity_in(sched_job));
}

/* Init a sched_job with its basic fields */
int amd_sched_job_init(struct amd_sched_job *job,
                       struct amd_gpu_scheduler *sched,
                       struct amd_sched_entity *entity,
                       void *owner)
{
        job->sched = sched;
        job->s_entity = entity;
        job->s_fence = amd_sched_fence_create(entity, owner);
        if (!job->s_fence)
                return -ENOMEM;

        INIT_WORK(&job->finish_work, amd_sched_job_finish);
        INIT_LIST_HEAD(&job->node);
        INIT_DELAYED_WORK(&job->work_tdr, amd_sched_job_timedout);

        return 0;
}
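
/*
 * The expected submission flow, sketched with a hypothetical driver job
 * ("my_job" embedding an amd_sched_job as "base" is illustrative, not part
 * of this file):
 *
 *      r = amd_sched_job_init(&my_job->base, &ring->sched,
 *                             &ctx->entity, owner);
 *      if (r)
 *              return r;
 *      amd_sched_entity_push_job(&my_job->base);
 *
 * After the push, ownership passes to the scheduler: run_job() is called
 * once the job's dependencies are met, and free_job() after its hardware
 * fence signals.
 */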

/**
 * Return true if we can push more jobs to the hw.
 */
static bool amd_sched_ready(struct amd_gpu_scheduler *sched)
{
        return atomic_read(&sched->hw_rq_count) <
                sched->hw_submission_limit;
}

/**
 * Wake up the scheduler when it is ready
 */
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched)
{
        if (amd_sched_ready(sched))
                wake_up_interruptible(&sched->wake_up_worker);
}

/**
 * Select next entity to process
 */
static struct amd_sched_entity *
amd_sched_select_entity(struct amd_gpu_scheduler *sched)
{
        struct amd_sched_entity *entity;
        int i;

        if (!amd_sched_ready(sched))
                return NULL;

        /* Kernel run queue has higher priority than normal run queue */
        for (i = 0; i < AMD_SCHED_MAX_PRIORITY; i++) {
                entity = amd_sched_rq_select_entity(&sched->sched_rq[i]);
                if (entity)
                        break;
        }

        return entity;
}
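
/*
 * The priority order falls out of the loop above: run queues are polled
 * in ascending index order, so the kernel run queue takes precedence
 * because it sits at the lowest index in sched->sched_rq.
 */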

static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
{
        struct amd_sched_fence *s_fence =
                container_of(cb, struct amd_sched_fence, cb);
        struct amd_gpu_scheduler *sched = s_fence->sched;

        atomic_dec(&sched->hw_rq_count);
        amd_sched_fence_finished(s_fence);

        trace_amd_sched_process_job(s_fence);
        fence_put(&s_fence->finished);
        wake_up_interruptible(&sched->wake_up_worker);
}

static bool amd_sched_blocked(struct amd_gpu_scheduler *sched)
{
        if (kthread_should_park()) {
                kthread_parkme();
                return true;
        }

        return false;
}

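/*
 * The scheduler's main loop: each scheduler instance runs this in its own
 * kernel thread (SCHED_FIFO).  It sleeps until an entity has a ready job
 * and the hardware queue has room, pops the job, hands it to the
 * backend's run_job() and hooks amd_sched_process_job() to the returned
 * hardware fence so that completion wakes the loop up again.
 */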
static int amd_sched_main(void *param)
{
        struct sched_param sparam = {.sched_priority = 1};
        struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param;
        int r, count;

        sched_setscheduler(current, SCHED_FIFO, &sparam);

        while (!kthread_should_stop()) {
                struct amd_sched_entity *entity = NULL;
                struct amd_sched_fence *s_fence;
                struct amd_sched_job *sched_job;
                struct fence *fence;

                wait_event_interruptible(sched->wake_up_worker,
                                         (!amd_sched_blocked(sched) &&
                                          (entity = amd_sched_select_entity(sched))) ||
                                         kthread_should_stop());

                if (!entity)
                        continue;

                sched_job = amd_sched_entity_pop_job(entity);
                if (!sched_job)
                        continue;

                s_fence = sched_job->s_fence;

                atomic_inc(&sched->hw_rq_count);
                amd_sched_job_begin(sched_job);

                fence = sched->ops->run_job(sched_job);
                amd_sched_fence_scheduled(s_fence);
                if (fence) {
                        s_fence->parent = fence_get(fence);
                        r = fence_add_callback(fence, &s_fence->cb,
                                               amd_sched_process_job);
                        if (r == -ENOENT)
                                amd_sched_process_job(fence, &s_fence->cb);
                        else if (r)
                                DRM_ERROR("fence add callback failed (%d)\n",
                                          r);
                        fence_put(fence);
                } else {
                        DRM_ERROR("Failed to run job!\n");
                        amd_sched_process_job(NULL, &s_fence->cb);
                }

                count = kfifo_out(&entity->job_queue, &sched_job,
                                  sizeof(sched_job));
                WARN_ON(count != sizeof(sched_job));
                wake_up(&sched->job_scheduled);
        }
        return 0;
}

/**
 * Init a gpu scheduler instance
 *
 * @sched The pointer to the scheduler
 * @ops The backend operations for this scheduler.
 * @hw_submission The max number of in-flight hw submissions.
 * @timeout Job timeout in jiffies, or MAX_SCHEDULE_TIMEOUT to disable TDR.
 * @name Name used for debugging
 *
 * Return 0 on success, otherwise error code.
 */
int amd_sched_init(struct amd_gpu_scheduler *sched,
                   const struct amd_sched_backend_ops *ops,
                   unsigned hw_submission, long timeout, const char *name)
{
        int i;

        sched->ops = ops;
        sched->hw_submission_limit = hw_submission;
        sched->name = name;
        sched->timeout = timeout;
        for (i = 0; i < AMD_SCHED_MAX_PRIORITY; i++)
                amd_sched_rq_init(&sched->sched_rq[i]);

        init_waitqueue_head(&sched->wake_up_worker);
        init_waitqueue_head(&sched->job_scheduled);
        INIT_LIST_HEAD(&sched->ring_mirror_list);
        spin_lock_init(&sched->job_list_lock);
        atomic_set(&sched->hw_rq_count, 0);
        if (atomic_inc_return(&sched_fence_slab_ref) == 1) {
                sched_fence_slab = kmem_cache_create(
                        "amd_sched_fence", sizeof(struct amd_sched_fence), 0,
                        SLAB_HWCACHE_ALIGN, NULL);
                if (!sched_fence_slab)
                        return -ENOMEM;
        }

        /* Each scheduler will run on a separate kernel thread */
        sched->thread = kthread_run(amd_sched_main, sched, sched->name);
        if (IS_ERR(sched->thread)) {
                DRM_ERROR("Failed to create scheduler for %s.\n", name);
                return PTR_ERR(sched->thread);
        }

        return 0;
}
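
/*
 * A minimal setup sketch, assuming a driver-side ops table (the callback
 * names here are hypothetical; the four hooks themselves are the ones
 * this file invokes through sched->ops):
 *
 *      static const struct amd_sched_backend_ops my_ops = {
 *              .dependency     = my_dependency,
 *              .run_job        = my_run_job,
 *              .timedout_job   = my_timedout_job,
 *              .free_job       = my_free_job,
 *      };
 *
 *      r = amd_sched_init(&ring->sched, &my_ops, hw_submission,
 *                         msecs_to_jiffies(timeout_ms), ring->name);
 */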

/**
 * Destroy a gpu scheduler
 *
 * @sched The pointer to the scheduler
 */
void amd_sched_fini(struct amd_gpu_scheduler *sched)
{
        if (sched->thread)
                kthread_stop(sched->thread);
        if (atomic_dec_and_test(&sched_fence_slab_ref))
                kmem_cache_destroy(sched_fence_slab);
}