/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <drm/drmP.h>
#include "gpu_scheduler.h"

#define CREATE_TRACE_POINTS
#include "gpu_sched_trace.h"

static struct amd_sched_job *
amd_sched_entity_pop_job(struct amd_sched_entity *entity);
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);

struct kmem_cache *sched_fence_slab;
atomic_t sched_fence_slab_ref = ATOMIC_INIT(0);

/* Initialize a given run queue struct */
static void amd_sched_rq_init(struct amd_sched_rq *rq)
{
	spin_lock_init(&rq->lock);
	INIT_LIST_HEAD(&rq->entities);
	rq->current_entity = NULL;
}

static void amd_sched_rq_add_entity(struct amd_sched_rq *rq,
				    struct amd_sched_entity *entity)
{
	spin_lock(&rq->lock);
	list_add_tail(&entity->list, &rq->entities);
	spin_unlock(&rq->lock);
}

static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq,
				       struct amd_sched_entity *entity)
{
	spin_lock(&rq->lock);
	list_del_init(&entity->list);
	if (rq->current_entity == entity)
		rq->current_entity = NULL;
	spin_unlock(&rq->lock);
}

/**
 * Select the next job from a specified run queue with round robin policy.
 * Return NULL if nothing is available.
 */
static struct amd_sched_job *
amd_sched_rq_select_job(struct amd_sched_rq *rq)
{
	struct amd_sched_entity *entity;
	struct amd_sched_job *sched_job;

	spin_lock(&rq->lock);

	entity = rq->current_entity;
	if (entity) {
		list_for_each_entry_continue(entity, &rq->entities, list) {
			sched_job = amd_sched_entity_pop_job(entity);
			if (sched_job) {
				rq->current_entity = entity;
				spin_unlock(&rq->lock);
				return sched_job;
			}
		}
	}

	list_for_each_entry(entity, &rq->entities, list) {

		sched_job = amd_sched_entity_pop_job(entity);
		if (sched_job) {
			rq->current_entity = entity;
			spin_unlock(&rq->lock);
			return sched_job;
		}

		if (entity == rq->current_entity)
			break;
	}

	spin_unlock(&rq->lock);

	return NULL;
}

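/*
 * Round robin details: the first walk above resumes *after*
 * rq->current_entity and runs to the tail of the list; the second walk
 * restarts from the head and gives up once it has come full circle. With
 * entities A -> B -> C and current_entity == B, the probe order is C, A, B.
 * An entity only yields a job here when its head job has no unresolved
 * dependency, see amd_sched_entity_pop_job() below.
 */
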
/**
 * Init a context entity used by the scheduler when submitting to a HW ring.
 *
 * @sched	The pointer to the scheduler
 * @entity	The pointer to a valid amd_sched_entity
 * @rq		The run queue this entity belongs to
 * @jobs	The max number of jobs in the job queue
 *
 * Return 0 on success, negative error code on failure.
 */
int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
			  struct amd_sched_entity *entity,
			  struct amd_sched_rq *rq,
			  uint32_t jobs)
{
	int r;

	if (!(sched && entity && rq))
		return -EINVAL;

	memset(entity, 0, sizeof(struct amd_sched_entity));
	INIT_LIST_HEAD(&entity->list);
	entity->rq = rq;
	entity->sched = sched;

	spin_lock_init(&entity->queue_lock);
	r = kfifo_alloc(&entity->job_queue, jobs * sizeof(void *), GFP_KERNEL);
	if (r)
		return r;

	atomic_set(&entity->fence_seq, 0);
	entity->fence_context = fence_context_alloc(1);

	/* Add the entity to the run queue */
	amd_sched_rq_add_entity(rq, entity);

	return 0;
}

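/*
 * Minimal usage sketch (illustrative only; the ctx/ring names and the
 * amdgpu_sched_jobs queue depth are assumptions, not defined in this file):
 *
 *	r = amd_sched_entity_init(&ring->sched, &ctx->entity,
 *				  &ring->sched.sched_rq, amdgpu_sched_jobs);
 *	if (r)
 *		return r;
 *	...
 *	amd_sched_entity_fini(&ring->sched, &ctx->entity);
 */
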
/**
 * Query if entity is initialized
 *
 * @sched	Pointer to scheduler instance
 * @entity	The pointer to a valid scheduler entity
 *
 * Return true if the entity is initialized, false otherwise.
 */
static bool amd_sched_entity_is_initialized(struct amd_gpu_scheduler *sched,
					    struct amd_sched_entity *entity)
{
	return entity->sched == sched &&
		entity->rq != NULL;
}

/**
 * Check if entity is idle
 *
 * @entity	The pointer to a valid scheduler entity
 *
 * Return true if the entity doesn't have any unscheduled jobs.
 */
static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity)
{
	rmb();
	if (kfifo_is_empty(&entity->job_queue))
		return true;

	return false;
}

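/*
 * Note on the rmb() above: jobs are only removed from entity->job_queue by
 * the scheduler thread after they have been pushed to the hardware, so an
 * empty fifo really means "nothing left to schedule". The read barrier is
 * presumably there so that a waiter woken on sched->job_scheduled re-reads
 * the fifo state instead of acting on a stale value.
 */
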
/**
 * Destroy a context entity
 *
 * @sched	Pointer to scheduler instance
 * @entity	The pointer to a valid scheduler entity
 *
 * Cleanup and free the allocated resources.
 */
void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
			   struct amd_sched_entity *entity)
{
	struct amd_sched_rq *rq = entity->rq;

	if (!amd_sched_entity_is_initialized(sched, entity))
		return;

	/*
	 * The client will not queue more IBs during this fini, consume existing
	 * queued IBs
	 */
	wait_event(sched->job_scheduled, amd_sched_entity_is_idle(entity));

	amd_sched_rq_remove_entity(rq, entity);
	kfifo_free(&entity->job_queue);
}

static void amd_sched_entity_wakeup(struct fence *f, struct fence_cb *cb)
{
	struct amd_sched_entity *entity =
		container_of(cb, struct amd_sched_entity, cb);
	entity->dependency = NULL;
	fence_put(f);
	amd_sched_wakeup(entity->sched);
}

static struct amd_sched_job *
amd_sched_entity_pop_job(struct amd_sched_entity *entity)
{
	struct amd_gpu_scheduler *sched = entity->sched;
	struct amd_sched_job *sched_job;

	if (ACCESS_ONCE(entity->dependency))
		return NULL;

	if (!kfifo_out_peek(&entity->job_queue, &sched_job, sizeof(sched_job)))
		return NULL;

	while ((entity->dependency = sched->ops->dependency(sched_job))) {

		if (entity->dependency->context == entity->fence_context) {
			/* We can ignore fences from ourself */
			fence_put(entity->dependency);
			continue;
		}

		if (fence_add_callback(entity->dependency, &entity->cb,
				       amd_sched_entity_wakeup))
			fence_put(entity->dependency);
		else
			return NULL;
	}

	return sched_job;
}

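/*
 * Dependency handling sketch: the head job is only peeked, never dequeued
 * here, so an entity blocked on a dependency simply looks empty to the run
 * queue walk. When ops->dependency() returns a foreign fence,
 * amd_sched_entity_wakeup() is installed on it; once the fence signals, the
 * dependency pointer is cleared and the scheduler retries this entity. A
 * fence that already signaled (fence_add_callback() returns nonzero) is
 * dropped and the next dependency is queried immediately.
 */
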
/**
 * Helper to submit a job to the job queue
 *
 * @sched_job	The pointer to the job required to submit
 *
 * Returns true if we could submit the job.
 */
static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
{
	struct amd_sched_entity *entity = sched_job->s_entity;
	bool added, first = false;

	spin_lock(&entity->queue_lock);
	added = kfifo_in(&entity->job_queue, &sched_job,
			 sizeof(sched_job)) == sizeof(sched_job);

	if (added && kfifo_len(&entity->job_queue) == sizeof(sched_job))
		first = true;

	spin_unlock(&entity->queue_lock);

	/* first job wakes up scheduler */
	if (first)
		amd_sched_wakeup(sched_job->sched);

	return added;
}

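/*
 * Why only the first job triggers a wakeup: kfifo_len() equal to exactly
 * one pointer means the queue was empty before this insert, so the worker
 * thread may be sleeping with nothing selectable. For any later job the
 * worker either already has work pending for this entity or will be woken
 * again when a hardware submission completes in amd_sched_process_job().
 */
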
/**
 * Submit a job to the job queue
 *
 * @sched_job	The pointer to the job required to submit
 *
 * Returns 0 for success, negative error code otherwise.
 */
int amd_sched_entity_push_job(struct amd_sched_job *sched_job)
{
	struct amd_sched_entity *entity = sched_job->s_entity;
	struct amd_sched_fence *fence = amd_sched_fence_create(
		entity, sched_job->owner);

	if (!fence)
		return -ENOMEM;

	sched_job->s_fence = fence;

	wait_event(entity->sched->job_scheduled,
		   amd_sched_entity_in(sched_job));
	trace_amd_sched_job(sched_job);
	return 0;
}

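/*
 * Note that pushing is potentially blocking: when the entity's job fifo is
 * full, amd_sched_entity_in() returns false and the caller sleeps on
 * sched->job_scheduled until the worker dequeues a job (see the wake_up()
 * at the end of amd_sched_main()), which gives natural back-pressure on
 * submitters.
 */
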
/**
 * Return true if we can push more jobs to the hw.
 */
static bool amd_sched_ready(struct amd_gpu_scheduler *sched)
{
	return atomic_read(&sched->hw_rq_count) <
		sched->hw_submission_limit;
}

/**
 * Wake up the scheduler when it is ready
 */
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched)
{
	if (amd_sched_ready(sched))
		wake_up_interruptible(&sched->wake_up_worker);
}

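/*
 * amd_sched_wakeup() is called from two places: when the first job lands in
 * an empty entity queue and when a dependency fence signals. Both events can
 * only make new work selectable, and the wakeup is suppressed while the
 * hardware queue is full because amd_sched_select_job() would bail out
 * anyway.
 */
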
/**
 * Select the next job to run.
 */
static struct amd_sched_job *
amd_sched_select_job(struct amd_gpu_scheduler *sched)
{
	struct amd_sched_job *sched_job;

	if (!amd_sched_ready(sched))
		return NULL;

	/* Kernel run queue has higher priority than normal run queue */
	sched_job = amd_sched_rq_select_job(&sched->kernel_rq);
	if (sched_job == NULL)
		sched_job = amd_sched_rq_select_job(&sched->sched_rq);

	return sched_job;
}

static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
{
	struct amd_sched_fence *s_fence =
		container_of(cb, struct amd_sched_fence, cb);
	struct amd_gpu_scheduler *sched = s_fence->sched;
	unsigned long flags;

	atomic_dec(&sched->hw_rq_count);
	amd_sched_fence_signal(s_fence);
	if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
		cancel_delayed_work(&s_fence->dwork);
		spin_lock_irqsave(&sched->fence_list_lock, flags);
		list_del_init(&s_fence->list);
		spin_unlock_irqrestore(&sched->fence_list_lock, flags);
	}
	trace_amd_sched_process_job(s_fence);
	fence_put(&s_fence->base);
	wake_up_interruptible(&sched->wake_up_worker);
}

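/*
 * Completion path sketch: this callback runs when the hardware fence
 * returned by ops->run_job() signals, or directly with f == NULL when
 * run_job failed. Dropping hw_rq_count may make the scheduler ready again,
 * hence the final wake_up_interruptible(); the delayed timeout work armed
 * in amd_sched_main() is cancelled because the job did complete in time.
 */
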
static void amd_sched_fence_work_func(struct work_struct *work)
{
	struct amd_sched_fence *s_fence =
		container_of(work, struct amd_sched_fence, dwork.work);
	struct amd_gpu_scheduler *sched = s_fence->sched;
	struct amd_sched_fence *entity, *tmp;
	unsigned long flags;

	DRM_ERROR("[%s] scheduler timed out!\n", sched->name);

	/* Clean all pending fences */
	spin_lock_irqsave(&sched->fence_list_lock, flags);
	list_for_each_entry_safe(entity, tmp, &sched->fence_list, list) {
		DRM_ERROR("  fence no %d\n", entity->base.seqno);
		cancel_delayed_work(&entity->dwork);
		list_del_init(&entity->list);
		fence_put(&entity->base);
	}
	spin_unlock_irqrestore(&sched->fence_list_lock, flags);
}

static int amd_sched_main(void *param)
{
	struct sched_param sparam = {.sched_priority = 1};
	struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param;
	int r, count;

	spin_lock_init(&sched->fence_list_lock);
	INIT_LIST_HEAD(&sched->fence_list);
	sched_setscheduler(current, SCHED_FIFO, &sparam);

	while (!kthread_should_stop()) {
		struct amd_sched_entity *entity;
		struct amd_sched_fence *s_fence;
		struct amd_sched_job *sched_job;
		struct fence *fence;
		unsigned long flags;

		wait_event_interruptible(sched->wake_up_worker,
			kthread_should_stop() ||
			(sched_job = amd_sched_select_job(sched)));

		if (!sched_job)
			continue;

		entity = sched_job->s_entity;
		s_fence = sched_job->s_fence;

		if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
			INIT_DELAYED_WORK(&s_fence->dwork, amd_sched_fence_work_func);
			schedule_delayed_work(&s_fence->dwork, sched->timeout);
			spin_lock_irqsave(&sched->fence_list_lock, flags);
			list_add_tail(&s_fence->list, &sched->fence_list);
			spin_unlock_irqrestore(&sched->fence_list_lock, flags);
		}

		atomic_inc(&sched->hw_rq_count);
		fence = sched->ops->run_job(sched_job);
		if (fence) {
			r = fence_add_callback(fence, &s_fence->cb,
					       amd_sched_process_job);
			if (r == -ENOENT)
				amd_sched_process_job(fence, &s_fence->cb);
			else if (r)
				DRM_ERROR("fence add callback failed (%d)\n", r);
			fence_put(fence);
		} else {
			DRM_ERROR("Failed to run job!\n");
			amd_sched_process_job(NULL, &s_fence->cb);
		}

		count = kfifo_out(&entity->job_queue, &sched_job,
				  sizeof(sched_job));
		WARN_ON(count != sizeof(sched_job));
		wake_up(&sched->job_scheduled);
	}
	return 0;
}

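/*
 * Ordering note: the job is removed from the entity's fifo only *after*
 * ops->run_job() has been called. Until then amd_sched_entity_is_idle()
 * keeps reporting the entity as busy, so amd_sched_entity_fini() cannot
 * tear the entity down under a job that is still being handed to the
 * hardware. The final wake_up(&sched->job_scheduled) serves both blocked
 * pushers and a waiting fini.
 */
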
/**
 * Init a gpu scheduler instance
 *
 * @sched		The pointer to the scheduler
 * @ops			The backend operations for this scheduler.
 * @hw_submission	Number of hw submissions to do.
 * @timeout		Job timeout in jiffies, or MAX_SCHEDULE_TIMEOUT to disable.
 * @name		Name used for debugging
 *
 * Return 0 on success, otherwise error code.
 */
int amd_sched_init(struct amd_gpu_scheduler *sched,
		   struct amd_sched_backend_ops *ops,
		   unsigned hw_submission, long timeout, const char *name)
{
	sched->ops = ops;
	sched->hw_submission_limit = hw_submission;
	sched->name = name;
	sched->timeout = timeout;
	amd_sched_rq_init(&sched->sched_rq);
	amd_sched_rq_init(&sched->kernel_rq);

	init_waitqueue_head(&sched->wake_up_worker);
	init_waitqueue_head(&sched->job_scheduled);
	atomic_set(&sched->hw_rq_count, 0);
	if (atomic_inc_return(&sched_fence_slab_ref) == 1) {
		sched_fence_slab = kmem_cache_create(
			"amd_sched_fence", sizeof(struct amd_sched_fence), 0,
			SLAB_HWCACHE_ALIGN, NULL);
		if (!sched_fence_slab)
			return -ENOMEM;
	}

	/* Each scheduler will run on a separate kernel thread */
	sched->thread = kthread_run(amd_sched_main, sched, sched->name);
	if (IS_ERR(sched->thread)) {
		DRM_ERROR("Failed to create scheduler for %s.\n", name);
		return PTR_ERR(sched->thread);
	}

	return 0;
}

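/*
 * Minimal init/fini pairing sketch (illustrative; the ring variable, the
 * amdgpu_sched_ops table and the numeric limits are assumptions, not
 * defined in this file):
 *
 *	r = amd_sched_init(&ring->sched, &amdgpu_sched_ops,
 *			   32, msecs_to_jiffies(10000), ring->name);
 *	if (r)
 *		return r;
 *	...
 *	amd_sched_fini(&ring->sched);
 */
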
/**
 * Destroy a gpu scheduler
 *
 * @sched	The pointer to the scheduler
 */
void amd_sched_fini(struct amd_gpu_scheduler *sched)
{
	if (sched->thread)
		kthread_stop(sched->thread);
	if (atomic_dec_and_test(&sched_fence_slab_ref))
		kmem_cache_destroy(sched_fence_slab);
}