/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Dave Airlie
 */
#include <linux/seq_file.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

/*
 * Fences
 * Fences mark an event in the GPU's pipeline and are used
 * for GPU/CPU synchronization.  When the fence is written,
 * it is expected that all buffers associated with that fence
 * are no longer in use by the associated ring on the GPU and
 * that the relevant GPU caches have been flushed.
 */

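/*
 * Illustrative usage (editor's sketch, not code from this file): a
 * submission path emits a fence and a consumer waits on it.  Assumes a
 * valid ring; fence_wait() and fence_put() are the generic struct fence
 * helpers.
 *
 *	struct fence *f;
 *	int r;
 *
 *	r = amdgpu_fence_emit(ring, &f);	// fence packet + seq on the ring
 *	if (r)
 *		return r;
 *	r = fence_wait(f, false);		// blocks until the GPU writes
 *						// the seq back to memory
 *	fence_put(f);
 */
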
struct amdgpu_fence {
        struct fence base;

        /* RB, DMA, etc. */
        struct amdgpu_ring *ring;
};

static struct kmem_cache *amdgpu_fence_slab;

int amdgpu_fence_slab_init(void)
{
        amdgpu_fence_slab = kmem_cache_create(
                "amdgpu_fence", sizeof(struct amdgpu_fence), 0,
                SLAB_HWCACHE_ALIGN, NULL);
        if (!amdgpu_fence_slab)
                return -ENOMEM;
        return 0;
}

void amdgpu_fence_slab_fini(void)
{
        kmem_cache_destroy(amdgpu_fence_slab);
}
/*
 * Cast helper
 */
static const struct fence_ops amdgpu_fence_ops;
static inline struct amdgpu_fence *to_amdgpu_fence(struct fence *f)
{
        struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);

        if (__f->base.ops == &amdgpu_fence_ops)
                return __f;

        return NULL;
}

/**
 * amdgpu_fence_write - write a fence value
 *
 * @ring: ring the fence is associated with
 * @seq: sequence number to write
 *
 * Writes a fence value to memory (all asics).
 */
static void amdgpu_fence_write(struct amdgpu_ring *ring, u32 seq)
{
        struct amdgpu_fence_driver *drv = &ring->fence_drv;

        if (drv->cpu_addr)
                *drv->cpu_addr = cpu_to_le32(seq);
}

/**
 * amdgpu_fence_read - read a fence value
 *
 * @ring: ring the fence is associated with
 *
 * Reads a fence value from memory (all asics).
 * Returns the value of the fence read from memory.
 */
static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
{
        struct amdgpu_fence_driver *drv = &ring->fence_drv;
        u32 seq = 0;

        if (drv->cpu_addr)
                seq = le32_to_cpu(*drv->cpu_addr);
        else
                seq = atomic_read(&drv->last_seq);

        return seq;
}

/**
 * amdgpu_fence_emit - emit a fence on the requested ring
 *
 * @ring: ring the fence is associated with
 * @f: resulting fence object
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, -ENOMEM on failure.
 */
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **f)
{
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_fence *fence;
        struct fence *old, **ptr;
        uint32_t seq;

        fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
        if (fence == NULL)
                return -ENOMEM;

        seq = ++ring->fence_drv.sync_seq;
        fence->ring = ring;
        fence_init(&fence->base, &amdgpu_fence_ops,
                   &ring->fence_drv.lock,
                   adev->fence_context + ring->idx,
                   seq);
        amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
                               seq, AMDGPU_FENCE_FLAG_INT);

        ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
        /* This function can't be called concurrently anyway, otherwise
         * emitting the fence would mess up the hardware ring buffer.
         */
        old = rcu_dereference_protected(*ptr, 1);
        if (old && !fence_is_signaled(old)) {
                DRM_INFO("rcu slot is busy\n");
                fence_wait(old, false);
        }

        rcu_assign_pointer(*ptr, fence_get(&fence->base));

        *f = &fence->base;

        return 0;
}

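/*
 * Worked example for the slot indexing above (editor's sketch): with
 * num_hw_submission = 32 the fences array has 64 entries and
 * num_fences_mask = 63, so seq = 130 lands in slot 130 & 63 = 2.  A slot
 * can only still be busy if its previous fence, emitted 64 sequence
 * numbers earlier, has not signaled yet; the fence_wait() above
 * throttles emission in that unlikely case.
 */
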
/**
 * amdgpu_fence_schedule_fallback - schedule fallback check
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Start a timer as fallback to our interrupts.
 */
static void amdgpu_fence_schedule_fallback(struct amdgpu_ring *ring)
{
        mod_timer(&ring->fence_drv.fallback_timer,
                  jiffies + AMDGPU_FENCE_JIFFIES_TIMEOUT);
}

/**
 * amdgpu_fence_process - check for fence activity
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Checks the current fence value and calculates the last
 * signalled fence value.  Wakes the fence queue if the
 * sequence number has increased.
 */
void amdgpu_fence_process(struct amdgpu_ring *ring)
{
        struct amdgpu_fence_driver *drv = &ring->fence_drv;
        uint32_t seq, last_seq;
        int r;

        do {
                last_seq = atomic_read(&ring->fence_drv.last_seq);
                seq = amdgpu_fence_read(ring);

        } while (atomic_cmpxchg(&drv->last_seq, last_seq, seq) != last_seq);

        if (seq != ring->fence_drv.sync_seq)
                amdgpu_fence_schedule_fallback(ring);

        while (last_seq != seq) {
                struct fence *fence, **ptr;

                ptr = &drv->fences[++last_seq & drv->num_fences_mask];

                /* There is always exactly one thread signaling this fence slot */
                fence = rcu_dereference_protected(*ptr, 1);
                RCU_INIT_POINTER(*ptr, NULL);

                BUG_ON(!fence);

                r = fence_signal(fence);
                if (!r)
                        FENCE_TRACE(fence, "signaled from irq context\n");
                else
                        BUG();

                fence_put(fence);
        }
}

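/*
 * Worked example (editor's sketch): if last_seq was 5 and
 * amdgpu_fence_read() returns 8, the loop above signals the fences for
 * seq 6, 7 and 8 in order.  Both the interrupt handler and the fallback
 * timer may call amdgpu_fence_process() concurrently; the
 * atomic_cmpxchg() on last_seq guarantees each sequence number is
 * claimed, and therefore signaled, exactly once.
 */
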
/**
 * amdgpu_fence_fallback - fallback for hardware interrupts
 *
 * @arg: pointer to the ring, cast to unsigned long by setup_timer()
 *
 * Checks for fence activity.
 */
static void amdgpu_fence_fallback(unsigned long arg)
{
        struct amdgpu_ring *ring = (void *)arg;

        amdgpu_fence_process(ring);
}

/**
 * amdgpu_fence_wait_empty - wait for all fences to signal
 *
 * @ring: ring the fences are associated with
 *
 * Wait for all fences on the requested ring to signal (all asics).
 * Returns 0 if the fences have passed, error for all other cases.
 */
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
{
        uint64_t seq = ACCESS_ONCE(ring->fence_drv.sync_seq);
        struct fence *fence, **ptr;
        int r;

        if (!seq)
                return 0;

        ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
        rcu_read_lock();
        fence = rcu_dereference(*ptr);
        if (!fence || !fence_get_rcu(fence)) {
                rcu_read_unlock();
                return 0;
        }
        rcu_read_unlock();

        r = fence_wait(fence, false);
        fence_put(fence);
        return r;
}

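/*
 * Note on the lookup above (editor's sketch): the slot holding the
 * newest fence may be recycled concurrently by amdgpu_fence_emit(), so
 * the pointer is read under rcu_read_lock() and pinned with
 * fence_get_rcu().  If the slot is NULL, or the reference count has
 * already dropped to zero, the fence has signaled and been released,
 * and there is nothing left to wait for.
 */
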
/**
 * amdgpu_fence_count_emitted - get the count of emitted fences
 *
 * @ring: ring the fence is associated with
 *
 * Get the number of fences emitted on the requested ring (all asics).
 * Returns the number of emitted fences on the ring.  Used by the
 * dynpm code to track ring activity.
 */
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
{
        uint64_t emitted;

        /* We are not protected by ring lock when reading the last sequence
         * but it's ok to report slightly wrong fence count here.
         */
        amdgpu_fence_process(ring);
        emitted = 0x100000000ull;
        emitted -= atomic_read(&ring->fence_drv.last_seq);
        emitted += ACCESS_ONCE(ring->fence_drv.sync_seq);
        return lower_32_bits(emitted);
}

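/*
 * Worked example (editor's sketch): the 64-bit arithmetic keeps the
 * count correct across a 32-bit wrap.  With last_seq = 0xfffffffe and
 * sync_seq = 0x00000001 (just wrapped), emitted = 0x100000000
 * - 0xfffffffe + 0x1 = 3, i.e. three fences are still outstanding.
 */
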
/**
 * amdgpu_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring.
 *
 * @ring: ring to start the fence driver on
 * @irq_src: interrupt source to use for this ring
 * @irq_type: interrupt type to use for this ring
 *
 * Make the fence driver ready for processing (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has.
 * Returns 0 for success, errors for failure.
 */
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
                                   struct amdgpu_irq_src *irq_src,
                                   unsigned irq_type)
{
        struct amdgpu_device *adev = ring->adev;
        uint64_t index;

        if (ring != &adev->uvd.ring) {
                ring->fence_drv.cpu_addr = &adev->wb.wb[ring->fence_offs];
                ring->fence_drv.gpu_addr = adev->wb.gpu_addr + (ring->fence_offs * 4);
        } else {
                /* put fence directly behind firmware */
                index = ALIGN(adev->uvd.fw->size, 8);
                ring->fence_drv.cpu_addr = adev->uvd.cpu_addr + index;
                ring->fence_drv.gpu_addr = adev->uvd.gpu_addr + index;
        }
        amdgpu_fence_write(ring, atomic_read(&ring->fence_drv.last_seq));
        amdgpu_irq_get(adev, irq_src, irq_type);

        ring->fence_drv.irq_src = irq_src;
        ring->fence_drv.irq_type = irq_type;
        ring->fence_drv.initialized = true;

        dev_info(adev->dev, "fence driver on ring %d use gpu addr 0x%016llx, "
                 "cpu addr 0x%p\n", ring->idx,
                 ring->fence_drv.gpu_addr, ring->fence_drv.cpu_addr);
        return 0;
}

/**
 * amdgpu_fence_driver_init_ring - init the fence driver
 * for the requested ring.
 *
 * @ring: ring to init the fence driver on
 * @num_hw_submission: number of entries on the hardware queue
 *
 * Init the fence driver for the requested ring (all asics).
 * Helper function for amdgpu_fence_driver_init().
 */
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
                                  unsigned num_hw_submission)
{
        long timeout;
        int r;

        /* Check that num_hw_submission is a power of two */
        if ((num_hw_submission & (num_hw_submission - 1)) != 0)
                return -EINVAL;

        ring->fence_drv.cpu_addr = NULL;
        ring->fence_drv.gpu_addr = 0;
        ring->fence_drv.sync_seq = 0;
        atomic_set(&ring->fence_drv.last_seq, 0);
        ring->fence_drv.initialized = false;

        setup_timer(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback,
                    (unsigned long)ring);

        ring->fence_drv.num_fences_mask = num_hw_submission * 2 - 1;
        spin_lock_init(&ring->fence_drv.lock);
        ring->fence_drv.fences = kcalloc(num_hw_submission * 2, sizeof(void *),
                                         GFP_KERNEL);
        if (!ring->fence_drv.fences)
                return -ENOMEM;

        timeout = msecs_to_jiffies(amdgpu_lockup_timeout);
        if (timeout == 0) {
                /*
                 * FIXME:
                 * A delayed workqueue cannot take MAX_SCHEDULE_TIMEOUT
                 * directly, so the scheduler will not use a delayed
                 * workqueue when the timeout is MAX_SCHEDULE_TIMEOUT.
                 * Currently keep it simple and silly.
                 */
                timeout = MAX_SCHEDULE_TIMEOUT;
        }
        r = amd_sched_init(&ring->sched, &amdgpu_sched_ops,
                           num_hw_submission,
                           timeout, ring->name);
        if (r) {
                DRM_ERROR("Failed to create scheduler on ring %s.\n",
                          ring->name);
                return r;
        }

        return 0;
}

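/*
 * Why the power-of-two check matters (editor's sketch): with
 * num_hw_submission = 32, num_fences_mask = 64 - 1 = 0x3f, and
 * "seq & num_fences_mask" equals "seq % 64" even when the 32-bit
 * sequence number wraps around.  For a non-power-of-two array size the
 * masked index and the modulo would disagree after a wrap, corrupting
 * the slot mapping.
 */
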
/**
 * amdgpu_fence_driver_init - init the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Init the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 * Returns 0 for success.
 */
int amdgpu_fence_driver_init(struct amdgpu_device *adev)
{
        if (amdgpu_debugfs_fence_init(adev))
                dev_err(adev->dev, "fence debugfs file creation failed\n");

        return 0;
}

/**
 * amdgpu_fence_driver_fini - tear down the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Tear down the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
{
        unsigned i, j;
        int r;

        for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
                struct amdgpu_ring *ring = adev->rings[i];

                if (!ring || !ring->fence_drv.initialized)
                        continue;
                r = amdgpu_fence_wait_empty(ring);
                if (r) {
                        /* no need to trigger GPU reset as we are unloading */
                        amdgpu_fence_driver_force_completion(adev);
                }
                amdgpu_irq_put(adev, ring->fence_drv.irq_src,
                               ring->fence_drv.irq_type);
                amd_sched_fini(&ring->sched);
                del_timer_sync(&ring->fence_drv.fallback_timer);
                for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
                        fence_put(ring->fence_drv.fences[j]);
                kfree(ring->fence_drv.fences);
                ring->fence_drv.initialized = false;
        }
}

/**
 * amdgpu_fence_driver_suspend - suspend the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Suspend the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_suspend(struct amdgpu_device *adev)
{
        int i, r;

        for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
                struct amdgpu_ring *ring = adev->rings[i];
                if (!ring || !ring->fence_drv.initialized)
                        continue;

                /* wait for gpu to finish processing current batch */
                r = amdgpu_fence_wait_empty(ring);
                if (r) {
                        /* delay GPU reset to resume */
                        amdgpu_fence_driver_force_completion(adev);
                }

                /* disable the interrupt */
                amdgpu_irq_put(adev, ring->fence_drv.irq_src,
                               ring->fence_drv.irq_type);
        }
}

/**
 * amdgpu_fence_driver_resume - resume the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Resume the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 */
void amdgpu_fence_driver_resume(struct amdgpu_device *adev)
{
        int i;

        for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
                struct amdgpu_ring *ring = adev->rings[i];
                if (!ring || !ring->fence_drv.initialized)
                        continue;

                /* enable the interrupt */
                amdgpu_irq_get(adev, ring->fence_drv.irq_src,
                               ring->fence_drv.irq_type);
        }
}

/**
 * amdgpu_fence_driver_force_completion - force all fence waiters to complete
 *
 * @adev: amdgpu device pointer
 *
 * In case of GPU reset failure make sure no process keeps waiting on a fence
 * that will never complete.
 */
void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev)
{
        int i;

        for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
                struct amdgpu_ring *ring = adev->rings[i];
                if (!ring || !ring->fence_drv.initialized)
                        continue;

                amdgpu_fence_write(ring, ring->fence_drv.sync_seq);
        }
}

/*
 * Common fence implementation
 */

static const char *amdgpu_fence_get_driver_name(struct fence *fence)
{
        return "amdgpu";
}

static const char *amdgpu_fence_get_timeline_name(struct fence *f)
{
        struct amdgpu_fence *fence = to_amdgpu_fence(f);
        return (const char *)fence->ring->name;
}

/**
 * amdgpu_fence_enable_signaling - enable signalling on fence
 * @f: fence
 *
 * This function is called with fence_drv.lock held (the spinlock passed
 * to fence_init()).  It arms the fallback timer, if it is not already
 * pending, so the fence still gets signaled if an interrupt is missed.
 */
static bool amdgpu_fence_enable_signaling(struct fence *f)
{
        struct amdgpu_fence *fence = to_amdgpu_fence(f);
        struct amdgpu_ring *ring = fence->ring;

        if (!timer_pending(&ring->fence_drv.fallback_timer))
                amdgpu_fence_schedule_fallback(ring);

        FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);

        return true;
}

/**
 * amdgpu_fence_free - free up the fence memory
 *
 * @rcu: RCU callback head
 *
 * Free up the fence memory after the RCU grace period.
 */
static void amdgpu_fence_free(struct rcu_head *rcu)
{
        struct fence *f = container_of(rcu, struct fence, rcu);
        struct amdgpu_fence *fence = to_amdgpu_fence(f);
        kmem_cache_free(amdgpu_fence_slab, fence);
}

/**
 * amdgpu_fence_release - callback that fence can be freed
 *
 * @f: fence
 *
 * This function is called when the reference count becomes zero.
 * It just RCU schedules freeing up the fence.
 */
static void amdgpu_fence_release(struct fence *f)
{
        call_rcu(&f->rcu, amdgpu_fence_free);
}

static const struct fence_ops amdgpu_fence_ops = {
        .get_driver_name = amdgpu_fence_get_driver_name,
        .get_timeline_name = amdgpu_fence_get_timeline_name,
        .enable_signaling = amdgpu_fence_enable_signaling,
        .wait = fence_default_wait,
        .release = amdgpu_fence_release,
};

/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_fence_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *)m->private;
        struct drm_device *dev = node->minor->dev;
        struct amdgpu_device *adev = dev->dev_private;
        int i;

        for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                struct amdgpu_ring *ring = adev->rings[i];
                if (!ring || !ring->fence_drv.initialized)
                        continue;

                amdgpu_fence_process(ring);

                seq_printf(m, "--- ring %d (%s) ---\n", i, ring->name);
                seq_printf(m, "Last signaled fence 0x%08x\n",
                           atomic_read(&ring->fence_drv.last_seq));
                seq_printf(m, "Last emitted 0x%08x\n",
                           ring->fence_drv.sync_seq);
        }
        return 0;
}

/**
 * amdgpu_debugfs_gpu_reset - manually trigger a gpu reset
 *
 * Manually trigger a gpu reset when this debugfs file is read.
 */
static int amdgpu_debugfs_gpu_reset(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *)m->private;
        struct drm_device *dev = node->minor->dev;
        struct amdgpu_device *adev = dev->dev_private;

        seq_printf(m, "gpu reset\n");
        amdgpu_gpu_reset(adev);

        return 0;
}

static const struct drm_info_list amdgpu_debugfs_fence_list[] = {
        {"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL},
        {"amdgpu_gpu_reset", &amdgpu_debugfs_gpu_reset, 0, NULL}
};
#endif

int amdgpu_debugfs_fence_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
        return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_fence_list, 2);
#else
        return 0;
#endif
}
659 |