/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Dave Airlie
 */
#include <linux/seq_file.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/firmware.h>

#include <drm/drm_debugfs.h>

#include "amdgpu.h"
#include "amdgpu_trace.h"

/*
 * Fences
 * Fences mark an event in the GPU's pipeline and are used
 * for GPU/CPU synchronization. When the fence is written,
 * it is expected that all buffers associated with that fence
 * are no longer in use by the associated ring on the GPU and
 * that the relevant GPU caches have been flushed.
 */

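/*
 * Illustrative sketch (not part of the driver): the fence value written to
 * memory is a 32-bit sequence number that eventually wraps, so "has seq X
 * signaled?" must be answered with wraparound-safe signed arithmetic, e.g.:
 *
 *	static inline bool seq_signaled(uint32_t hw_seq, uint32_t wait_seq)
 *	{
 *		return (int32_t)(hw_seq - wait_seq) >= 0;
 *	}
 *
 * seq_signaled() is a hypothetical helper; amdgpu_fence_wait_polling()
 * below uses the same signed-difference test inline.
 */
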
struct amdgpu_fence {
	struct dma_fence base;

	/* RB, DMA, etc. */
	struct amdgpu_ring		*ring;
};

static struct kmem_cache *amdgpu_fence_slab;

int amdgpu_fence_slab_init(void)
{
	amdgpu_fence_slab = kmem_cache_create(
		"amdgpu_fence", sizeof(struct amdgpu_fence), 0,
		SLAB_HWCACHE_ALIGN, NULL);
	if (!amdgpu_fence_slab)
		return -ENOMEM;
	return 0;
}

void amdgpu_fence_slab_fini(void)
{
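	/* Fences are freed via call_rcu() (see amdgpu_fence_free() below),
	 * so wait for all pending RCU callbacks before destroying the slab.
	 */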
	rcu_barrier();
	kmem_cache_destroy(amdgpu_fence_slab);
}

/*
 * Cast helper
 */
static const struct dma_fence_ops amdgpu_fence_ops;
static inline struct amdgpu_fence *to_amdgpu_fence(struct dma_fence *f)
{
	struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);

	if (__f->base.ops == &amdgpu_fence_ops)
		return __f;

	return NULL;
}

/**
 * amdgpu_fence_write - write a fence value
 *
 * @ring: ring the fence is associated with
 * @seq: sequence number to write
 *
 * Writes a fence value to memory (all asics).
 */
static void amdgpu_fence_write(struct amdgpu_ring *ring, u32 seq)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;

	if (drv->cpu_addr)
		*drv->cpu_addr = cpu_to_le32(seq);
}

/**
 * amdgpu_fence_read - read a fence value
 *
 * @ring: ring the fence is associated with
 *
 * Reads a fence value from memory (all asics).
 * Returns the value of the fence read from memory.
 */
static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	u32 seq = 0;

	if (drv->cpu_addr)
		seq = le32_to_cpu(*drv->cpu_addr);
	else
		seq = atomic_read(&drv->last_seq);

	return seq;
}

/**
 * amdgpu_fence_emit - emit a fence on the requested ring
 *
 * @ring: ring the fence is associated with
 * @f: resulting fence object
 * @flags: flags to pass into the subsequent fence emission
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, negative error code on failure.
 */
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
		      unsigned flags)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_fence *fence;
	struct dma_fence __rcu **ptr;
	uint32_t seq;
	int r;

	fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
	if (fence == NULL)
		return -ENOMEM;

	seq = ++ring->fence_drv.sync_seq;
	fence->ring = ring;
	dma_fence_init(&fence->base, &amdgpu_fence_ops,
		       &ring->fence_drv.lock,
		       adev->fence_context + ring->idx,
		       seq);
	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
			       seq, flags | AMDGPU_FENCE_FLAG_INT);

	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
	if (unlikely(rcu_dereference_protected(*ptr, 1))) {
		struct dma_fence *old;

		rcu_read_lock();
		old = dma_fence_get_rcu_safe(ptr);
		rcu_read_unlock();

		if (old) {
			r = dma_fence_wait(old, false);
			dma_fence_put(old);
			if (r)
				return r;
		}
	}

	/* This function can't be called concurrently anyway, otherwise
	 * emitting the fence would mess up the hardware ring buffer.
	 */
	rcu_assign_pointer(*ptr, dma_fence_get(&fence->base));

	*f = &fence->base;

	return 0;
}

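/*
 * Minimal usage sketch (illustrative only, not a call site in this file):
 * a caller emits a fence, then waits on and releases the returned
 * dma_fence reference.
 *
 *	struct dma_fence *fence;
 *	int r;
 *
 *	r = amdgpu_fence_emit(ring, &fence, 0);
 *	if (r)
 *		return r;
 *	r = dma_fence_wait(fence, false);
 *	dma_fence_put(fence);
 */
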
/**
 * amdgpu_fence_emit_polling - emit a fence on the requested ring
 *
 * @ring: ring the fence is associated with
 * @s: resulting sequence number
 *
 * Emits a fence command on the requested ring (all asics).
 * Used for polling fences.
 * Returns 0 on success, -EINVAL on failure.
 */
int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s)
{
	uint32_t seq;

	if (!s)
		return -EINVAL;

	seq = ++ring->fence_drv.sync_seq;
	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
			       seq, 0);

	*s = seq;

	return 0;
}

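/*
 * Illustrative pairing sketch (an assumption about typical use, e.g.
 * register access paths that cannot rely on interrupts): the returned
 * sequence number feeds straight into amdgpu_fence_wait_polling().
 *
 *	uint32_t seq;
 *
 *	if (!amdgpu_fence_emit_polling(ring, &seq))
 *		amdgpu_fence_wait_polling(ring, seq, timeout);
 */
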
/**
 * amdgpu_fence_schedule_fallback - schedule fallback check
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Start a timer as a fallback to our interrupts.
 */
static void amdgpu_fence_schedule_fallback(struct amdgpu_ring *ring)
{
	mod_timer(&ring->fence_drv.fallback_timer,
		  jiffies + AMDGPU_FENCE_JIFFIES_TIMEOUT);
}

/**
 * amdgpu_fence_process - check for fence activity
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Checks the current fence value and calculates the last
 * signalled fence value. Wakes the fence queue if the
 * sequence number has increased.
 *
 * Returns true if a fence was processed
 */
bool amdgpu_fence_process(struct amdgpu_ring *ring)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	uint32_t seq, last_seq;
	int r;

	do {
		last_seq = atomic_read(&ring->fence_drv.last_seq);
		seq = amdgpu_fence_read(ring);

	} while (atomic_cmpxchg(&drv->last_seq, last_seq, seq) != last_seq);

	if (del_timer(&ring->fence_drv.fallback_timer) &&
	    seq != ring->fence_drv.sync_seq)
		amdgpu_fence_schedule_fallback(ring);

	if (unlikely(seq == last_seq))
		return false;

	last_seq &= drv->num_fences_mask;
	seq &= drv->num_fences_mask;

	do {
		struct dma_fence *fence, **ptr;

		++last_seq;
		last_seq &= drv->num_fences_mask;
		ptr = &drv->fences[last_seq];

		/* There is always exactly one thread signaling this fence slot */
		fence = rcu_dereference_protected(*ptr, 1);
		RCU_INIT_POINTER(*ptr, NULL);

		if (!fence)
			continue;

		r = dma_fence_signal(fence);
		if (!r)
			DMA_FENCE_TRACE(fence, "signaled from irq context\n");
		else
			BUG();

		dma_fence_put(fence);
	} while (last_seq != seq);

	return true;
}

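/*
 * Slot math, for reference: amdgpu_fence_driver_init_ring() below sizes
 * the fences array at num_hw_submission * 2 entries, so with
 * num_hw_submission = 128, num_fences_mask = 255 and sequence number N
 * occupies slot (N & 255). The loop above walks the slots for
 * last_seq + 1 .. seq and signals each stored fence.
 */
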
/**
 * amdgpu_fence_fallback - fallback for hardware interrupts
 *
 * @t: timer context used to obtain the pointer to the ring structure
 *
 * Checks for fence activity.
 */
static void amdgpu_fence_fallback(struct timer_list *t)
{
	struct amdgpu_ring *ring = from_timer(ring, t,
					      fence_drv.fallback_timer);

	if (amdgpu_fence_process(ring))
		DRM_WARN("Fence fallback timer expired on ring %s\n", ring->name);
}

/**
 * amdgpu_fence_wait_empty - wait for all fences to signal
 *
 * @ring: ring to wait on
 *
 * Wait for all fences on the requested ring to signal (all asics).
 * Returns 0 if the fences have passed, error for all other cases.
 */
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
{
	uint64_t seq = READ_ONCE(ring->fence_drv.sync_seq);
	struct dma_fence *fence, **ptr;
	int r;

	if (!seq)
		return 0;

	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
	rcu_read_lock();
	fence = rcu_dereference(*ptr);
	if (!fence || !dma_fence_get_rcu(fence)) {
		rcu_read_unlock();
		return 0;
	}
	rcu_read_unlock();

	r = dma_fence_wait(fence, false);
	dma_fence_put(fence);
	return r;
}

/**
 * amdgpu_fence_wait_polling - busy wait for a given sequence number
 *
 * @ring: ring the fence is associated with
 * @wait_seq: sequence number to wait for
 * @timeout: the timeout for waiting in usecs
 *
 * Busy-waits for the given sequence number on the requested ring (all asics).
 * Returns the remaining timeout if the sequence number signaled in time,
 * 0 on timeout.
 */
signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring,
				      uint32_t wait_seq,
				      signed long timeout)
{
	uint32_t seq;

	do {
		seq = amdgpu_fence_read(ring);
		udelay(5);
		timeout -= 5;
	} while ((int32_t)(wait_seq - seq) > 0 && timeout > 0);

	return timeout > 0 ? timeout : 0;
}
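
/*
 * Timing note, for reference: the poll loop above decrements the timeout
 * by 5 per iteration to match the udelay(5), so the timeout is expressed
 * in microseconds; e.g. timeout = 100 allows at most 20 reads of the
 * fence value before giving up.
 */
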
/**
 * amdgpu_fence_count_emitted - get the count of emitted fences
 *
 * @ring: ring the fence is associated with
 *
 * Get the number of fences emitted on the requested ring (all asics).
 * Returns the number of emitted fences on the ring. Used by the
 * dynpm code to track ring activity.
 */
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
{
	uint64_t emitted;

	/* We are not protected by ring lock when reading the last sequence
	 * but it's ok to report slightly wrong fence count here.
	 */
	amdgpu_fence_process(ring);
	emitted = 0x100000000ull;
	emitted -= atomic_read(&ring->fence_drv.last_seq);
	emitted += READ_ONCE(ring->fence_drv.sync_seq);
	return lower_32_bits(emitted);
}

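/*
 * Worked example of the wraparound arithmetic above: with
 * last_seq = 0xfffffffe and sync_seq = 0x00000001,
 * emitted = 0x100000000 - 0xfffffffe + 0x00000001 = 3, i.e. three
 * fences are outstanding across the 32-bit wrap.
 */
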
/**
 * amdgpu_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring.
 *
 * @ring: ring to start the fence driver on
 * @irq_src: interrupt source to use for this ring
 * @irq_type: interrupt type to use for this ring
 *
 * Make the fence driver ready for processing (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has.
 * Returns 0 for success, errors for failure.
 */
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
				   struct amdgpu_irq_src *irq_src,
				   unsigned irq_type)
{
	struct amdgpu_device *adev = ring->adev;
	uint64_t index;

	if (ring->funcs->type != AMDGPU_RING_TYPE_UVD) {
		ring->fence_drv.cpu_addr = &adev->wb.wb[ring->fence_offs];
		ring->fence_drv.gpu_addr = adev->wb.gpu_addr + (ring->fence_offs * 4);
	} else {
		/* put fence directly behind firmware */
		index = ALIGN(adev->uvd.fw->size, 8);
		ring->fence_drv.cpu_addr = adev->uvd.inst[ring->me].cpu_addr + index;
		ring->fence_drv.gpu_addr = adev->uvd.inst[ring->me].gpu_addr + index;
	}
	amdgpu_fence_write(ring, atomic_read(&ring->fence_drv.last_seq));
	amdgpu_irq_get(adev, irq_src, irq_type);

	ring->fence_drv.irq_src = irq_src;
	ring->fence_drv.irq_type = irq_type;
	ring->fence_drv.initialized = true;

	DRM_DEV_DEBUG(adev->dev, "fence driver on ring %s use gpu addr "
		      "0x%016llx, cpu addr 0x%p\n", ring->name,
		      ring->fence_drv.gpu_addr, ring->fence_drv.cpu_addr);
	return 0;
}

/**
 * amdgpu_fence_driver_init_ring - init the fence driver
 * for the requested ring.
 *
 * @ring: ring to init the fence driver on
 * @num_hw_submission: number of entries on the hardware queue
 *
 * Init the fence driver for the requested ring (all asics).
 * Helper function for amdgpu_fence_driver_init().
 */
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
				  unsigned num_hw_submission)
{
	struct amdgpu_device *adev = ring->adev;
	long timeout;
	int r;

	if (!adev)
		return -EINVAL;

	/* Check that num_hw_submission is a power of two */
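	/* (n & (n - 1)) clears the lowest set bit, so it is zero only for
	 * powers of two: e.g. 256 & 255 == 0, while 96 & 95 == 64.
	 */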
	if ((num_hw_submission & (num_hw_submission - 1)) != 0)
		return -EINVAL;

	ring->fence_drv.cpu_addr = NULL;
	ring->fence_drv.gpu_addr = 0;
	ring->fence_drv.sync_seq = 0;
	atomic_set(&ring->fence_drv.last_seq, 0);
	ring->fence_drv.initialized = false;

	timer_setup(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback, 0);

	ring->fence_drv.num_fences_mask = num_hw_submission * 2 - 1;
	spin_lock_init(&ring->fence_drv.lock);
	ring->fence_drv.fences = kcalloc(num_hw_submission * 2, sizeof(void *),
					 GFP_KERNEL);
	if (!ring->fence_drv.fences)
		return -ENOMEM;

	/* No need to set up the GPU scheduler for the KIQ ring */
	if (ring->funcs->type != AMDGPU_RING_TYPE_KIQ) {
		switch (ring->funcs->type) {
		case AMDGPU_RING_TYPE_GFX:
			timeout = adev->gfx_timeout;
			break;
		case AMDGPU_RING_TYPE_COMPUTE:
			/*
			 * For the non-SRIOV case, no timeout is enforced
			 * on the compute ring by default, unless the user
			 * specifies one.
			 *
			 * For the SRIOV case, always use the timeout
			 * of the gfx ring.
			 */
			if (!amdgpu_sriov_vf(ring->adev))
				timeout = adev->compute_timeout;
			else
				timeout = adev->gfx_timeout;
			break;
		case AMDGPU_RING_TYPE_SDMA:
			timeout = adev->sdma_timeout;
			break;
		default:
			timeout = adev->video_timeout;
			break;
		}

		r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
				   num_hw_submission, amdgpu_job_hang_limit,
				   timeout, ring->name);
		if (r) {
			DRM_ERROR("Failed to create scheduler on ring %s.\n",
				  ring->name);
			return r;
		}
	}

	return 0;
}

/**
 * amdgpu_fence_driver_init - init the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Init the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 * Returns 0 for success.
 */
int amdgpu_fence_driver_init(struct amdgpu_device *adev)
{
	if (amdgpu_debugfs_fence_init(adev))
		dev_err(adev->dev, "fence debugfs file creation failed\n");

	return 0;
}

/**
 * amdgpu_fence_driver_fini - tear down the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Tear down the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
{
	unsigned i, j;
	int r;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->fence_drv.initialized)
			continue;
		r = amdgpu_fence_wait_empty(ring);
		if (r) {
			/* no need to trigger GPU reset as we are unloading */
			amdgpu_fence_driver_force_completion(ring);
		}
		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
		drm_sched_fini(&ring->sched);
		del_timer_sync(&ring->fence_drv.fallback_timer);
		for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
			dma_fence_put(ring->fence_drv.fences[j]);
		kfree(ring->fence_drv.fences);
		ring->fence_drv.fences = NULL;
		ring->fence_drv.initialized = false;
	}
}

/**
 * amdgpu_fence_driver_suspend - suspend the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Suspend the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_suspend(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		/* wait for gpu to finish processing current batch */
		r = amdgpu_fence_wait_empty(ring);
		if (r) {
			/* delay GPU reset to resume */
			amdgpu_fence_driver_force_completion(ring);
		}

		/* disable the interrupt */
		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
	}
}

/**
 * amdgpu_fence_driver_resume - resume the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Resume the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 */
void amdgpu_fence_driver_resume(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		/* enable the interrupt */
		amdgpu_irq_get(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
	}
}

/**
 * amdgpu_fence_driver_force_completion - force signal the latest fence of a ring
 *
 * @ring: ring whose latest fence should be signaled
 *
 */
void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring)
{
	amdgpu_fence_write(ring, ring->fence_drv.sync_seq);
	amdgpu_fence_process(ring);
}

/*
 * Common fence implementation
 */

static const char *amdgpu_fence_get_driver_name(struct dma_fence *fence)
{
	return "amdgpu";
}

static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	return (const char *)fence->ring->name;
}

/**
 * amdgpu_fence_enable_signaling - enable signalling on a fence
 * @f: fence
 *
 * This function is called with the fence lock held and enables fence
 * signalling; it arms the fallback timer so the fence still signals
 * even if the hardware interrupt is missed.
 */
static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	struct amdgpu_ring *ring = fence->ring;

	if (!timer_pending(&ring->fence_drv.fallback_timer))
		amdgpu_fence_schedule_fallback(ring);

	DMA_FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);

	return true;
}

/**
 * amdgpu_fence_free - free up the fence memory
 *
 * @rcu: RCU callback head
 *
 * Free up the fence memory after the RCU grace period.
 */
static void amdgpu_fence_free(struct rcu_head *rcu)
{
	struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	kmem_cache_free(amdgpu_fence_slab, fence);
}

/**
 * amdgpu_fence_release - callback run when the fence can be freed
 *
 * @f: fence
 *
 * This function is called when the reference count becomes zero.
 * It just RCU schedules freeing up the fence.
 */
static void amdgpu_fence_release(struct dma_fence *f)
{
	call_rcu(&f->rcu, amdgpu_fence_free);
}

static const struct dma_fence_ops amdgpu_fence_ops = {
	.get_driver_name = amdgpu_fence_get_driver_name,
	.get_timeline_name = amdgpu_fence_get_timeline_name,
	.enable_signaling = amdgpu_fence_enable_signaling,
	.release = amdgpu_fence_release,
};

/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_fence_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		amdgpu_fence_process(ring);

		seq_printf(m, "--- ring %d (%s) ---\n", i, ring->name);
		seq_printf(m, "Last signaled fence 0x%08x\n",
			   atomic_read(&ring->fence_drv.last_seq));
		seq_printf(m, "Last emitted        0x%08x\n",
			   ring->fence_drv.sync_seq);

		if (ring->funcs->type == AMDGPU_RING_TYPE_GFX ||
		    ring->funcs->type == AMDGPU_RING_TYPE_SDMA) {
			seq_printf(m, "Last signaled trailing fence 0x%08x\n",
				   le32_to_cpu(*ring->trail_fence_cpu_addr));
			seq_printf(m, "Last emitted        0x%08x\n",
				   ring->trail_seq);
		}

		if (ring->funcs->type != AMDGPU_RING_TYPE_GFX)
			continue;

		/* set in CP_VMID_PREEMPT and preemption occurred */
		seq_printf(m, "Last preempted      0x%08x\n",
			   le32_to_cpu(*(ring->fence_drv.cpu_addr + 2)));
		/* set in CP_VMID_RESET and reset occurred */
		seq_printf(m, "Last reset          0x%08x\n",
			   le32_to_cpu(*(ring->fence_drv.cpu_addr + 4)));
		/* Both preemption and reset occurred */
		seq_printf(m, "Last both           0x%08x\n",
			   le32_to_cpu(*(ring->fence_drv.cpu_addr + 6)));
	}
	return 0;
}

/**
 * amdgpu_debugfs_gpu_recover - manually trigger a gpu reset & recover
 *
 * Manually trigger a GPU reset and recovery.
 */
static int amdgpu_debugfs_gpu_recover(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;

	seq_printf(m, "gpu recover\n");
	amdgpu_device_gpu_recover(adev, NULL);

	return 0;
}

static const struct drm_info_list amdgpu_debugfs_fence_list[] = {
	{"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL},
	{"amdgpu_gpu_recover", &amdgpu_debugfs_gpu_recover, 0, NULL}
};

static const struct drm_info_list amdgpu_debugfs_fence_list_sriov[] = {
	{"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL},
};
#endif

int amdgpu_debugfs_fence_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	if (amdgpu_sriov_vf(adev))
		return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_fence_list_sriov, 1);
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_fence_list, 2);
#else
	return 0;
#endif
}