/*
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
#include <linux/seq_file.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
/*
 * Fences
 * Fences mark an event in the GPU's pipeline and are used
 * for GPU/CPU synchronization.  When the fence is written,
 * it is expected that all buffers associated with that fence
 * are no longer in use by the associated ring on the GPU and
 * that the relevant GPU caches have been flushed.
 */
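
/*
 * A rough sketch of the fence lifecycle as implemented below
 * (illustrative only; error handling omitted):
 *
 *	struct dma_fence *f;
 *
 *	amdgpu_fence_emit(ring, &f);	// write fence cmd into the ring
 *	...				// GPU executes, then writes seq
 *	amdgpu_fence_process(ring);	// IRQ/timer: signal finished fences
 *	dma_fence_wait(f, false);	// CPU side blocks until signaled
 *	dma_fence_put(f);
 */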
struct amdgpu_fence {
	struct dma_fence base;

	/* RB, DMA, etc. */
	struct amdgpu_ring		*ring;
};
static struct kmem_cache *amdgpu_fence_slab;
int amdgpu_fence_slab_init(void)
{
	amdgpu_fence_slab = kmem_cache_create(
		"amdgpu_fence", sizeof(struct amdgpu_fence), 0,
		SLAB_HWCACHE_ALIGN, NULL);
	if (!amdgpu_fence_slab)
		return -ENOMEM;
	return 0;
}
void amdgpu_fence_slab_fini(void)
{
	/* wait for RCU-deferred fence frees before destroying the cache */
	rcu_barrier();
	kmem_cache_destroy(amdgpu_fence_slab);
}
/*
 * Cast helper
 */
static const struct dma_fence_ops amdgpu_fence_ops;
static inline struct amdgpu_fence *to_amdgpu_fence(struct dma_fence *f)
{
	struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);

	if (__f->base.ops == &amdgpu_fence_ops)
		return __f;

	return NULL;
}
/**
 * amdgpu_fence_write - write a fence value
 *
 * @ring: ring the fence is associated with
 * @seq: sequence number to write
 *
 * Writes a fence value to memory (all asics).
 */
static void amdgpu_fence_write(struct amdgpu_ring *ring, u32 seq)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;

	if (drv->cpu_addr)
		*drv->cpu_addr = cpu_to_le32(seq);
}
/**
 * amdgpu_fence_read - read a fence value
 *
 * @ring: ring the fence is associated with
 *
 * Reads a fence value from memory (all asics).
 * Returns the value of the fence read from memory.
 */
static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	u32 seq = 0;

	if (drv->cpu_addr)
		seq = le32_to_cpu(*drv->cpu_addr);
	else
		seq = atomic_read(&drv->last_seq);

	return seq;
}
/**
 * amdgpu_fence_emit - emit a fence on the requested ring
 *
 * @ring: ring the fence is associated with
 * @f: resulting fence object
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, -ENOMEM on failure.
 */
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_fence *fence;
	struct dma_fence *old, **ptr;
	uint32_t seq;

	fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
	if (fence == NULL)
		return -ENOMEM;

	seq = ++ring->fence_drv.sync_seq;
	fence->ring = ring;
	dma_fence_init(&fence->base, &amdgpu_fence_ops,
		       &ring->fence_drv.lock,
		       adev->fence_context + ring->idx,
		       seq);
	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
			       seq, AMDGPU_FENCE_FLAG_INT);

	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
	/* This function can't be called concurrently anyway, otherwise
	 * emitting the fence would mess up the hardware ring buffer.
	 */
	old = rcu_dereference_protected(*ptr, 1);
	if (old && !dma_fence_is_signaled(old)) {
		DRM_INFO("rcu slot is busy\n");
		dma_fence_wait(old, false);
	}

	rcu_assign_pointer(*ptr, dma_fence_get(&fence->base));

	*f = &fence->base;

	return 0;
}
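
/*
 * Worked example of the slot indexing above (illustrative numbers,
 * assuming amdgpu_fence_driver_init_ring() was called with
 * num_hw_submission = 32): num_fences_mask = 32 * 2 - 1 = 0x3f, so
 * seq 0x00 and seq 0x40 map to the same slot.  A slot is only reused
 * once its previous fence has signaled; otherwise the wait above
 * throttles emission, bounding outstanding fences to twice the
 * hardware queue depth.
 */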
/**
 * amdgpu_fence_emit_polling - emit a fence on the requested ring
 *
 * @ring: ring the fence is associated with
 * @s: resulting sequence number
 *
 * Emits a fence command on the requested ring (all asics).
 * Used for polling fences.
 * Returns 0 on success, -EINVAL if no sequence pointer is provided.
 */
int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s)
{
	uint32_t seq;

	if (!s)
		return -EINVAL;

	seq = ++ring->fence_drv.sync_seq;
	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
			       seq, AMDGPU_FENCE_FLAG_INT);

	*s = seq;

	return 0;
}
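
/*
 * Minimal polling-mode sketch (illustrative; "timeout_usecs" is a
 * hypothetical caller-chosen budget).  This is the intended pairing
 * with amdgpu_fence_wait_polling() below:
 *
 *	uint32_t seq;
 *
 *	if (!amdgpu_fence_emit_polling(ring, &seq))
 *		amdgpu_fence_wait_polling(ring, seq, timeout_usecs);
 */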
/**
 * amdgpu_fence_schedule_fallback - schedule fallback check
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Start a timer as fallback to our interrupts.
 */
static void amdgpu_fence_schedule_fallback(struct amdgpu_ring *ring)
{
	mod_timer(&ring->fence_drv.fallback_timer,
		  jiffies + AMDGPU_FENCE_JIFFIES_TIMEOUT);
}
/**
 * amdgpu_fence_process - check for fence activity
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Checks the current fence value and calculates the last
 * signalled fence value. Wakes the fence queue if the
 * sequence number has increased.
 */
void amdgpu_fence_process(struct amdgpu_ring *ring)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	uint32_t seq, last_seq;
	int r;

	do {
		last_seq = atomic_read(&ring->fence_drv.last_seq);
		seq = amdgpu_fence_read(ring);

	} while (atomic_cmpxchg(&drv->last_seq, last_seq, seq) != last_seq);

	if (seq != ring->fence_drv.sync_seq)
		amdgpu_fence_schedule_fallback(ring);

	if (unlikely(seq == last_seq))
		return;

	last_seq &= drv->num_fences_mask;
	seq &= drv->num_fences_mask;

	do {
		struct dma_fence *fence, **ptr;

		++last_seq;
		last_seq &= drv->num_fences_mask;
		ptr = &drv->fences[last_seq];

		/* There is always exactly one thread signaling this fence slot */
		fence = rcu_dereference_protected(*ptr, 1);
		RCU_INIT_POINTER(*ptr, NULL);

		if (!fence)
			continue;

		r = dma_fence_signal(fence);
		if (!r)
			DMA_FENCE_TRACE(fence, "signaled from irq context\n");
		else
			BUG();

		dma_fence_put(fence);
	} while (last_seq != seq);
}
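
/*
 * For orientation (a sketch, not a complete list): amdgpu_fence_process()
 * is driven from the fence interrupt handlers of the individual IP blocks
 * (outside this file), from the fallback timer below when an interrupt was
 * missed, and opportunistically from amdgpu_fence_count_emitted() and the
 * debugfs dump at the bottom of this file.
 */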
/**
 * amdgpu_fence_fallback - fallback for hardware interrupts
 *
 * @arg: pointer to the ring, passed as the timer data
 *
 * Checks for fence activity.
 */
static void amdgpu_fence_fallback(unsigned long arg)
{
	struct amdgpu_ring *ring = (void *)arg;

	amdgpu_fence_process(ring);
}
/**
 * amdgpu_fence_wait_empty - wait for all fences to signal
 *
 * @ring: ring the fence is associated with
 *
 * Wait for all fences on the requested ring to signal (all asics).
 * Returns 0 if the fences have passed, error for all other cases.
 */
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
{
	uint64_t seq = ACCESS_ONCE(ring->fence_drv.sync_seq);
	struct dma_fence *fence, **ptr;
	int r;

	if (!seq)
		return 0;

	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
	rcu_read_lock();
	fence = rcu_dereference(*ptr);
	if (!fence || !dma_fence_get_rcu(fence)) {
		rcu_read_unlock();
		return 0;
	}
	rcu_read_unlock();

	r = dma_fence_wait(fence, false);
	dma_fence_put(fence);
	return r;
}
/**
 * amdgpu_fence_wait_polling - busy wait for a given sequence number
 *
 * @ring: ring the fence is associated with
 * @wait_seq: sequence number to wait for
 * @timeout: the timeout for waiting in usecs
 *
 * Wait for all fences below @wait_seq on the requested ring to signal
 * (all asics).
 * Returns the remaining timeout if the wait completed, or 0 on timeout.
 */
signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring,
				      uint32_t wait_seq,
				      signed long timeout)
{
	uint32_t seq;

	do {
		seq = amdgpu_fence_read(ring);
		udelay(5);
		timeout -= 5;
	} while ((int32_t)(wait_seq - seq) > 0 && timeout > 0);

	return timeout > 0 ? timeout : 0;
}
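
/*
 * Why the signed cast above is needed (worked example with 32-bit
 * wraparound): if wait_seq = 0x00000002 was emitted just after the
 * counter wrapped while the hardware still reports seq = 0xfffffffe,
 * then wait_seq - seq = 0x00000004, which is positive as an int32_t,
 * so we keep polling.  A plain unsigned compare would wrongly treat
 * the fence as already signaled.
 */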
/**
 * amdgpu_fence_count_emitted - get the count of emitted fences
 *
 * @ring: ring the fence is associated with
 *
 * Get the number of fences emitted on the requested ring (all asics).
 * Returns the number of emitted fences on the ring.  Used by the
 * dynpm code to track ring activity.
 */
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
{
	uint64_t emitted;

	/* We are not protected by ring lock when reading the last sequence
	 * but it's ok to report slightly wrong fence count here.
	 */
	amdgpu_fence_process(ring);
	emitted = 0x100000000ull;
	emitted -= atomic_read(&ring->fence_drv.last_seq);
	emitted += ACCESS_ONCE(ring->fence_drv.sync_seq);
	return lower_32_bits(emitted);
}
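
/*
 * Worked example of the arithmetic above (illustrative numbers): with
 * sync_seq = 5 fences emitted and last_seq = 3 already signaled,
 * emitted = 0x100000000ull - 3 + 5 = 0x100000002, whose lower 32 bits
 * give 2 outstanding fences.  Starting from the 2^32 bias keeps the
 * intermediate value from underflowing when the 32-bit sequence
 * counter wraps.
 */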
/**
 * amdgpu_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring.
 *
 * @ring: ring to start the fence driver on
 * @irq_src: interrupt source to use for this ring
 * @irq_type: interrupt type to use for this ring
 *
 * Make the fence driver ready for processing (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has.
 * Returns 0 for success, errors for failure.
 */
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
				   struct amdgpu_irq_src *irq_src,
				   unsigned irq_type)
{
	struct amdgpu_device *adev = ring->adev;
	uint64_t index;

	if (ring != &adev->uvd.ring) {
		ring->fence_drv.cpu_addr = &adev->wb.wb[ring->fence_offs];
		ring->fence_drv.gpu_addr = adev->wb.gpu_addr + (ring->fence_offs * 4);
	} else {
		/* put fence directly behind firmware */
		index = ALIGN(adev->uvd.fw->size, 8);
		ring->fence_drv.cpu_addr = adev->uvd.cpu_addr + index;
		ring->fence_drv.gpu_addr = adev->uvd.gpu_addr + index;
	}
	amdgpu_fence_write(ring, atomic_read(&ring->fence_drv.last_seq));
	amdgpu_irq_get(adev, irq_src, irq_type);

	ring->fence_drv.irq_src = irq_src;
	ring->fence_drv.irq_type = irq_type;
	ring->fence_drv.initialized = true;

	dev_info(adev->dev, "fence driver on ring %d use gpu addr 0x%016llx, "
		 "cpu addr 0x%p\n", ring->idx,
		 ring->fence_drv.gpu_addr, ring->fence_drv.cpu_addr);
	return 0;
}
/**
 * amdgpu_fence_driver_init_ring - init the fence driver
 * for the requested ring.
 *
 * @ring: ring to init the fence driver on
 * @num_hw_submission: number of entries on the hardware queue
 *
 * Init the fence driver for the requested ring (all asics).
 * Helper function for amdgpu_fence_driver_init().
 */
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
				  unsigned num_hw_submission)
{
	long timeout;
	int r;

	/* Check that num_hw_submission is a power of two */
	if ((num_hw_submission & (num_hw_submission - 1)) != 0)
		return -EINVAL;

	ring->fence_drv.cpu_addr = NULL;
	ring->fence_drv.gpu_addr = 0;
	ring->fence_drv.sync_seq = 0;
	atomic_set(&ring->fence_drv.last_seq, 0);
	ring->fence_drv.initialized = false;

	setup_timer(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback,
		    (unsigned long)ring);

	ring->fence_drv.num_fences_mask = num_hw_submission * 2 - 1;
	spin_lock_init(&ring->fence_drv.lock);
	ring->fence_drv.fences = kcalloc(num_hw_submission * 2, sizeof(void *),
					 GFP_KERNEL);
	if (!ring->fence_drv.fences)
		return -ENOMEM;

	/* No need to setup the GPU scheduler for KIQ ring */
	if (ring->funcs->type != AMDGPU_RING_TYPE_KIQ) {
		timeout = msecs_to_jiffies(amdgpu_lockup_timeout);
		if (timeout == 0) {
			/*
			 * FIXME:
			 * Delayed workqueue cannot use it directly,
			 * so the scheduler will not use delayed workqueue if
			 * MAX_SCHEDULE_TIMEOUT is set.
			 * Currently keep it simple and silly.
			 */
			timeout = MAX_SCHEDULE_TIMEOUT;
		}
		r = amd_sched_init(&ring->sched, &amdgpu_sched_ops,
				   num_hw_submission, amdgpu_job_hang_limit,
				   timeout, ring->name);
		if (r) {
			DRM_ERROR("Failed to create scheduler on ring %s.\n",
				  ring->name);
			return r;
		}
	}

	return 0;
}
/**
 * amdgpu_fence_driver_init - init the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Init the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 * Returns 0 for success.
 */
int amdgpu_fence_driver_init(struct amdgpu_device *adev)
{
	if (amdgpu_debugfs_fence_init(adev))
		dev_err(adev->dev, "fence debugfs file creation failed\n");

	return 0;
}
/**
 * amdgpu_fence_driver_fini - tear down the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Tear down the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
{
	unsigned i, j;
	int r;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->fence_drv.initialized)
			continue;
		r = amdgpu_fence_wait_empty(ring);
		if (r) {
			/* no need to trigger GPU reset as we are unloading */
			amdgpu_fence_driver_force_completion(adev);
		}
		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
		amd_sched_fini(&ring->sched);
		del_timer_sync(&ring->fence_drv.fallback_timer);
		for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
			dma_fence_put(ring->fence_drv.fences[j]);
		kfree(ring->fence_drv.fences);
		ring->fence_drv.fences = NULL;
		ring->fence_drv.initialized = false;
	}
}
/**
 * amdgpu_fence_driver_suspend - suspend the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Suspend the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_suspend(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		/* wait for gpu to finish processing current batch */
		r = amdgpu_fence_wait_empty(ring);
		if (r) {
			/* delay GPU reset to resume */
			amdgpu_fence_driver_force_completion(adev);
		}

		/* disable the interrupt */
		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
	}
}
/**
 * amdgpu_fence_driver_resume - resume the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Resume the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 */
void amdgpu_fence_driver_resume(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		/* enable the interrupt */
		amdgpu_irq_get(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
	}
}
/**
 * amdgpu_fence_driver_force_completion - force all fence waiters to complete
 *
 * @adev: amdgpu device pointer
 *
 * In case of GPU reset failure make sure no process keeps waiting on a
 * fence that will never complete.
 */
void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		amdgpu_fence_write(ring, ring->fence_drv.sync_seq);
	}
}

void amdgpu_fence_driver_force_completion_ring(struct amdgpu_ring *ring)
{
	if (ring)
		amdgpu_fence_write(ring, ring->fence_drv.sync_seq);
}
/*
 * Common fence implementation
 */

static const char *amdgpu_fence_get_driver_name(struct dma_fence *fence)
{
	return "amdgpu";
}

static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);

	return (const char *)fence->ring->name;
}
/**
 * amdgpu_fence_enable_signaling - enable signalling on fence
 *
 * @f: fence
 *
 * This function is called with fence_queue lock held, and adds a callback
 * to fence_queue that checks if this fence is signaled, and if so it
 * signals the fence and removes itself.
 */
static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	struct amdgpu_ring *ring = fence->ring;

	if (!timer_pending(&ring->fence_drv.fallback_timer))
		amdgpu_fence_schedule_fallback(ring);

	DMA_FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);

	return true;
}
/**
 * amdgpu_fence_free - free up the fence memory
 *
 * @rcu: RCU callback head
 *
 * Free up the fence memory after the RCU grace period.
 */
static void amdgpu_fence_free(struct rcu_head *rcu)
{
	struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
	struct amdgpu_fence *fence = to_amdgpu_fence(f);

	kmem_cache_free(amdgpu_fence_slab, fence);
}
/**
 * amdgpu_fence_release - callback that fence can be freed
 *
 * @f: fence
 *
 * This function is called when the reference count becomes zero.
 * It just RCU schedules freeing up the fence.
 */
static void amdgpu_fence_release(struct dma_fence *f)
{
	call_rcu(&f->rcu, amdgpu_fence_free);
}
static const struct dma_fence_ops amdgpu_fence_ops = {
	.get_driver_name = amdgpu_fence_get_driver_name,
	.get_timeline_name = amdgpu_fence_get_timeline_name,
	.enable_signaling = amdgpu_fence_enable_signaling,
	.wait = dma_fence_default_wait,
	.release = amdgpu_fence_release,
};
/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_fence_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		amdgpu_fence_process(ring);

		seq_printf(m, "--- ring %d (%s) ---\n", i, ring->name);
		seq_printf(m, "Last signaled fence 0x%08x\n",
			   atomic_read(&ring->fence_drv.last_seq));
		seq_printf(m, "Last emitted        0x%08x\n",
			   ring->fence_drv.sync_seq);

		if (ring->funcs->type != AMDGPU_RING_TYPE_GFX)
			continue;

		/* set in CP_VMID_PREEMPT and preemption occurred */
		seq_printf(m, "Last preempted      0x%08x\n",
			   le32_to_cpu(*(ring->fence_drv.cpu_addr + 2)));
		/* set in CP_VMID_RESET and reset occurred */
		seq_printf(m, "Last reset          0x%08x\n",
			   le32_to_cpu(*(ring->fence_drv.cpu_addr + 4)));
		/* Both preemption and reset occurred */
		seq_printf(m, "Last both           0x%08x\n",
			   le32_to_cpu(*(ring->fence_drv.cpu_addr + 6)));
	}
	return 0;
}
/**
 * amdgpu_debugfs_gpu_reset - manually trigger a gpu reset
 *
 * Manually trigger a gpu reset at the next fence wait.
 */
static int amdgpu_debugfs_gpu_reset(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;

	seq_printf(m, "gpu reset\n");
	amdgpu_gpu_reset(adev);

	return 0;
}

static const struct drm_info_list amdgpu_debugfs_fence_list[] = {
	{"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL},
	{"amdgpu_gpu_reset", &amdgpu_debugfs_gpu_reset, 0, NULL}
};

static const struct drm_info_list amdgpu_debugfs_fence_list_sriov[] = {
	{"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL},
};
#endif
int amdgpu_debugfs_fence_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	if (amdgpu_sriov_vf(adev))
		return amdgpu_debugfs_add_files(adev,
						amdgpu_debugfs_fence_list_sriov, 1);
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_fence_list, 2);
#else
	return 0;
#endif
}