/*
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
#include <linux/seq_file.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <drm/drmP.h>

#include "amdgpu.h"
#include "amdgpu_trace.h"
/*
 * Fences mark an event in the GPU's pipeline and are used
 * for GPU/CPU synchronization.  When the fence is written,
 * it is expected that all buffers associated with that fence
 * are no longer in use by the associated ring on the GPU and
 * that the relevant GPU caches have been flushed.
 */
struct amdgpu_fence {
	struct dma_fence base;

	/* ring this fence was emitted on (RB, DMA, etc.) */
	struct amdgpu_ring *ring;
};

static struct kmem_cache *amdgpu_fence_slab;
int amdgpu_fence_slab_init(void)
{
	amdgpu_fence_slab = kmem_cache_create(
		"amdgpu_fence", sizeof(struct amdgpu_fence), 0,
		SLAB_HWCACHE_ALIGN, NULL);
	if (!amdgpu_fence_slab)
		return -ENOMEM;

	return 0;
}
void amdgpu_fence_slab_fini(void)
{
	/* wait for pending RCU fence frees before destroying the slab */
	rcu_barrier();
	kmem_cache_destroy(amdgpu_fence_slab);
}
/*
 * Cast helper
 */
static const struct dma_fence_ops amdgpu_fence_ops;
static inline struct amdgpu_fence *to_amdgpu_fence(struct dma_fence *f)
{
	struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);

	if (__f->base.ops == &amdgpu_fence_ops)
		return __f;

	return NULL;
}
/**
 * amdgpu_fence_write - write a fence value
 *
 * @ring: ring the fence is associated with
 * @seq: sequence number to write
 *
 * Writes a fence value to memory (all asics).
 */
static void amdgpu_fence_write(struct amdgpu_ring *ring, u32 seq)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;

	if (drv->cpu_addr)
		*drv->cpu_addr = cpu_to_le32(seq);
}
/**
 * amdgpu_fence_read - read a fence value
 *
 * @ring: ring the fence is associated with
 *
 * Reads a fence value from memory (all asics).
 * Returns the value of the fence read from memory.
 */
static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	u32 seq = 0;

	if (drv->cpu_addr)
		seq = le32_to_cpu(*drv->cpu_addr);
	else
		seq = atomic_read(&drv->last_seq);

	return seq;
}
/**
 * amdgpu_fence_emit - emit a fence on the requested ring
 *
 * @ring: ring the fence is associated with
 * @f: resulting fence object
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, -ENOMEM on failure.
 */
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_fence *fence;
	struct dma_fence *old, **ptr;
	uint32_t seq;

	fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
	if (fence == NULL)
		return -ENOMEM;

	seq = ++ring->fence_drv.sync_seq;
	fence->ring = ring;
	dma_fence_init(&fence->base, &amdgpu_fence_ops,
		       &ring->fence_drv.lock,
		       adev->fence_context + ring->idx,
		       seq);
	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
			       seq, AMDGPU_FENCE_FLAG_INT);

	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
	/* This function can't be called concurrently anyway, otherwise
	 * emitting the fence would mess up the hardware ring buffer.
	 */
	old = rcu_dereference_protected(*ptr, 1);
	if (old && !dma_fence_is_signaled(old)) {
		DRM_INFO("rcu slot is busy\n");
		dma_fence_wait(old, false);
	}

	rcu_assign_pointer(*ptr, dma_fence_get(&fence->base));

	*f = &fence->base;

	return 0;
}
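/*
 * Illustrative caller sketch (not part of this file): fences are normally
 * emitted from the command submission path, roughly along these lines.
 * The ring begin/commit handling and error paths are simplified and only
 * assumed here for illustration.
 *
 *	struct dma_fence *fence;
 *	int r;
 *
 *	r = amdgpu_fence_emit(ring, &fence);
 *	if (r == 0) {
 *		amdgpu_ring_commit(ring);
 *		r = dma_fence_wait(fence, false);
 *		dma_fence_put(fence);
 *	}
 */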
/**
 * amdgpu_fence_schedule_fallback - schedule fallback check
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Start a timer as fallback to our interrupts.
 */
static void amdgpu_fence_schedule_fallback(struct amdgpu_ring *ring)
{
	mod_timer(&ring->fence_drv.fallback_timer,
		  jiffies + AMDGPU_FENCE_JIFFIES_TIMEOUT);
}
/**
 * amdgpu_fence_process - check for fence activity
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Checks the current fence value and calculates the last
 * signalled fence value. Wakes the fence queue if the
 * sequence number has increased.
 */
void amdgpu_fence_process(struct amdgpu_ring *ring)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	uint32_t seq, last_seq;
	int r;

	do {
		last_seq = atomic_read(&ring->fence_drv.last_seq);
		seq = amdgpu_fence_read(ring);

	} while (atomic_cmpxchg(&drv->last_seq, last_seq, seq) != last_seq);

	if (seq != ring->fence_drv.sync_seq)
		amdgpu_fence_schedule_fallback(ring);

	if (unlikely(seq == last_seq))
		return;

	last_seq &= drv->num_fences_mask;
	seq &= drv->num_fences_mask;

	do {
		struct dma_fence *fence, **ptr;

		++last_seq;
		last_seq &= drv->num_fences_mask;
		ptr = &drv->fences[last_seq];

		/* There is always exactly one thread signaling this fence slot */
		fence = rcu_dereference_protected(*ptr, 1);
		RCU_INIT_POINTER(*ptr, NULL);

		if (!fence)
			continue;

		r = dma_fence_signal(fence);
		if (!r)
			DMA_FENCE_TRACE(fence, "signaled from irq context\n");
		else
			BUG();

		dma_fence_put(fence);
	} while (last_seq != seq);
}
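/*
 * Worked example of the masked walk above (illustrative numbers): with
 * num_fences_mask == 0x1ff (512 slots), last_seq == 510 and seq == 513,
 * the loop signals slots 511, 0 and 1; the mask wraps the slot index
 * around the fences array while the 32-bit sequence number keeps growing.
 */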
/**
 * amdgpu_fence_fallback - fallback for hardware interrupts
 *
 * @arg: the ring to check, cast from the timer data
 *
 * Checks for fence activity.
 */
static void amdgpu_fence_fallback(unsigned long arg)
{
	struct amdgpu_ring *ring = (void *)arg;

	amdgpu_fence_process(ring);
}
/**
 * amdgpu_fence_wait_empty - wait for all fences to signal
 *
 * @ring: ring the fences are associated with
 *
 * Wait for all fences on the requested ring to signal (all asics).
 * Returns 0 if the fences have passed, error for all other cases.
 */
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
{
	uint64_t seq = ACCESS_ONCE(ring->fence_drv.sync_seq);
	struct dma_fence *fence, **ptr;
	int r;

	if (!seq)
		return 0;

	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
	rcu_read_lock();
	fence = rcu_dereference(*ptr);
	if (!fence || !dma_fence_get_rcu(fence)) {
		rcu_read_unlock();
		return 0;
	}
	rcu_read_unlock();

	r = dma_fence_wait(fence, false);
	dma_fence_put(fence);
	return r;
}
/**
 * amdgpu_fence_count_emitted - get the count of emitted fences
 *
 * @ring: ring the fences are associated with
 *
 * Get the number of fences emitted on the requested ring (all asics).
 * Returns the number of emitted fences on the ring.  Used by the
 * dynpm code to track ring activity.
 */
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
{
	uint64_t emitted;

	/* We are not protected by ring lock when reading the last sequence,
	 * but it's ok to report a slightly wrong fence count here.
	 */
	amdgpu_fence_process(ring);
	emitted = 0x100000000ull;
	emitted -= atomic_read(&ring->fence_drv.last_seq);
	emitted += ACCESS_ONCE(ring->fence_drv.sync_seq);
	return lower_32_bits(emitted);
}
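/*
 * Worked example of the wrap-safe arithmetic above (illustrative values):
 * with last_seq == 0xfffffffe and sync_seq == 0x00000001,
 * emitted = 0x100000000 - 0xfffffffe + 0x1 = 3, i.e. three fences are
 * still outstanding even though the 32-bit sequence counter has wrapped.
 */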
/**
 * amdgpu_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring.
 *
 * @ring: ring to start the fence driver on
 * @irq_src: interrupt source to use for this ring
 * @irq_type: interrupt type to use for this ring
 *
 * Make the fence driver ready for processing (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has.
 * Returns 0 for success, errors for failure.
 */
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
				   struct amdgpu_irq_src *irq_src,
				   unsigned irq_type)
{
	struct amdgpu_device *adev = ring->adev;
	uint64_t index;

	if (ring != &adev->uvd.ring) {
		ring->fence_drv.cpu_addr = &adev->wb.wb[ring->fence_offs];
		ring->fence_drv.gpu_addr = adev->wb.gpu_addr + (ring->fence_offs * 4);
	} else {
		/* put fence directly behind firmware */
		index = ALIGN(adev->uvd.fw->size, 8);
		ring->fence_drv.cpu_addr = adev->uvd.cpu_addr + index;
		ring->fence_drv.gpu_addr = adev->uvd.gpu_addr + index;
	}
	amdgpu_fence_write(ring, atomic_read(&ring->fence_drv.last_seq));
	amdgpu_irq_get(adev, irq_src, irq_type);

	ring->fence_drv.irq_src = irq_src;
	ring->fence_drv.irq_type = irq_type;
	ring->fence_drv.initialized = true;

	dev_info(adev->dev, "fence driver on ring %d use gpu addr 0x%016llx, "
		 "cpu addr 0x%p\n", ring->idx,
		 ring->fence_drv.gpu_addr, ring->fence_drv.cpu_addr);
	return 0;
}
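/*
 * Illustrative call-site sketch (assumed, simplified): each IP block wires
 * its rings up with its own interrupt source during hw init, e.g. a GFX
 * ring would do something along the lines of
 *
 *	r = amdgpu_fence_driver_start_ring(ring, &adev->gfx.eop_irq,
 *					   AMDGPU_CP_IRQ_GFX_EOP);
 *
 * where the irq source and type are per IP block and only shown here as
 * an example.
 */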
/**
 * amdgpu_fence_driver_init_ring - init the fence driver
 * for the requested ring.
 *
 * @ring: ring to init the fence driver on
 * @num_hw_submission: number of entries on the hardware queue
 *
 * Init the fence driver for the requested ring (all asics).
 * Helper function for amdgpu_fence_driver_init().
 */
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
				  unsigned num_hw_submission)
{
	long timeout;
	int r;

	/* Check that num_hw_submission is a power of two */
	if ((num_hw_submission & (num_hw_submission - 1)) != 0)
		return -EINVAL;

	ring->fence_drv.cpu_addr = NULL;
	ring->fence_drv.gpu_addr = 0;
	ring->fence_drv.sync_seq = 0;
	atomic_set(&ring->fence_drv.last_seq, 0);
	ring->fence_drv.initialized = false;

	setup_timer(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback,
		    (unsigned long)ring);

	ring->fence_drv.num_fences_mask = num_hw_submission * 2 - 1;
	spin_lock_init(&ring->fence_drv.lock);
	ring->fence_drv.fences = kcalloc(num_hw_submission * 2, sizeof(void *),
					 GFP_KERNEL);
	if (!ring->fence_drv.fences)
		return -ENOMEM;

	/* No need to setup the GPU scheduler for KIQ ring */
	if (ring->funcs->type != AMDGPU_RING_TYPE_KIQ) {
		timeout = msecs_to_jiffies(amdgpu_lockup_timeout);
		if (timeout == 0) {
			/*
			 * FIXME:
			 * The delayed workqueue cannot use MAX_SCHEDULE_TIMEOUT
			 * directly, so the scheduler will not use a delayed
			 * workqueue if MAX_SCHEDULE_TIMEOUT is set.
			 * Currently keep it simple and silly.
			 */
			timeout = MAX_SCHEDULE_TIMEOUT;
		}
		r = amd_sched_init(&ring->sched, &amdgpu_sched_ops,
				   num_hw_submission, amdgpu_job_hang_limit,
				   timeout, ring->name);
		if (r) {
			DRM_ERROR("Failed to create scheduler on ring %s.\n",
				  ring->name);
			return r;
		}
	}

	return 0;
}
/**
 * amdgpu_fence_driver_init - init the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Init the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 * Returns 0 for success.
 */
int amdgpu_fence_driver_init(struct amdgpu_device *adev)
{
	if (amdgpu_debugfs_fence_init(adev))
		dev_err(adev->dev, "fence debugfs file creation failed\n");

	return 0;
}
/**
 * amdgpu_fence_driver_fini - tear down the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Tear down the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
{
	unsigned i, j;
	int r;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->fence_drv.initialized)
			continue;
		r = amdgpu_fence_wait_empty(ring);
		if (r) {
			/* no need to trigger GPU reset as we are unloading */
			amdgpu_fence_driver_force_completion(adev);
		}
		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
		amd_sched_fini(&ring->sched);
		del_timer_sync(&ring->fence_drv.fallback_timer);
		for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
			dma_fence_put(ring->fence_drv.fences[j]);
		kfree(ring->fence_drv.fences);
		ring->fence_drv.fences = NULL;
		ring->fence_drv.initialized = false;
	}
}
/**
 * amdgpu_fence_driver_suspend - suspend the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Suspend the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_suspend(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->fence_drv.initialized)
			continue;

		/* wait for gpu to finish processing current batch */
		r = amdgpu_fence_wait_empty(ring);
		if (r) {
			/* delay GPU reset to resume */
			amdgpu_fence_driver_force_completion(adev);
		}

		/* disable the interrupt */
		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
	}
}
/**
 * amdgpu_fence_driver_resume - resume the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Resume the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 */
void amdgpu_fence_driver_resume(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->fence_drv.initialized)
			continue;

		/* enable the interrupt */
		amdgpu_irq_get(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
	}
}
/**
 * amdgpu_fence_driver_force_completion - force all fence waiters to complete
 *
 * @adev: amdgpu device pointer
 *
 * In case of GPU reset failure make sure no process keeps waiting on a fence
 * that will never complete.
 */
void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->fence_drv.initialized)
			continue;

		amdgpu_fence_write(ring, ring->fence_drv.sync_seq);
	}
}

void amdgpu_fence_driver_force_completion_ring(struct amdgpu_ring *ring)
{
	if (ring)
		amdgpu_fence_write(ring, ring->fence_drv.sync_seq);
}
/*
 * Common fence implementation
 */

static const char *amdgpu_fence_get_driver_name(struct dma_fence *fence)
{
	return "amdgpu";
}

static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);

	return (const char *)fence->ring->name;
}
/**
 * amdgpu_fence_enable_signaling - enable signalling on fence
 *
 * @f: fence
 *
 * This function is called with fence_queue lock held, and adds a callback
 * to fence_queue that checks if this fence is signaled, and if so it
 * signals the fence and removes itself.
 */
static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	struct amdgpu_ring *ring = fence->ring;

	if (!timer_pending(&ring->fence_drv.fallback_timer))
		amdgpu_fence_schedule_fallback(ring);

	DMA_FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);

	return true;
}
/**
 * amdgpu_fence_free - free up the fence memory
 *
 * @rcu: RCU callback head
 *
 * Free up the fence memory after the RCU grace period.
 */
static void amdgpu_fence_free(struct rcu_head *rcu)
{
	struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
	struct amdgpu_fence *fence = to_amdgpu_fence(f);

	kmem_cache_free(amdgpu_fence_slab, fence);
}
/**
 * amdgpu_fence_release - callback that fence can be freed
 *
 * @f: fence
 *
 * This function is called when the reference count becomes zero.
 * It just RCU schedules freeing up the fence.
 */
static void amdgpu_fence_release(struct dma_fence *f)
{
	call_rcu(&f->rcu, amdgpu_fence_free);
}
static const struct dma_fence_ops amdgpu_fence_ops = {
	.get_driver_name = amdgpu_fence_get_driver_name,
	.get_timeline_name = amdgpu_fence_get_timeline_name,
	.enable_signaling = amdgpu_fence_enable_signaling,
	.wait = dma_fence_default_wait,
	.release = amdgpu_fence_release,
};
/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_fence_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->fence_drv.initialized)
			continue;

		amdgpu_fence_process(ring);

		seq_printf(m, "--- ring %d (%s) ---\n", i, ring->name);
		seq_printf(m, "Last signaled fence 0x%08x\n",
			   atomic_read(&ring->fence_drv.last_seq));
		seq_printf(m, "Last emitted        0x%08x\n",
			   ring->fence_drv.sync_seq);
	}
	return 0;
}
/**
 * amdgpu_debugfs_gpu_reset - manually trigger a gpu reset
 *
 * Manually trigger a gpu reset at the next fence wait.
 */
static int amdgpu_debugfs_gpu_reset(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;

	seq_printf(m, "gpu reset\n");
	amdgpu_gpu_reset(adev);

	return 0;
}
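/*
 * Usage note (illustrative, debugfs path assumed): with debugfs mounted,
 * reading the entry created below triggers the reset, e.g.
 *
 *	cat /sys/kernel/debug/dri/0/amdgpu_gpu_reset
 */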
static const struct drm_info_list amdgpu_debugfs_fence_list[] = {
	{"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL},
	{"amdgpu_gpu_reset", &amdgpu_debugfs_gpu_reset, 0, NULL}
};

static const struct drm_info_list amdgpu_debugfs_fence_list_sriov[] = {
	{"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL},
};
#endif

int amdgpu_debugfs_fence_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	if (amdgpu_sriov_vf(adev))
		return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_fence_list_sriov, 1);
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_fence_list, 2);
#else
	return 0;
#endif
}