/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Dave Airlie
 */
#include <linux/seq_file.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/pm_runtime.h>

#include <drm/drm_debugfs.h>

#include "amdgpu.h"
#include "amdgpu_trace.h"
/*
 * Fences mark an event in the GPU's pipeline and are used
 * for GPU/CPU synchronization. When the fence is written,
 * it is expected that all buffers associated with that fence
 * are no longer in use by the associated ring on the GPU and
 * that the relevant GPU caches have been flushed.
 */
struct amdgpu_fence {
	struct dma_fence base;

	/* RB, DMA, etc. */
	struct amdgpu_ring		*ring;
};

static struct kmem_cache *amdgpu_fence_slab;
int amdgpu_fence_slab_init(void)
{
	amdgpu_fence_slab = kmem_cache_create(
		"amdgpu_fence", sizeof(struct amdgpu_fence), 0,
		SLAB_HWCACHE_ALIGN, NULL);
	if (!amdgpu_fence_slab)
		return -ENOMEM;
	return 0;
}

void amdgpu_fence_slab_fini(void)
{
	rcu_barrier();
	kmem_cache_destroy(amdgpu_fence_slab);
}
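/*
 * Fence objects from this slab are freed through call_rcu() (see
 * amdgpu_fence_release() below), so the rcu_barrier() above is needed to
 * drain any pending RCU callbacks before the cache goes away.
 */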
/*
 * Cast helper
 */
static const struct dma_fence_ops amdgpu_fence_ops;
static inline struct amdgpu_fence *to_amdgpu_fence(struct dma_fence *f)
{
	struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);

	if (__f->base.ops == &amdgpu_fence_ops)
		return __f;

	return NULL;
}
/**
 * amdgpu_fence_write - write a fence value
 *
 * @ring: ring the fence is associated with
 * @seq: sequence number to write
 *
 * Writes a fence value to memory (all asics).
 */
static void amdgpu_fence_write(struct amdgpu_ring *ring, u32 seq)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;

	if (drv->cpu_addr)
		*drv->cpu_addr = cpu_to_le32(seq);
}
/**
 * amdgpu_fence_read - read a fence value
 *
 * @ring: ring the fence is associated with
 *
 * Reads a fence value from memory (all asics). Falls back to the last
 * sequence number seen by the driver if the CPU address is not set up yet.
 * Returns the value of the fence read from memory.
 */
static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	u32 seq = 0;

	if (drv->cpu_addr)
		seq = le32_to_cpu(*drv->cpu_addr);
	else
		seq = atomic_read(&drv->last_seq);

	return seq;
}
/**
 * amdgpu_fence_emit - emit a fence on the requested ring
 *
 * @ring: ring the fence is associated with
 * @f: resulting fence object
 * @flags: flags for the fence, passed through to the ring's emit_fence hook
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, -ENOMEM on failure.
 */
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
		      unsigned flags)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_fence *fence;
	struct dma_fence __rcu **ptr;
	uint32_t seq;
	int r;

	fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
	if (fence == NULL)
		return -ENOMEM;

	seq = ++ring->fence_drv.sync_seq;
	fence->ring = ring;
	dma_fence_init(&fence->base, &amdgpu_fence_ops,
		       &ring->fence_drv.lock,
		       adev->fence_context + ring->idx,
		       seq);
	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
			       seq, flags | AMDGPU_FENCE_FLAG_INT);
	pm_runtime_get_noresume(adev->ddev->dev);
	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
	if (unlikely(rcu_dereference_protected(*ptr, 1))) {
		struct dma_fence *old;

		rcu_read_lock();
		old = dma_fence_get_rcu_safe(ptr);
		rcu_read_unlock();

		if (old) {
			r = dma_fence_wait(old, false);
			dma_fence_put(old);
			if (r)
				return r;
		}
	}

	/* This function can't be called concurrently anyway, otherwise
	 * emitting the fence would mess up the hardware ring buffer.
	 */
	rcu_assign_pointer(*ptr, dma_fence_get(&fence->base));

	*f = &fence->base;

	return 0;
}
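/*
 * How the slot reuse above plays out (illustrative numbers): with
 * num_hw_submission = 32 the fences[] array has 64 entries and
 * num_fences_mask = 63, so sequence number 64 maps back onto slot 0.
 * Before that slot is reused, the dma_fence_wait() above blocks on the
 * fence still occupying it, which bounds the number of unsignaled fences
 * in flight on a ring to the size of the window.
 */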
/**
 * amdgpu_fence_emit_polling - emit a fence on the requested ring
 *
 * @ring: ring the fence is associated with
 * @s: resulting sequence number
 * @timeout: timeout for waiting for the fence window to drain, in usecs
 *
 * Emits a fence command on the requested ring (all asics).
 * Used for polling fences.
 * Returns 0 on success, -EINVAL if @s is NULL or -ETIMEDOUT if waiting
 * for the previous fence in this slot timed out.
 */
int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s,
			      uint32_t timeout)
{
	uint32_t seq;
	signed long r;

	if (!s)
		return -EINVAL;

	seq = ++ring->fence_drv.sync_seq;
	r = amdgpu_fence_wait_polling(ring,
				      seq - ring->fence_drv.num_fences_mask,
				      timeout);
	if (r < 1)
		return -ETIMEDOUT;

	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
			       seq, 0);

	*s = seq;

	return 0;
}
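/*
 * Unlike amdgpu_fence_emit(), the polling variant never allocates a
 * dma_fence; completion is tracked purely by sequence number via
 * amdgpu_fence_wait_polling(). Waiting on (seq - num_fences_mask) above
 * keeps the CPU from running more than one window ahead of the hardware.
 */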
/**
 * amdgpu_fence_schedule_fallback - schedule fallback check
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Start a timer as fallback to our interrupts.
 */
static void amdgpu_fence_schedule_fallback(struct amdgpu_ring *ring)
{
	mod_timer(&ring->fence_drv.fallback_timer,
		  jiffies + AMDGPU_FENCE_JIFFIES_TIMEOUT);
}
/**
 * amdgpu_fence_process - check for fence activity
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Checks the current fence value and calculates the last
 * signalled fence value. Wakes the fence queue if the
 * sequence number has increased.
 *
 * Returns true if fence was processed
 */
bool amdgpu_fence_process(struct amdgpu_ring *ring)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	struct amdgpu_device *adev = ring->adev;
	uint32_t seq, last_seq;
	int r;

	do {
		last_seq = atomic_read(&ring->fence_drv.last_seq);
		seq = amdgpu_fence_read(ring);

	} while (atomic_cmpxchg(&drv->last_seq, last_seq, seq) != last_seq);

	if (del_timer(&ring->fence_drv.fallback_timer) &&
	    seq != ring->fence_drv.sync_seq)
		amdgpu_fence_schedule_fallback(ring);

	if (unlikely(seq == last_seq))
		return false;

	last_seq &= drv->num_fences_mask;
	seq &= drv->num_fences_mask;

	do {
		struct dma_fence *fence, **ptr;

		++last_seq;
		last_seq &= drv->num_fences_mask;
		ptr = &drv->fences[last_seq];

		/* There is always exactly one thread signaling this fence slot */
		fence = rcu_dereference_protected(*ptr, 1);
		RCU_INIT_POINTER(*ptr, NULL);

		if (!fence)
			continue;

		r = dma_fence_signal(fence);
		if (!r)
			DMA_FENCE_TRACE(fence, "signaled from irq context\n");
		else
			BUG();

		dma_fence_put(fence);
		pm_runtime_mark_last_busy(adev->ddev->dev);
		pm_runtime_put_autosuspend(adev->ddev->dev);
	} while (last_seq != seq);

	return true;
}
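/*
 * The atomic_cmpxchg() loop above is what makes concurrent callers safe:
 * the fence interrupt handler, the fallback timer and debugfs can all
 * call amdgpu_fence_process(), and whichever caller successfully
 * advances last_seq owns signaling exactly that range of slots, so no
 * sequence number is signaled twice or skipped.
 */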
/**
 * amdgpu_fence_fallback - fallback for hardware interrupts
 *
 * @t: timer context used to obtain the pointer to the ring
 *
 * Checks for fence activity.
 */
static void amdgpu_fence_fallback(struct timer_list *t)
{
	struct amdgpu_ring *ring = from_timer(ring, t,
					      fence_drv.fallback_timer);

	if (amdgpu_fence_process(ring))
		DRM_WARN("Fence fallback timer expired on ring %s\n", ring->name);
}
/**
 * amdgpu_fence_wait_empty - wait for all fences to signal
 *
 * @ring: ring the fences are associated with
 *
 * Wait for all fences on the requested ring to signal (all asics).
 * Returns 0 if the fences have passed, error for all other cases.
 */
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
{
	uint64_t seq = READ_ONCE(ring->fence_drv.sync_seq);
	struct dma_fence *fence, **ptr;
	int r;

	if (!seq)
		return 0;

	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
	rcu_read_lock();
	fence = rcu_dereference(*ptr);
	if (!fence || !dma_fence_get_rcu(fence)) {
		rcu_read_unlock();
		return 0;
	}
	rcu_read_unlock();

	r = dma_fence_wait(fence, false);
	dma_fence_put(fence);
	return r;
}
/**
 * amdgpu_fence_wait_polling - busy wait for a given sequence number
 *
 * @ring: ring the fence is associated with
 * @wait_seq: sequence number to wait for
 * @timeout: the timeout for waiting in usecs
 *
 * Busy-wait for the given sequence number to signal (all asics).
 * Returns the remaining timeout if the sequence number arrived in time,
 * 0 otherwise.
 */
signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring,
				      uint32_t wait_seq,
				      signed long timeout)
{
	uint32_t seq;

	do {
		seq = amdgpu_fence_read(ring);
		udelay(5);
		timeout -= 5;
	} while ((int32_t)(wait_seq - seq) > 0 && timeout > 0);

	return timeout > 0 ? timeout : 0;
}
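/*
 * The "(int32_t)(wait_seq - seq) > 0" test above compares signed
 * distance rather than raw values so that 32-bit wraparound is handled.
 * For example (illustrative values), wait_seq = 5 and seq = 0xfffffffb
 * give a difference of 10, so polling continues across the wrap instead
 * of returning early just because 5 < 0xfffffffb.
 */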
/**
 * amdgpu_fence_count_emitted - get the count of emitted fences
 *
 * @ring: ring the fence is associated with
 *
 * Get the number of fences emitted on the requested ring (all asics).
 * Returns the number of emitted fences on the ring.  Used by the
 * dynpm code to track ring activity.
 */
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
{
	uint64_t emitted;

	/* We are not protected by ring lock when reading the last sequence
	 * but it's ok to report slightly wrong fence count here.
	 */
	amdgpu_fence_process(ring);
	emitted = 0x100000000ull;
	emitted -= atomic_read(&ring->fence_drv.last_seq);
	emitted += READ_ONCE(ring->fence_drv.sync_seq);
	return lower_32_bits(emitted);
}
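/*
 * Worked example of the arithmetic above (illustrative values): with
 * last_seq = 0xfffffff0 and sync_seq = 0x00000010 after a wrap,
 * emitted = 0x100000000 - 0xfffffff0 + 0x10 = 0x20, i.e. 32 fences
 * emitted but not yet signaled. Starting from 2^32 keeps the
 * intermediate value positive so lower_32_bits() yields the true
 * distance.
 */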
/**
 * amdgpu_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring.
 *
 * @ring: ring to start the fence driver on
 * @irq_src: interrupt source to use for this ring
 * @irq_type: interrupt type to use for this ring
 *
 * Make the fence driver ready for processing (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has.
 * Returns 0 for success, errors for failure.
 */
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
				   struct amdgpu_irq_src *irq_src,
				   unsigned irq_type)
{
	struct amdgpu_device *adev = ring->adev;
	uint64_t index;

	if (ring->funcs->type != AMDGPU_RING_TYPE_UVD) {
		ring->fence_drv.cpu_addr = &adev->wb.wb[ring->fence_offs];
		ring->fence_drv.gpu_addr = adev->wb.gpu_addr + (ring->fence_offs * 4);
	} else {
		/* put fence directly behind firmware */
		index = ALIGN(adev->uvd.fw->size, 8);
		ring->fence_drv.cpu_addr = adev->uvd.inst[ring->me].cpu_addr + index;
		ring->fence_drv.gpu_addr = adev->uvd.inst[ring->me].gpu_addr + index;
	}
	amdgpu_fence_write(ring, atomic_read(&ring->fence_drv.last_seq));

	if (irq_src)
		amdgpu_irq_get(adev, irq_src, irq_type);

	ring->fence_drv.irq_src = irq_src;
	ring->fence_drv.irq_type = irq_type;
	ring->fence_drv.initialized = true;

	DRM_DEV_DEBUG(adev->dev, "fence driver on ring %s use gpu addr 0x%016llx\n",
		      ring->name, ring->fence_drv.gpu_addr);
	return 0;
}
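/*
 * One thing worth noting about the amdgpu_fence_write() call above: it
 * re-seeds the fence location with the last signaled sequence number, so
 * whatever value was left in that memory from before a reset or resume
 * cannot be misread as new fence activity.
 */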
/**
 * amdgpu_fence_driver_init_ring - init the fence driver
 * for the requested ring.
 *
 * @ring: ring to init the fence driver on
 * @num_hw_submission: number of entries on the hardware queue
 *
 * Init the fence driver for the requested ring (all asics).
 * Helper function for amdgpu_fence_driver_init().
 */
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
				  unsigned num_hw_submission)
{
	struct amdgpu_device *adev = ring->adev;
	long timeout;
	int r;

	if (!adev)
		return -EINVAL;

	/* Check that num_hw_submission is a power of two */
	if (!is_power_of_2(num_hw_submission))
		return -EINVAL;

	ring->fence_drv.cpu_addr = NULL;
	ring->fence_drv.gpu_addr = 0;
	ring->fence_drv.sync_seq = 0;
	atomic_set(&ring->fence_drv.last_seq, 0);
	ring->fence_drv.initialized = false;

	timer_setup(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback, 0);

	ring->fence_drv.num_fences_mask = num_hw_submission * 2 - 1;
	spin_lock_init(&ring->fence_drv.lock);
	ring->fence_drv.fences = kcalloc(num_hw_submission * 2, sizeof(void *),
					 GFP_KERNEL);
	if (!ring->fence_drv.fences)
		return -ENOMEM;

	/* No need to setup the GPU scheduler for rings that don't need it */
	if (!ring->no_scheduler) {
		switch (ring->funcs->type) {
		case AMDGPU_RING_TYPE_GFX:
			timeout = adev->gfx_timeout;
			break;
		case AMDGPU_RING_TYPE_COMPUTE:
			timeout = adev->compute_timeout;
			break;
		case AMDGPU_RING_TYPE_SDMA:
			timeout = adev->sdma_timeout;
			break;
		default:
			timeout = adev->video_timeout;
			break;
		}

		r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
				   num_hw_submission, amdgpu_job_hang_limit,
				   timeout, ring->name);
		if (r) {
			DRM_ERROR("Failed to create scheduler on ring %s.\n",
				  ring->name);
			return r;
		}
	}

	return 0;
}
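/*
 * Sizing note: the fences[] array holds num_hw_submission * 2 slots and
 * num_fences_mask is that size minus one, so "seq & num_fences_mask"
 * only works as a cheap modulo because the size is a power of two; that
 * is what the is_power_of_2() check above enforces.
 */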
/**
 * amdgpu_fence_driver_init - init the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Init the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 * Returns 0 for success.
 */
int amdgpu_fence_driver_init(struct amdgpu_device *adev)
{
	if (amdgpu_debugfs_fence_init(adev))
		dev_err(adev->dev, "fence debugfs file creation failed\n");

	return 0;
}
/**
 * amdgpu_fence_driver_fini - tear down the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Tear down the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
{
	unsigned i, j;
	int r;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->fence_drv.initialized)
			continue;
		r = amdgpu_fence_wait_empty(ring);
		if (r) {
			/* no need to trigger GPU reset as we are unloading */
			amdgpu_fence_driver_force_completion(ring);
		}
		if (ring->fence_drv.irq_src)
			amdgpu_irq_put(adev, ring->fence_drv.irq_src,
				       ring->fence_drv.irq_type);
		if (!ring->no_scheduler)
			drm_sched_fini(&ring->sched);
		del_timer_sync(&ring->fence_drv.fallback_timer);
		for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
			dma_fence_put(ring->fence_drv.fences[j]);
		kfree(ring->fence_drv.fences);
		ring->fence_drv.fences = NULL;
		ring->fence_drv.initialized = false;
	}
}
/**
 * amdgpu_fence_driver_suspend - suspend the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Suspend the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_suspend(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		/* wait for gpu to finish processing current batch */
		r = amdgpu_fence_wait_empty(ring);
		if (r) {
			/* delay GPU reset to resume */
			amdgpu_fence_driver_force_completion(ring);
		}

		/* disable the interrupt */
		if (ring->fence_drv.irq_src)
			amdgpu_irq_put(adev, ring->fence_drv.irq_src,
				       ring->fence_drv.irq_type);
	}
}
/**
 * amdgpu_fence_driver_resume - resume the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Resume the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 */
void amdgpu_fence_driver_resume(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		/* enable the interrupt */
		if (ring->fence_drv.irq_src)
			amdgpu_irq_get(adev, ring->fence_drv.irq_src,
				       ring->fence_drv.irq_type);
	}
}
/**
 * amdgpu_fence_driver_force_completion - force signal latest fence of ring
 *
 * @ring: ring to signal the latest fence on
 *
 * Writes the current sync_seq as the signaled value so every emitted
 * fence is treated as completed.
 */
void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring)
{
	amdgpu_fence_write(ring, ring->fence_drv.sync_seq);
	amdgpu_fence_process(ring);
}
/*
 * Common fence implementation
 */

static const char *amdgpu_fence_get_driver_name(struct dma_fence *fence)
{
	return "amdgpu";
}

static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);

	return (const char *)fence->ring->name;
}
/**
 * amdgpu_fence_enable_signaling - enable signalling on fence
 *
 * @f: fence to enable signalling on
 *
 * This function is called with fence_queue lock held, and adds a callback
 * to fence_queue that checks if this fence is signaled, and if so it
 * signals the fence and removes itself.
 */
static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	struct amdgpu_ring *ring = fence->ring;

	if (!timer_pending(&ring->fence_drv.fallback_timer))
		amdgpu_fence_schedule_fallback(ring);

	DMA_FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);

	return true;
}
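/*
 * Arming the fallback timer here means a fence with waiters still
 * signals eventually even if the fence interrupt is lost: when the
 * timer fires, amdgpu_fence_fallback() re-runs amdgpu_fence_process()
 * from timer context.
 */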
/**
 * amdgpu_fence_free - free up the fence memory
 *
 * @rcu: RCU callback head
 *
 * Free up the fence memory after the RCU grace period.
 */
static void amdgpu_fence_free(struct rcu_head *rcu)
{
	struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	kmem_cache_free(amdgpu_fence_slab, fence);
}
/**
 * amdgpu_fence_release - callback that fence can be freed
 *
 * @f: fence
 *
 * This function is called when the reference count becomes zero.
 * It just RCU schedules freeing up the fence.
 */
static void amdgpu_fence_release(struct dma_fence *f)
{
	call_rcu(&f->rcu, amdgpu_fence_free);
}
static const struct dma_fence_ops amdgpu_fence_ops = {
	.get_driver_name = amdgpu_fence_get_driver_name,
	.get_timeline_name = amdgpu_fence_get_timeline_name,
	.enable_signaling = amdgpu_fence_enable_signaling,
	.release = amdgpu_fence_release,
};
/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_fence_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		amdgpu_fence_process(ring);

		seq_printf(m, "--- ring %d (%s) ---\n", i, ring->name);
		seq_printf(m, "Last signaled fence 0x%08x\n",
			   atomic_read(&ring->fence_drv.last_seq));
		seq_printf(m, "Last emitted        0x%08x\n",
			   ring->fence_drv.sync_seq);

		if (ring->funcs->type == AMDGPU_RING_TYPE_GFX ||
		    ring->funcs->type == AMDGPU_RING_TYPE_SDMA) {
			seq_printf(m, "Last signaled trailing fence 0x%08x\n",
				   le32_to_cpu(*ring->trail_fence_cpu_addr));
			seq_printf(m, "Last emitted                 0x%08x\n",
				   ring->trail_seq);
		}

		if (ring->funcs->type != AMDGPU_RING_TYPE_GFX)
			continue;

		/* set in CP_VMID_PREEMPT and preemption occurred */
		seq_printf(m, "Last preempted              0x%08x\n",
			   le32_to_cpu(*(ring->fence_drv.cpu_addr + 2)));
		/* set in CP_VMID_RESET and reset occurred */
		seq_printf(m, "Last reset                  0x%08x\n",
			   le32_to_cpu(*(ring->fence_drv.cpu_addr + 4)));
		/* Both preemption and reset occurred */
		seq_printf(m, "Last both                   0x%08x\n",
			   le32_to_cpu(*(ring->fence_drv.cpu_addr + 6)));
	}
	return 0;
}
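/*
 * Sample of what this file produces for one ring (values illustrative):
 *
 *   --- ring 0 (gfx) ---
 *   Last signaled fence 0x00012345
 *   Last emitted        0x00012347
 *
 * A persistent gap between the two values points at fences the GPU has
 * not signaled yet.
 */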
/**
 * amdgpu_debugfs_gpu_recover - manually trigger a gpu reset & recover
 *
 * Manually trigger a gpu reset at the next fence wait.
 */
static int amdgpu_debugfs_gpu_recover(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	int r;

	r = pm_runtime_get_sync(dev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(dev->dev);
		return 0;
	}

	seq_printf(m, "gpu recover\n");
	amdgpu_device_gpu_recover(adev, NULL);

	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return 0;
}
static const struct drm_info_list amdgpu_debugfs_fence_list[] = {
	{"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL},
	{"amdgpu_gpu_recover", &amdgpu_debugfs_gpu_recover, 0, NULL}
};

static const struct drm_info_list amdgpu_debugfs_fence_list_sriov[] = {
	{"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL},
};
#endif

int amdgpu_debugfs_fence_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	if (amdgpu_sriov_vf(adev))
		return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_fence_list_sriov,
						ARRAY_SIZE(amdgpu_debugfs_fence_list_sriov));
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_fence_list,
					ARRAY_SIZE(amdgpu_debugfs_fence_list));
#else
	return 0;
#endif
}