/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
#include <linux/seq_file.h>
#include <linux/slab.h>

#include <drm/amdgpu_drm.h>
#include <drm/drm_debugfs.h>

#include "amdgpu.h"
#include "amdgpu_trace.h"

#define AMDGPU_IB_TEST_TIMEOUT          msecs_to_jiffies(1000)
#define AMDGPU_IB_TEST_GFX_XGMI_TIMEOUT msecs_to_jiffies(2000)
/*
 * IBs (Indirect Buffers) are areas of GPU accessible memory where
 * commands are stored.  You can put a pointer to the IB in the
 * command ring and the hw will fetch the commands from the IB
 * and execute them.  Generally userspace acceleration drivers
 * produce command buffers which are sent to the kernel and
 * put in IBs for execution by the requested ring.
 */
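/*
 * The flow in practice (an orienting note, not code from this file):
 * userspace submits a command buffer through the CS ioctl, the kernel
 * wraps it in one or more struct amdgpu_ib objects allocated from the
 * pools below, and amdgpu_ib_schedule() points the ring at them.
 */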
/**
 * amdgpu_ib_get - request an IB (Indirect Buffer)
 *
 * @adev: amdgpu_device pointer
 * @vm: VM the IB is associated with, or NULL
 * @size: requested IB size in bytes
 * @pool_type: IB pool to allocate from
 * @ib: IB object returned
 *
 * Request an IB (all asics).  IBs are allocated using the
 * suballocator.
 * Returns 0 on success, error on failure.
 */
int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                  unsigned size, enum amdgpu_ib_pool_type pool_type,
                  struct amdgpu_ib *ib)
{
        int r;

        if (size) {
                r = amdgpu_sa_bo_new(&adev->ib_pools[pool_type],
                                     &ib->sa_bo, size, 256);
                if (r) {
                        dev_err(adev->dev, "failed to get a new IB (%d)\n", r);
                        return r;
                }

                ib->ptr = amdgpu_sa_bo_cpu_addr(ib->sa_bo);

                if (!vm)
                        ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
        }

        return 0;
}
/**
 * amdgpu_ib_free - free an IB (Indirect Buffer)
 *
 * @adev: amdgpu_device pointer
 * @ib: IB object to free
 * @f: the fence the suballocated bo needs to wait on before the
 *     IB's backing memory is reused
 *
 * Free an IB (all asics).
 */
void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
                    struct dma_fence *f)
{
        amdgpu_sa_bo_free(adev, &ib->sa_bo, f);
}
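/*
 * Note on lifetime: the suballocator does not stall in amdgpu_ib_free();
 * passing the submission fence as @f simply means the freed range is not
 * handed out again until that fence signals, so the GPU may still be
 * reading the IB at the moment free is called.
 */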
/**
 * amdgpu_ib_schedule - schedule an IB (Indirect Buffer) on the ring
 *
 * @ring: ring the IBs are scheduled on
 * @num_ibs: number of IBs to schedule
 * @ibs: IB objects to schedule
 * @job: job the IBs belong to, or NULL for a direct submission
 * @f: fence created during this submission
 *
 * Schedule an IB on the associated ring (all asics).
 * Returns 0 on success, error on failure.
 *
 * On SI, there are two parallel engines fed from the primary ring,
 * the CE (Constant Engine) and the DE (Drawing Engine).  Since
 * resource descriptors have moved to memory, the CE allows you to
 * prime the caches while the DE is updating register state so that
 * the resource descriptors will already be in cache when the draw is
 * processed.  To accomplish this, the userspace driver submits two
 * IBs, one for the CE and one for the DE.  If there is a CE IB (called
 * a CONST_IB), it will be put on the ring prior to the DE IB.  Prior
 * to SI there was just a DE IB.
 */
int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
                       struct amdgpu_ib *ibs, struct amdgpu_job *job,
                       struct dma_fence **f)
{
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_ib *ib = &ibs[0];
        struct dma_fence *tmp = NULL;
        bool skip_preamble, need_ctx_switch;
        unsigned patch_offset = ~0;
        struct amdgpu_vm *vm;
        uint64_t fence_ctx;
        uint32_t status = 0, alloc_size;
        unsigned fence_flags = 0;
        bool secure;
        unsigned i;
        int r = 0;
        bool need_pipe_sync = false;

        if (num_ibs == 0)
                return -EINVAL;

        /* ring tests don't use a job */
        if (job) {
                vm = job->vm;
                fence_ctx = job->base.s_fence ?
                        job->base.s_fence->scheduled.context : 0;
        } else {
                vm = NULL;
                fence_ctx = 0;
        }

        if (!ring->sched.ready) {
                dev_err(adev->dev, "couldn't schedule ib on ring <%s>\n", ring->name);
                return -EINVAL;
        }

        if (vm && !job->vmid) {
                dev_err(adev->dev, "VM IB without ID\n");
                return -EINVAL;
        }

        if ((ib->flags & AMDGPU_IB_FLAGS_SECURE) &&
            (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)) {
                dev_err(adev->dev, "secure submissions not supported on compute rings\n");
                return -EINVAL;
        }

        alloc_size = ring->funcs->emit_frame_size + num_ibs *
                ring->funcs->emit_ib_size;

        r = amdgpu_ring_alloc(ring, alloc_size);
        if (r) {
                dev_err(adev->dev, "scheduling IB failed (%d).\n", r);
                return r;
        }

        need_ctx_switch = ring->current_ctx != fence_ctx;
        if (ring->funcs->emit_pipeline_sync && job &&
            ((tmp = amdgpu_sync_get_fence(&job->sched_sync)) ||
             (amdgpu_sriov_vf(adev) && need_ctx_switch) ||
             amdgpu_vm_need_pipeline_sync(ring, job))) {
                need_pipe_sync = true;

                if (tmp)
                        trace_amdgpu_ib_pipe_sync(job, tmp);

                dma_fence_put(tmp);
        }

        if ((ib->flags & AMDGPU_IB_FLAG_EMIT_MEM_SYNC) && ring->funcs->emit_mem_sync)
                ring->funcs->emit_mem_sync(ring);

        if (ring->funcs->insert_start)
                ring->funcs->insert_start(ring);

        if (job) {
                r = amdgpu_vm_flush(ring, job, need_pipe_sync);
                if (r) {
                        amdgpu_ring_undo(ring);
                        return r;
                }
        }

        if (job && ring->funcs->init_cond_exec)
                patch_offset = amdgpu_ring_init_cond_exec(ring);

#ifdef CONFIG_X86_64
        if (!(adev->flags & AMD_IS_APU))
#endif
        {
                if (ring->funcs->emit_hdp_flush)
                        amdgpu_ring_emit_hdp_flush(ring);
                else
                        amdgpu_asic_flush_hdp(adev, ring);
        }

        if (need_ctx_switch)
                status |= AMDGPU_HAVE_CTX_SWITCH;

        skip_preamble = ring->current_ctx == fence_ctx;
        if (job && ring->funcs->emit_cntxcntl) {
                status |= job->preamble_status;
                status |= job->preemption_status;
                amdgpu_ring_emit_cntxcntl(ring, status);
        }

        /* Setup initial TMZiness and send it off. */
        secure = false;
        if (job && ring->funcs->emit_frame_cntl) {
                secure = ib->flags & AMDGPU_IB_FLAGS_SECURE;
                amdgpu_ring_emit_frame_cntl(ring, true, secure);
        }

        for (i = 0; i < num_ibs; ++i) {
                ib = &ibs[i];

                /* drop preamble IBs if we don't have a context switch */
                if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) &&
                    skip_preamble &&
                    !(status & AMDGPU_PREAMBLE_IB_PRESENT_FIRST) &&
                    !amdgpu_sriov_vf(adev)) /* for SRIOV preemption, Preamble CE ib must be inserted anyway */
                        continue;

                if (job && ring->funcs->emit_frame_cntl) {
                        if (secure != !!(ib->flags & AMDGPU_IB_FLAGS_SECURE)) {
                                amdgpu_ring_emit_frame_cntl(ring, false, secure);
                                secure = !secure;
                                amdgpu_ring_emit_frame_cntl(ring, true, secure);
                        }
                }

                amdgpu_ring_emit_ib(ring, job, ib, status);
                status &= ~AMDGPU_HAVE_CTX_SWITCH;
        }

        if (job && ring->funcs->emit_frame_cntl)
                amdgpu_ring_emit_frame_cntl(ring, false, secure);

#ifdef CONFIG_X86_64
        if (!(adev->flags & AMD_IS_APU))
#endif
                amdgpu_asic_invalidate_hdp(adev, ring);

        if (ib->flags & AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE)
                fence_flags |= AMDGPU_FENCE_FLAG_TC_WB_ONLY;

        /* wrap the last IB with fence */
        if (job && job->uf_addr) {
                amdgpu_ring_emit_fence(ring, job->uf_addr, job->uf_sequence,
                                       fence_flags | AMDGPU_FENCE_FLAG_64BIT);
        }

        r = amdgpu_fence_emit(ring, f, fence_flags);
        if (r) {
                dev_err(adev->dev, "failed to emit fence (%d)\n", r);
                if (job && job->vmid)
                        amdgpu_vmid_reset(adev, ring->funcs->vmhub, job->vmid);
                amdgpu_ring_undo(ring);
                return r;
        }

        if (ring->funcs->insert_end)
                ring->funcs->insert_end(ring);

        if (patch_offset != ~0 && ring->funcs->patch_cond_exec)
                amdgpu_ring_patch_cond_exec(ring, patch_offset);

        ring->current_ctx = fence_ctx;
        if (vm && ring->funcs->emit_switch_buffer)
                amdgpu_ring_emit_switch_buffer(ring);
        amdgpu_ring_commit(ring);
        return 0;
}
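/*
 * An illustrative direct submission tying the above together, roughly what
 * a per-ring IB test does (a sketch under assumptions; real packet contents
 * and error handling omitted, not code taken from this file):
 *
 *      struct amdgpu_ib ib = {};
 *      struct dma_fence *f = NULL;
 *      int r;
 *
 *      r = amdgpu_ib_get(adev, NULL, 256, AMDGPU_IB_POOL_DIRECT, &ib);
 *      ib.ptr[0] = ring->funcs->nop;   // assumed: a single NOP as a smoke test
 *      ib.length_dw = 1;
 *      r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
 *      r = dma_fence_wait_timeout(f, false, AMDGPU_IB_TEST_TIMEOUT);
 *      amdgpu_ib_free(adev, &ib, f);
 *      dma_fence_put(f);
 */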
/**
 * amdgpu_ib_pool_init - Init the IB (Indirect Buffer) pool
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the suballocator to manage a pool of memory
 * for use as IBs (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_ib_pool_init(struct amdgpu_device *adev)
{
        unsigned size;
        int r, i;

        if (adev->ib_pool_ready)
                return 0;

        for (i = 0; i < AMDGPU_IB_POOL_MAX; i++) {
                if (i == AMDGPU_IB_POOL_DIRECT)
                        size = PAGE_SIZE * 2;
                else
                        size = AMDGPU_IB_POOL_SIZE;
                r = amdgpu_sa_bo_manager_init(adev, &adev->ib_pools[i],
                                              size, AMDGPU_GPU_PAGE_SIZE,
                                              AMDGPU_GEM_DOMAIN_GTT);
                if (r)
                        goto error;
        }
        adev->ib_pool_ready = true;

        return 0;

error:
        while (i--)
                amdgpu_sa_bo_manager_fini(adev, &adev->ib_pools[i]);
        return r;
}
/**
 * amdgpu_ib_pool_fini - Free the IB (Indirect Buffer) pool
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the suballocator managing the pool of memory
 * for use as IBs (all asics).
 */
void amdgpu_ib_pool_fini(struct amdgpu_device *adev)
{
        int i;

        if (!adev->ib_pool_ready)
                return;

        for (i = 0; i < AMDGPU_IB_POOL_MAX; i++)
                amdgpu_sa_bo_manager_fini(adev, &adev->ib_pools[i]);
        adev->ib_pool_ready = false;
}
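/*
 * Pool lifecycle note: amdgpu_ib_pool_init() runs once per device (guarded
 * by ib_pool_ready) and amdgpu_ib_pool_fini() undoes it on teardown.  The
 * DIRECT pool is deliberately small (two pages); the expectation, though
 * not spelled out here, is that it only backs short direct submissions
 * such as ring tests, while the other pools use the larger
 * AMDGPU_IB_POOL_SIZE.
 */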
/**
 * amdgpu_ib_ring_tests - test IBs on the rings
 *
 * @adev: amdgpu_device pointer
 *
 * Test an IB (Indirect Buffer) on each ring.
 * If the test fails, disable the ring.
 * Returns 0 on success, error if the primary GFX ring
 * IB test fails.
 */
int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
{
        long tmo_gfx, tmo_mm;
        int r, ret = 0;
        unsigned i;

        tmo_mm = tmo_gfx = AMDGPU_IB_TEST_TIMEOUT;
        if (amdgpu_sriov_vf(adev)) {
                /* MM engines on the hypervisor side are not scheduled
                 * together with the CP and SDMA engines, so even in exclusive
                 * mode an MM engine could still be running on another VF;
                 * the IB test timeout for MM engines under SR-IOV therefore
                 * has to be long.  8 seconds should be enough for the MM
                 * engine to come back to this VF.
                 */
                tmo_mm = 8 * AMDGPU_IB_TEST_TIMEOUT;
        }

        if (amdgpu_sriov_runtime(adev)) {
                /* The CP and SDMA engines are scheduled together, so the
                 * timeout needs to be wide enough to cover the time spent
                 * waiting for them to come back when under RUNTIME only.
                 */
                tmo_gfx = 8 * AMDGPU_IB_TEST_TIMEOUT;
        } else if (adev->gmc.xgmi.hive_id) {
                tmo_gfx = AMDGPU_IB_TEST_GFX_XGMI_TIMEOUT;
        }

        for (i = 0; i < adev->num_rings; ++i) {
                struct amdgpu_ring *ring = adev->rings[i];
                long tmo;

                /* KIQ rings don't have an IB test because we never submit IBs
                 * to them and they have no interrupt support.
                 */
                if (!ring->sched.ready || !ring->funcs->test_ib)
                        continue;

                /* MM engines need more time */
                if (ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
                    ring->funcs->type == AMDGPU_RING_TYPE_VCE ||
                    ring->funcs->type == AMDGPU_RING_TYPE_UVD_ENC ||
                    ring->funcs->type == AMDGPU_RING_TYPE_VCN_DEC ||
                    ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC ||
                    ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG)
                        tmo = tmo_mm;
                else
                        tmo = tmo_gfx;

                r = amdgpu_ring_test_ib(ring, tmo);
                if (!r) {
                        DRM_DEV_DEBUG(adev->dev, "ib test on %s succeeded\n",
                                      ring->name);
                        continue;
                }

                ring->sched.ready = false;
                DRM_DEV_ERROR(adev->dev, "IB test failed on %s (%d).\n",
                              ring->name, r);

                if (ring == &adev->gfx.gfx_ring[0]) {
                        /* oh, oh, that's really bad */
                        adev->accel_working = false;
                        return r;
                } else {
                        ret = r;
                }
        }
        return ret;
}
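/*
 * Failure policy worth noting: a broken ring is quietly disabled
 * (sched.ready = false) and testing continues, but a failure on
 * gfx_ring[0] clears accel_working and aborts immediately, since nothing
 * useful can run without the primary GFX ring.
 */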
/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int amdgpu_debugfs_sa_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *)m->private;
        struct drm_device *dev = node->minor->dev;
        struct amdgpu_device *adev = dev->dev_private;

        seq_printf(m, "--------------------- DELAYED --------------------- \n");
        amdgpu_sa_bo_dump_debug_info(&adev->ib_pools[AMDGPU_IB_POOL_DELAYED],
                                     m);
        seq_printf(m, "-------------------- IMMEDIATE -------------------- \n");
        amdgpu_sa_bo_dump_debug_info(&adev->ib_pools[AMDGPU_IB_POOL_IMMEDIATE],
                                     m);
        seq_printf(m, "--------------------- DIRECT ---------------------- \n");
        amdgpu_sa_bo_dump_debug_info(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT], m);

        return 0;
}

static const struct drm_info_list amdgpu_debugfs_sa_list[] = {
        {"amdgpu_sa_info", &amdgpu_debugfs_sa_info, 0, NULL},
};

#endif

int amdgpu_debugfs_sa_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
        return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_sa_list, 1);
#else
        return 0;
#endif
}
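/*
 * With CONFIG_DEBUG_FS enabled, the pool state registered above can be read
 * from userspace, e.g. (typical DRM debugfs path; the minor number varies):
 *
 *      cat /sys/kernel/debug/dri/0/amdgpu_sa_info
 */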