/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "vid.h"
#include "uvd/uvd_6_0_d.h"
#include "uvd/uvd_6_0_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
#include "smu/smu_7_1_3_d.h"
#include "smu/smu_7_1_3_sh_mask.h"
#include "bif/bif_5_1_d.h"
#include "gmc/gmc_8_1_d.h"
#include "vi.h"

static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v6_0_start(struct amdgpu_device *adev);
static void uvd_v6_0_stop(struct amdgpu_device *adev);
static void uvd_v6_0_set_sw_clock_gating(struct amdgpu_device *adev);
static int uvd_v6_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state);
static void uvd_v6_0_enable_mgcg(struct amdgpu_device *adev,
				 bool enable);

/**
 * uvd_v6_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t uvd_v6_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_RPTR);
}

/**
 * uvd_v6_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t uvd_v6_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_WPTR);
}

/**
 * uvd_v6_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void uvd_v6_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}

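/*
 * Note on the three helpers above: on this UVD generation the ring
 * pointers live directly in the RBC registers, so the driver reads and
 * writes mmUVD_RBC_RB_RPTR/WPTR instead of using wptr polling or
 * doorbells (see the RB_CNTL programming in uvd_v6_0_start() below,
 * which clears RB_WPTR_POLL_EN).
 */
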
static int uvd_v6_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	uvd_v6_0_set_ring_funcs(adev);
	uvd_v6_0_set_irq_funcs(adev);

	return 0;
}

static int uvd_v6_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* UVD TRAP */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.irq);
	if (r)
		return r;

	r = amdgpu_uvd_sw_init(adev);
	if (r)
		return r;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	ring = &adev->uvd.ring;
	sprintf(ring->name, "uvd");
	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);

	return r;
}

static int uvd_v6_0_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_suspend(adev);
	if (r)
		return r;

	return amdgpu_uvd_sw_fini(adev);
}

/**
 * uvd_v6_0_hw_init - start and test UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int uvd_v6_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.ring;
	uint32_t tmp;
	int r;

	amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
	uvd_v6_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
	uvd_v6_0_enable_mgcg(adev, true);

	ring->ready = true;
	r = amdgpu_ring_test_ring(ring);
	if (r) {
		ring->ready = false;
		goto done;
	}

	r = amdgpu_ring_alloc(ring, 10);
	if (r) {
		DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
		goto done;
	}

	tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	/* Clear timeout status bits */
	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
	amdgpu_ring_write(ring, 0x8);

	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
	amdgpu_ring_write(ring, 3);

	amdgpu_ring_commit(ring);

done:
	if (!r)
		DRM_INFO("UVD initialized successfully.\n");

	return r;
}

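/*
 * A note on the command stream built above: the UVD ring consumes
 * PACKET0-style register writes, i.e. a PACKET0(reg, 0) header dword
 * followed by one payload dword that the block stores to that register.
 * A minimal sketch of the pattern used throughout this file (register
 * name and value here are placeholders, not a real programming sequence):
 *
 *	amdgpu_ring_write(ring, PACKET0(mmUVD_SOME_REG, 0));
 *	amdgpu_ring_write(ring, value);
 */
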
/**
 * uvd_v6_0_hw_fini - stop the hardware block
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the UVD block, mark ring as not ready any more
 */
static int uvd_v6_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.ring;

	if (RREG32(mmUVD_STATUS) != 0)
		uvd_v6_0_stop(adev);

	ring->ready = false;

	return 0;
}

static int uvd_v6_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = uvd_v6_0_hw_fini(adev);
	if (r)
		return r;

	/* Skip this for APU for now */
	if (!(adev->flags & AMD_IS_APU))
		r = amdgpu_uvd_suspend(adev);

	return r;
}

static int uvd_v6_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* Skip this for APU for now */
	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_uvd_resume(adev);
		if (r)
			return r;
	}

	return uvd_v6_0_hw_init(adev);
}

/**
 * uvd_v6_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the UVD memory controller know its offsets
 */
static void uvd_v6_0_mc_resume(struct amdgpu_device *adev)
{
	uint64_t offset;
	uint32_t size;

	/* program memory controller bits 0-27 */
	WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			lower_32_bits(adev->uvd.gpu_addr));
	WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			upper_32_bits(adev->uvd.gpu_addr));

	offset = AMDGPU_UVD_FIRMWARE_OFFSET;
	size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4);
	WREG32(mmUVD_VCPU_CACHE_OFFSET0, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE0, size);

	offset += size;
	size = AMDGPU_UVD_HEAP_SIZE;
	WREG32(mmUVD_VCPU_CACHE_OFFSET1, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE1, size);

	offset += size;
	size = AMDGPU_UVD_STACK_SIZE +
	       (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles);
	WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE2, size);

	WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);

	WREG32(mmUVD_GP_SCRATCH4, adev->uvd.max_handles);
}

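/*
 * Rough layout of the three VCPU cache windows programmed above, relative
 * to adev->uvd.gpu_addr (the sizes are the constants from amdgpu_uvd.h):
 *
 *	window 0: firmware image       (page-aligned fw->size + 4)
 *	window 1: decoder heap         (AMDGPU_UVD_HEAP_SIZE)
 *	window 2: stack + session data (AMDGPU_UVD_STACK_SIZE +
 *	           AMDGPU_UVD_SESSION_SIZE * max_handles)
 *
 * The OFFSETn registers are in 8-byte units, hence the ">> 3" shifts.
 */
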
static void cz_set_uvd_clock_gating_branches(struct amdgpu_device *adev,
					     bool enable)
{
	u32 data, data1;

	data = RREG32(mmUVD_CGC_GATE);
	data1 = RREG32(mmUVD_SUVD_CGC_GATE);
	if (enable) {
		data |= UVD_CGC_GATE__SYS_MASK |
			UVD_CGC_GATE__UDEC_MASK |
			UVD_CGC_GATE__MPEG2_MASK |
			UVD_CGC_GATE__RBC_MASK |
			UVD_CGC_GATE__LMI_MC_MASK |
			UVD_CGC_GATE__IDCT_MASK |
			UVD_CGC_GATE__MPRD_MASK |
			UVD_CGC_GATE__MPC_MASK |
			UVD_CGC_GATE__LBSI_MASK |
			UVD_CGC_GATE__LRBBM_MASK |
			UVD_CGC_GATE__UDEC_RE_MASK |
			UVD_CGC_GATE__UDEC_CM_MASK |
			UVD_CGC_GATE__UDEC_IT_MASK |
			UVD_CGC_GATE__UDEC_DB_MASK |
			UVD_CGC_GATE__UDEC_MP_MASK |
			UVD_CGC_GATE__WCB_MASK |
			UVD_CGC_GATE__VCPU_MASK |
			UVD_CGC_GATE__SCPU_MASK;
		data1 |= UVD_SUVD_CGC_GATE__SRE_MASK |
			UVD_SUVD_CGC_GATE__SIT_MASK |
			UVD_SUVD_CGC_GATE__SMP_MASK |
			UVD_SUVD_CGC_GATE__SCM_MASK |
			UVD_SUVD_CGC_GATE__SDB_MASK |
			UVD_SUVD_CGC_GATE__SRE_H264_MASK |
			UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
			UVD_SUVD_CGC_GATE__SIT_H264_MASK |
			UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
			UVD_SUVD_CGC_GATE__SCM_H264_MASK |
			UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
			UVD_SUVD_CGC_GATE__SDB_H264_MASK |
			UVD_SUVD_CGC_GATE__SDB_HEVC_MASK;
	} else {
		data &= ~(UVD_CGC_GATE__SYS_MASK |
			UVD_CGC_GATE__UDEC_MASK |
			UVD_CGC_GATE__MPEG2_MASK |
			UVD_CGC_GATE__RBC_MASK |
			UVD_CGC_GATE__LMI_MC_MASK |
			UVD_CGC_GATE__LMI_UMC_MASK |
			UVD_CGC_GATE__IDCT_MASK |
			UVD_CGC_GATE__MPRD_MASK |
			UVD_CGC_GATE__MPC_MASK |
			UVD_CGC_GATE__LBSI_MASK |
			UVD_CGC_GATE__LRBBM_MASK |
			UVD_CGC_GATE__UDEC_RE_MASK |
			UVD_CGC_GATE__UDEC_CM_MASK |
			UVD_CGC_GATE__UDEC_IT_MASK |
			UVD_CGC_GATE__UDEC_DB_MASK |
			UVD_CGC_GATE__UDEC_MP_MASK |
			UVD_CGC_GATE__WCB_MASK |
			UVD_CGC_GATE__VCPU_MASK |
			UVD_CGC_GATE__SCPU_MASK);
		data1 &= ~(UVD_SUVD_CGC_GATE__SRE_MASK |
			UVD_SUVD_CGC_GATE__SIT_MASK |
			UVD_SUVD_CGC_GATE__SMP_MASK |
			UVD_SUVD_CGC_GATE__SCM_MASK |
			UVD_SUVD_CGC_GATE__SDB_MASK |
			UVD_SUVD_CGC_GATE__SRE_H264_MASK |
			UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
			UVD_SUVD_CGC_GATE__SIT_H264_MASK |
			UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
			UVD_SUVD_CGC_GATE__SCM_H264_MASK |
			UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
			UVD_SUVD_CGC_GATE__SDB_H264_MASK |
			UVD_SUVD_CGC_GATE__SDB_HEVC_MASK);
	}
	WREG32(mmUVD_CGC_GATE, data);
	WREG32(mmUVD_SUVD_CGC_GATE, data1);
}

/**
 * uvd_v6_0_start - start UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the UVD block
 */
static int uvd_v6_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->uvd.ring;
	uint32_t rb_bufsz, tmp;
	uint32_t lmi_swap_cntl;
	uint32_t mp_swap_cntl;
	int i, j, r;

	/* disable DPG */
	WREG32_P(mmUVD_POWER_STATUS, 0, ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);

	/* disable byte swapping */
	lmi_swap_cntl = 0;
	mp_swap_cntl = 0;

	uvd_v6_0_mc_resume(adev);

	/* disable interrupt */
	WREG32_FIELD(UVD_MASTINT_EN, VCPU_EN, 0);

	/* stall UMC and register bus before resetting VCPU */
	WREG32_FIELD(UVD_LMI_CTRL2, STALL_ARB_UMC, 1);
	mdelay(1);

	/* put LMI, VCPU, RBC etc... into reset */
	WREG32(mmUVD_SOFT_RESET,
		UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
		UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
		UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
		UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
		UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
		UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
	mdelay(5);

	/* take UVD block out of reset */
	WREG32_FIELD(SRBM_SOFT_RESET, SOFT_RESET_UVD, 0);
	mdelay(5);

	/* initialize UVD memory controller */
	WREG32(mmUVD_LMI_CTRL,
		(0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
		UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__REQ_MODE_MASK |
		UVD_LMI_CTRL__DISABLE_ON_FWV_FAIL_MASK);

#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
	mp_swap_cntl = 0;
#endif
	WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
	WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);

	WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
	WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXB1, 0x0);
	WREG32(mmUVD_MPC_SET_ALU, 0);
	WREG32(mmUVD_MPC_SET_MUX, 0x88);

	/* take all subblocks out of reset, except VCPU */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* enable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, UVD_VCPU_CNTL__CLK_EN_MASK);

	/* enable UMC */
	WREG32_FIELD(UVD_LMI_CTRL2, STALL_ARB_UMC, 0);

	/* boot up the VCPU */
	WREG32(mmUVD_SOFT_RESET, 0);
	mdelay(10);

	for (i = 0; i < 10; ++i) {
		uint32_t status;

		for (j = 0; j < 100; ++j) {
			status = RREG32(mmUVD_STATUS);
			if (status & 2)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & 2)
			break;

		DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
		WREG32_FIELD(UVD_SOFT_RESET, VCPU_SOFT_RESET, 1);
		mdelay(10);
		WREG32_FIELD(UVD_SOFT_RESET, VCPU_SOFT_RESET, 0);
		mdelay(10);
		r = -1;
	}

	if (r) {
		DRM_ERROR("UVD not responding, giving up!!!\n");
		return r;
	}
	/* enable master interrupt */
	WREG32_P(mmUVD_MASTINT_EN,
		(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
		~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));

	/* clear the bit 4 of UVD_STATUS */
	WREG32_P(mmUVD_STATUS, 0, ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

	/* force RBC into idle state */
	rb_bufsz = order_base_2(ring->ring_size);
	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	WREG32(mmUVD_RBC_RB_CNTL, tmp);

	/* set the write pointer delay */
	WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* set the wb address */
	WREG32(mmUVD_RBC_RB_RPTR_ADDR, (upper_32_bits(ring->gpu_addr) >> 2));

	/* program the RB_BASE for ring buffer */
	WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
			lower_32_bits(ring->gpu_addr));
	WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
			upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32(mmUVD_RBC_RB_RPTR, 0);

	ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));

	WREG32_FIELD(UVD_RBC_RB_CNTL, RB_NO_FETCH, 0);

	return 0;
}

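/*
 * Boot handshake, as reconstructed from the register traffic above: the
 * VCPU firmware reports readiness through UVD_STATUS bit 1; the driver
 * polls for it (10 attempts of up to 100 * 10ms each) and toggles
 * VCPU_SOFT_RESET between attempts before giving up.  Only after the
 * handshake succeeds are interrupts enabled and the ring buffer armed.
 */
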
/**
 * uvd_v6_0_stop - stop UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the UVD block
 */
static void uvd_v6_0_stop(struct amdgpu_device *adev)
{
	/* force RBC into idle state */
	WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

	/* Stall UMC and register bus before resetting VCPU */
	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
	mdelay(1);

	/* put VCPU into reset */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* disable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, 0x0);

	/* Unstall UMC and register bus */
	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

	WREG32(mmUVD_STATUS, 0);
}

/**
 * uvd_v6_0_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: GPU address of the fence value
 * @seq: sequence number to write
 * @flags: AMDGPU_FENCE_FLAG_* for this fence
 *
 * Write a fence and a trap command to the ring.
 */
static void uvd_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				     unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 2);
}

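/*
 * Sketch of what the fence emission above puts on the ring: the sequence
 * number goes into UVD_CONTEXT_ID, the fence GPU address into the
 * GPCOM_VCPU_DATA0/DATA1 pair, and the two GPCOM_VCPU_CMD writes (0, then
 * 2) appear to ask the VCPU to store the fence value and raise the trap
 * interrupt that uvd_v6_0_process_interrupt() handles.  The command
 * encodings are firmware-defined; this reading is inferred from the
 * packet layout, not from documentation.
 */
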
/**
 * uvd_v6_0_ring_emit_hdp_flush - emit an hdp flush
 *
 * @ring: amdgpu_ring pointer
 *
 * Emits an hdp flush.
 */
static void uvd_v6_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, PACKET0(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0));
	amdgpu_ring_write(ring, 0);
}

/**
 * uvd_v6_0_ring_emit_hdp_invalidate - emit an hdp invalidate
 *
 * @ring: amdgpu_ring pointer
 *
 * Emits an hdp invalidate.
 */
static void uvd_v6_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, PACKET0(mmHDP_DEBUG0, 0));
	amdgpu_ring_write(ring, 1);
}

/**
 * uvd_v6_0_ring_test_ring - register write test
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully write to the context register
 */
static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(mmUVD_CONTEXT_ID);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n",
			 ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}
	return r;
}

/**
 * uvd_v6_0_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to execute
 *
 * Write ring commands to execute the indirect buffer
 */
static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_ib *ib,
				  unsigned vm_id, bool ctx_switch)
{
	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_VMID, 0));
	amdgpu_ring_write(ring, vm_id);

	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH, 0));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
	amdgpu_ring_write(ring, ib->length_dw);
}

static void uvd_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
					unsigned vm_id, uint64_t pd_addr)
{
	uint32_t reg;

	if (vm_id < 8)
		reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id;
	else
		reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8;

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, pd_addr >> 12);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0x8);

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, 1 << vm_id);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0x8);

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0));
	amdgpu_ring_write(ring, 1 << vm_id); /* mask */
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0xC);
}

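/*
 * The flush above is built from three GPCOM command groups: the first
 * writes the page directory base into the per-VM VM_CONTEXTn register
 * (note the byte-address "reg << 2" and the 4KB-aligned "pd_addr >> 12"),
 * the second pokes this VM's bit into VM_INVALIDATE_REQUEST, and the
 * third polls the same register until the bit reads back as zero, with
 * GP_SCRATCH8 holding the poll mask.  The command values (0x8 for a
 * register write, 0xC for a masked register wait) are firmware-defined;
 * this description is inferred from how the packets are assembled here.
 */
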
static void uvd_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0));
	amdgpu_ring_write(ring, 0xffffffff); /* mask */
	amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH9, 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0xE);
}

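/*
 * Pipeline sync, sketched: DATA0/DATA1 carry the GPU address of the last
 * synced fence value, GP_SCRATCH8 the compare mask and GP_SCRATCH9 the
 * expected sequence number; command 0xE then appears to make the VCPU
 * wait on that memory location before fetching further work.  As with
 * the VM flush, the opcode semantics are firmware-defined and inferred
 * from the packet layout.
 */
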
static bool uvd_v6_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}

static int uvd_v6_0_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (uvd_v6_0_is_idle(handle))
			return 0;
	}
	return -ETIMEDOUT;
}

#define AMDGPU_UVD_STATUS_BUSY_MASK    0xfd
static bool uvd_v6_0_check_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset = 0;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (REG_GET_FIELD(tmp, SRBM_STATUS, UVD_RQ_PENDING) ||
	    REG_GET_FIELD(tmp, SRBM_STATUS, UVD_BUSY) ||
	    (RREG32(mmUVD_STATUS) & AMDGPU_UVD_STATUS_BUSY_MASK))
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);

	if (srbm_soft_reset) {
		adev->uvd.srbm_soft_reset = srbm_soft_reset;
		return true;
	} else {
		adev->uvd.srbm_soft_reset = 0;
		return false;
	}
}

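/*
 * The soft reset path runs in four stages driven by the IP framework:
 * check_soft_reset() latches the needed SRBM reset bits into
 * adev->uvd.srbm_soft_reset, pre_soft_reset() stops the block,
 * soft_reset() pulses SRBM_SOFT_RESET, and post_soft_reset() restarts
 * the VCPU.
 */
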
static int uvd_v6_0_pre_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->uvd.srbm_soft_reset)
		return 0;

	uvd_v6_0_stop(adev);
	return 0;
}

static int uvd_v6_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset;

	if (!adev->uvd.srbm_soft_reset)
		return 0;
	srbm_soft_reset = adev->uvd.srbm_soft_reset;

	if (srbm_soft_reset) {
		u32 tmp;

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);
	}

	return 0;
}

static int uvd_v6_0_post_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->uvd.srbm_soft_reset)
		return 0;

	mdelay(5);

	return uvd_v6_0_start(adev);
}

static int uvd_v6_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	return 0;
}

static int uvd_v6_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: UVD TRAP\n");
	amdgpu_fence_process(&adev->uvd.ring);
	return 0;
}

static void uvd_v6_0_enable_clock_gating(struct amdgpu_device *adev, bool enable)
{
	uint32_t data1, data3;

	data1 = RREG32(mmUVD_SUVD_CGC_GATE);
	data3 = RREG32(mmUVD_CGC_GATE);

	data1 |= UVD_SUVD_CGC_GATE__SRE_MASK |
		UVD_SUVD_CGC_GATE__SIT_MASK |
		UVD_SUVD_CGC_GATE__SMP_MASK |
		UVD_SUVD_CGC_GATE__SCM_MASK |
		UVD_SUVD_CGC_GATE__SDB_MASK |
		UVD_SUVD_CGC_GATE__SRE_H264_MASK |
		UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
		UVD_SUVD_CGC_GATE__SIT_H264_MASK |
		UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
		UVD_SUVD_CGC_GATE__SCM_H264_MASK |
		UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
		UVD_SUVD_CGC_GATE__SDB_H264_MASK |
		UVD_SUVD_CGC_GATE__SDB_HEVC_MASK;

	if (enable) {
		data3 |= (UVD_CGC_GATE__SYS_MASK |
			UVD_CGC_GATE__UDEC_MASK |
			UVD_CGC_GATE__MPEG2_MASK |
			UVD_CGC_GATE__RBC_MASK |
			UVD_CGC_GATE__LMI_MC_MASK |
			UVD_CGC_GATE__LMI_UMC_MASK |
			UVD_CGC_GATE__IDCT_MASK |
			UVD_CGC_GATE__MPRD_MASK |
			UVD_CGC_GATE__MPC_MASK |
			UVD_CGC_GATE__LBSI_MASK |
			UVD_CGC_GATE__LRBBM_MASK |
			UVD_CGC_GATE__UDEC_RE_MASK |
			UVD_CGC_GATE__UDEC_CM_MASK |
			UVD_CGC_GATE__UDEC_IT_MASK |
			UVD_CGC_GATE__UDEC_DB_MASK |
			UVD_CGC_GATE__UDEC_MP_MASK |
			UVD_CGC_GATE__WCB_MASK |
			UVD_CGC_GATE__JPEG_MASK |
			UVD_CGC_GATE__SCPU_MASK |
			UVD_CGC_GATE__JPEG2_MASK);
		/* only when powergating is supported can we gate the VCPU clock */
		if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
			data3 |= UVD_CGC_GATE__VCPU_MASK;

		data3 &= ~UVD_CGC_GATE__REGS_MASK;
	} else {
		data3 = 0;
	}

	WREG32(mmUVD_SUVD_CGC_GATE, data1);
	WREG32(mmUVD_CGC_GATE, data3);
}

static void uvd_v6_0_set_sw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data2;

	data = RREG32(mmUVD_CGC_CTRL);
	data2 = RREG32(mmUVD_SUVD_CGC_CTRL);

	data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
		  UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);

	data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
		(1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
		(4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));

	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
		UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
		UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
		UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
		UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
		UVD_CGC_CTRL__SYS_MODE_MASK |
		UVD_CGC_CTRL__UDEC_MODE_MASK |
		UVD_CGC_CTRL__MPEG2_MODE_MASK |
		UVD_CGC_CTRL__REGS_MODE_MASK |
		UVD_CGC_CTRL__RBC_MODE_MASK |
		UVD_CGC_CTRL__LMI_MC_MODE_MASK |
		UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
		UVD_CGC_CTRL__IDCT_MODE_MASK |
		UVD_CGC_CTRL__MPRD_MODE_MASK |
		UVD_CGC_CTRL__MPC_MODE_MASK |
		UVD_CGC_CTRL__LBSI_MODE_MASK |
		UVD_CGC_CTRL__LRBBM_MODE_MASK |
		UVD_CGC_CTRL__WCB_MODE_MASK |
		UVD_CGC_CTRL__VCPU_MODE_MASK |
		UVD_CGC_CTRL__JPEG_MODE_MASK |
		UVD_CGC_CTRL__SCPU_MODE_MASK |
		UVD_CGC_CTRL__JPEG2_MODE_MASK);
	data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
		UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
		UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
		UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
		UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);

	WREG32(mmUVD_CGC_CTRL, data);
	WREG32(mmUVD_SUVD_CGC_CTRL, data2);
}

#if 0
static void uvd_v6_0_set_hw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data1, cgc_flags, suvd_flags;

	data = RREG32(mmUVD_CGC_GATE);
	data1 = RREG32(mmUVD_SUVD_CGC_GATE);

	cgc_flags = UVD_CGC_GATE__SYS_MASK |
		UVD_CGC_GATE__UDEC_MASK |
		UVD_CGC_GATE__MPEG2_MASK |
		UVD_CGC_GATE__RBC_MASK |
		UVD_CGC_GATE__LMI_MC_MASK |
		UVD_CGC_GATE__IDCT_MASK |
		UVD_CGC_GATE__MPRD_MASK |
		UVD_CGC_GATE__MPC_MASK |
		UVD_CGC_GATE__LBSI_MASK |
		UVD_CGC_GATE__LRBBM_MASK |
		UVD_CGC_GATE__UDEC_RE_MASK |
		UVD_CGC_GATE__UDEC_CM_MASK |
		UVD_CGC_GATE__UDEC_IT_MASK |
		UVD_CGC_GATE__UDEC_DB_MASK |
		UVD_CGC_GATE__UDEC_MP_MASK |
		UVD_CGC_GATE__WCB_MASK |
		UVD_CGC_GATE__VCPU_MASK |
		UVD_CGC_GATE__SCPU_MASK |
		UVD_CGC_GATE__JPEG_MASK |
		UVD_CGC_GATE__JPEG2_MASK;

	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
		UVD_SUVD_CGC_GATE__SIT_MASK |
		UVD_SUVD_CGC_GATE__SMP_MASK |
		UVD_SUVD_CGC_GATE__SCM_MASK |
		UVD_SUVD_CGC_GATE__SDB_MASK;

	data |= cgc_flags;
	data1 |= suvd_flags;

	WREG32(mmUVD_CGC_GATE, data);
	WREG32(mmUVD_SUVD_CGC_GATE, data1);
}
#endif

static void uvd_v6_0_enable_mgcg(struct amdgpu_device *adev,
				 bool enable)
{
	u32 orig, data;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data |= 0xfff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	} else {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data &= ~0xfff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	}
}

static int uvd_v6_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE) ? true : false;

	if (enable) {
		/* wait for STATUS to clear */
		if (uvd_v6_0_wait_for_idle(handle))
			return -EBUSY;
		uvd_v6_0_enable_clock_gating(adev, true);
		/* enable HW gates because UVD is idle */
/*		uvd_v6_0_set_hw_clock_gating(adev); */
	} else {
		/* disable HW gating and enable Sw gating */
		uvd_v6_0_enable_clock_gating(adev, false);

		uvd_v6_0_set_sw_clock_gating(adev);
	}

	return 0;
}

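/*
 * Gating policy implemented above: when gating is requested the block
 * must first go idle, then the coarse clock gates are closed; when
 * ungating, the hardware gates are opened and the driver falls back to
 * dynamic software-controlled gating via uvd_v6_0_set_sw_clock_gating().
 */
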
static int uvd_v6_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the UVD block.
	 * That's done in the dpm code via the SMC.  This
	 * just re-inits the block as necessary.  The actual
	 * gating still happens in the dpm code.  We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret = 0;

	WREG32(mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK);

	if (state == AMD_PG_STATE_GATE) {
		uvd_v6_0_stop(adev);
	} else {
		ret = uvd_v6_0_start(adev);
		if (ret)
			goto out;
	}

out:
	return ret;
}

static void uvd_v6_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	mutex_lock(&adev->pm.mutex);

	if (adev->flags & AMD_IS_APU)
		data = RREG32_SMC(ixCURRENT_PG_STATUS_APU);
	else
		data = RREG32_SMC(ixCURRENT_PG_STATUS);

	if (data & CURRENT_PG_STATUS__UVD_PG_STATUS_MASK) {
		DRM_INFO("Cannot get clockgating state when UVD is powergated.\n");
		goto out;
	}

	/* AMD_CG_SUPPORT_UVD_MGCG */
	data = RREG32(mmUVD_CGC_CTRL);
	if (data & UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK)
		*flags |= AMD_CG_SUPPORT_UVD_MGCG;

out:
	mutex_unlock(&adev->pm.mutex);
}

static const struct amd_ip_funcs uvd_v6_0_ip_funcs = {
	.name = "uvd_v6_0",
	.early_init = uvd_v6_0_early_init,
	.late_init = NULL,
	.sw_init = uvd_v6_0_sw_init,
	.sw_fini = uvd_v6_0_sw_fini,
	.hw_init = uvd_v6_0_hw_init,
	.hw_fini = uvd_v6_0_hw_fini,
	.suspend = uvd_v6_0_suspend,
	.resume = uvd_v6_0_resume,
	.is_idle = uvd_v6_0_is_idle,
	.wait_for_idle = uvd_v6_0_wait_for_idle,
	.check_soft_reset = uvd_v6_0_check_soft_reset,
	.pre_soft_reset = uvd_v6_0_pre_soft_reset,
	.soft_reset = uvd_v6_0_soft_reset,
	.post_soft_reset = uvd_v6_0_post_soft_reset,
	.set_clockgating_state = uvd_v6_0_set_clockgating_state,
	.set_powergating_state = uvd_v6_0_set_powergating_state,
	.get_clockgating_state = uvd_v6_0_get_clockgating_state,
};

static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
	.type = AMDGPU_RING_TYPE_UVD,
	.align_mask = 0xf,
	.nop = PACKET0(mmUVD_NO_OP, 0),
	.support_64bit_ptrs = false,
	.get_rptr = uvd_v6_0_ring_get_rptr,
	.get_wptr = uvd_v6_0_ring_get_wptr,
	.set_wptr = uvd_v6_0_ring_set_wptr,
	.parse_cs = amdgpu_uvd_ring_parse_cs,
	.emit_frame_size =
		2 + /* uvd_v6_0_ring_emit_hdp_flush */
		2 + /* uvd_v6_0_ring_emit_hdp_invalidate */
		10 + /* uvd_v6_0_ring_emit_pipeline_sync */
		14, /* uvd_v6_0_ring_emit_fence x1 no user fence */
	.emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */
	.emit_ib = uvd_v6_0_ring_emit_ib,
	.emit_fence = uvd_v6_0_ring_emit_fence,
	.emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
	.emit_hdp_invalidate = uvd_v6_0_ring_emit_hdp_invalidate,
	.test_ring = uvd_v6_0_ring_test_ring,
	.test_ib = amdgpu_uvd_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
};

static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_UVD,
	.align_mask = 0xf,
	.nop = PACKET0(mmUVD_NO_OP, 0),
	.support_64bit_ptrs = false,
	.get_rptr = uvd_v6_0_ring_get_rptr,
	.get_wptr = uvd_v6_0_ring_get_wptr,
	.set_wptr = uvd_v6_0_ring_set_wptr,
	.emit_frame_size =
		2 + /* uvd_v6_0_ring_emit_hdp_flush */
		2 + /* uvd_v6_0_ring_emit_hdp_invalidate */
		10 + /* uvd_v6_0_ring_emit_pipeline_sync */
		20 + /* uvd_v6_0_ring_emit_vm_flush */
		14 + 14, /* uvd_v6_0_ring_emit_fence x2 vm fence */
	.emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */
	.emit_ib = uvd_v6_0_ring_emit_ib,
	.emit_fence = uvd_v6_0_ring_emit_fence,
	.emit_vm_flush = uvd_v6_0_ring_emit_vm_flush,
	.emit_pipeline_sync = uvd_v6_0_ring_emit_pipeline_sync,
	.emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
	.emit_hdp_invalidate = uvd_v6_0_ring_emit_hdp_invalidate,
	.test_ring = uvd_v6_0_ring_test_ring,
	.test_ib = amdgpu_uvd_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
};

static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev)
{
	if (adev->asic_type >= CHIP_POLARIS10) {
		adev->uvd.ring.funcs = &uvd_v6_0_ring_vm_funcs;
		DRM_INFO("UVD is enabled in VM mode\n");
	} else {
		adev->uvd.ring.funcs = &uvd_v6_0_ring_phys_funcs;
		DRM_INFO("UVD is enabled in physical mode\n");
	}
}

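/*
 * Note the difference between the two func tables selected above: the
 * physical mode table keeps .parse_cs, so the kernel validates and
 * patches command streams into the pinned UVD segment, while the VM mode
 * table (Polaris and newer) drops it and instead provides .emit_vm_flush
 * and .emit_pipeline_sync so user buffers can be executed in place
 * through the GPU VM.
 */
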
static const struct amdgpu_irq_src_funcs uvd_v6_0_irq_funcs = {
	.set = uvd_v6_0_set_interrupt_state,
	.process = uvd_v6_0_process_interrupt,
};

static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->uvd.irq.num_types = 1;
	adev->uvd.irq.funcs = &uvd_v6_0_irq_funcs;
}

const struct amdgpu_ip_block_version uvd_v6_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_UVD,
	.major = 6,
	.minor = 0,
	.rev = 0,
	.funcs = &uvd_v6_0_ip_funcs,
};

const struct amdgpu_ip_block_version uvd_v6_2_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_UVD,
	.major = 6,
	.minor = 2,
	.rev = 0,
	.funcs = &uvd_v6_0_ip_funcs,
};

const struct amdgpu_ip_block_version uvd_v6_3_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_UVD,
	.major = 6,
	.minor = 3,
	.rev = 0,
	.funcs = &uvd_v6_0_ip_funcs,
};