 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
#include <linux/firmware.h>
#include "amdgpu_vcn.h"
#include "soc15_common.h"
#include "vega10/soc15ip.h"
#include "raven1/VCN/vcn_1_0_offset.h"
#include "raven1/VCN/vcn_1_0_sh_mask.h"
#include "vega10/HDP/hdp_4_0_offset.h"
#include "raven1/MMHUB/mmhub_9_1_offset.h"
#include "raven1/MMHUB/mmhub_9_1_sh_mask.h"
static int vcn_v1_0_start(struct amdgpu_device *adev);
static int vcn_v1_0_stop(struct amdgpu_device *adev);
static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev);
 * vcn_v1_0_early_init - set function pointers
 * @handle: amdgpu_device pointer
 * Set ring and irq function pointers
static int vcn_v1_0_early_init(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        adev->vcn.num_enc_rings = 2;
        vcn_v1_0_set_dec_ring_funcs(adev);
        vcn_v1_0_set_enc_ring_funcs(adev);
        vcn_v1_0_set_irq_funcs(adev);
 * vcn_v1_0_sw_init - sw init for VCN block
 * @handle: amdgpu_device pointer
 * Load firmware and sw initialization
static int vcn_v1_0_sw_init(void *handle)
        struct amdgpu_ring *ring;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_VCN, 124, &adev->vcn.irq);
        for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
                r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_VCN, i + 119,
        r = amdgpu_vcn_sw_init(adev);
        r = amdgpu_vcn_resume(adev);
        ring = &adev->vcn.ring_dec;
        sprintf(ring->name, "vcn_dec");
        r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.irq, 0);
        for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
                ring = &adev->vcn.ring_enc[i];
                sprintf(ring->name, "vcn_enc%d", i);
                r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.irq, 0);
 * vcn_v1_0_sw_fini - sw fini for VCN block
 * @handle: amdgpu_device pointer
 * VCN suspend and free up sw allocation
static int vcn_v1_0_sw_fini(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        r = amdgpu_vcn_suspend(adev);
        r = amdgpu_vcn_sw_fini(adev);
 * vcn_v1_0_hw_init - start and test VCN block
 * @handle: amdgpu_device pointer
 * Initialize the hardware, boot up the VCPU and do some testing
static int vcn_v1_0_hw_init(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct amdgpu_ring *ring = &adev->vcn.ring_dec;
        r = vcn_v1_0_start(adev);
        r = amdgpu_ring_test_ring(ring);
        for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
                ring = &adev->vcn.ring_enc[i];
                r = amdgpu_ring_test_ring(ring);
        DRM_INFO("VCN decode and encode initialized successfully.\n");
 * vcn_v1_0_hw_fini - stop the hardware block
 * @handle: amdgpu_device pointer
 * Stop the VCN block, mark the ring as no longer ready
static int vcn_v1_0_hw_fini(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct amdgpu_ring *ring = &adev->vcn.ring_dec;
        r = vcn_v1_0_stop(adev);
 * vcn_v1_0_suspend - suspend VCN block
 * @handle: amdgpu_device pointer
 * HW fini and suspend VCN block
static int vcn_v1_0_suspend(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        r = vcn_v1_0_hw_fini(adev);
        r = amdgpu_vcn_suspend(adev);
 * vcn_v1_0_resume - resume VCN block
 * @handle: amdgpu_device pointer
 * Resume firmware and hw init VCN block
static int vcn_v1_0_resume(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        r = amdgpu_vcn_resume(adev);
        r = vcn_v1_0_hw_init(adev);
 * vcn_v1_0_mc_resume - memory controller programming
 * @adev: amdgpu_device pointer
 * Let the VCN memory controller know its offsets
static void vcn_v1_0_mc_resume(struct amdgpu_device *adev)
        uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
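        /*
         * Program the three VCPU cache windows: window 0 maps the firmware
         * image, window 1 the VCN heap, and window 2 the stack plus the
         * per-session context area (40 sessions worth).
         */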
        WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
                lower_32_bits(adev->vcn.gpu_addr));
        WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
                upper_32_bits(adev->vcn.gpu_addr));
        WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
                AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
        WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size);
        WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
                lower_32_bits(adev->vcn.gpu_addr + size));
        WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
                upper_32_bits(adev->vcn.gpu_addr + size));
        WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0);
        WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_HEAP_SIZE);
        WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
                lower_32_bits(adev->vcn.gpu_addr + size + AMDGPU_VCN_HEAP_SIZE));
        WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
                upper_32_bits(adev->vcn.gpu_addr + size + AMDGPU_VCN_HEAP_SIZE));
        WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0);
        WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2,
                AMDGPU_VCN_STACK_SIZE + (AMDGPU_VCN_SESSION_SIZE * 40));
        WREG32_SOC15(UVD, 0, mmUVD_UDEC_ADDR_CONFIG,
                adev->gfx.config.gb_addr_config);
        WREG32_SOC15(UVD, 0, mmUVD_UDEC_DB_ADDR_CONFIG,
                adev->gfx.config.gb_addr_config);
        WREG32_SOC15(UVD, 0, mmUVD_UDEC_DBW_ADDR_CONFIG,
                adev->gfx.config.gb_addr_config);
 * vcn_v1_0_disable_clock_gating - disable VCN clock gating
 * @adev: amdgpu_device pointer
 * @sw: enable SW clock gating
 * Disable clock gating for VCN block
static void vcn_v1_0_disable_clock_gating(struct amdgpu_device *adev, bool sw)
        /* JPEG disable CGC */
        data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL);
        if (sw)
                data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
        else
                data &= ~JPEG_CGC_CTRL__DYN_CLOCK_MODE_MASK;
        data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
        data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
        WREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL, data);
        data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE);
        data &= ~(JPEG_CGC_GATE__JPEG_MASK | JPEG_CGC_GATE__JPEG2_MASK);
        WREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE, data);
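        /*
         * Clearing bits in the *_CGC_GATE registers keeps the corresponding
         * sub-block clocks ungated, while the *_CGC_CTRL mode bits select the
         * clock-gating mode per block; the same pattern repeats below for the
         * UVD and SUVD register sets.
         */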
        /* UVD disable CGC */
        data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
        if (sw)
                data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
        else
                data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
        data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
        data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
        WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);
        data = RREG32_SOC15(VCN, 0, mmUVD_CGC_GATE);
        data &= ~(UVD_CGC_GATE__SYS_MASK
                | UVD_CGC_GATE__UDEC_MASK
                | UVD_CGC_GATE__MPEG2_MASK
                | UVD_CGC_GATE__REGS_MASK
                | UVD_CGC_GATE__RBC_MASK
                | UVD_CGC_GATE__LMI_MC_MASK
                | UVD_CGC_GATE__LMI_UMC_MASK
                | UVD_CGC_GATE__IDCT_MASK
                | UVD_CGC_GATE__MPRD_MASK
                | UVD_CGC_GATE__MPC_MASK
                | UVD_CGC_GATE__LBSI_MASK
                | UVD_CGC_GATE__LRBBM_MASK
                | UVD_CGC_GATE__UDEC_RE_MASK
                | UVD_CGC_GATE__UDEC_CM_MASK
                | UVD_CGC_GATE__UDEC_IT_MASK
                | UVD_CGC_GATE__UDEC_DB_MASK
                | UVD_CGC_GATE__UDEC_MP_MASK
                | UVD_CGC_GATE__WCB_MASK
                | UVD_CGC_GATE__VCPU_MASK
                | UVD_CGC_GATE__SCPU_MASK);
        WREG32_SOC15(VCN, 0, mmUVD_CGC_GATE, data);
        data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
        data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK
                | UVD_CGC_CTRL__UDEC_CM_MODE_MASK
                | UVD_CGC_CTRL__UDEC_IT_MODE_MASK
                | UVD_CGC_CTRL__UDEC_DB_MODE_MASK
                | UVD_CGC_CTRL__UDEC_MP_MODE_MASK
                | UVD_CGC_CTRL__SYS_MODE_MASK
                | UVD_CGC_CTRL__UDEC_MODE_MASK
                | UVD_CGC_CTRL__MPEG2_MODE_MASK
                | UVD_CGC_CTRL__REGS_MODE_MASK
                | UVD_CGC_CTRL__RBC_MODE_MASK
                | UVD_CGC_CTRL__LMI_MC_MODE_MASK
                | UVD_CGC_CTRL__LMI_UMC_MODE_MASK
                | UVD_CGC_CTRL__IDCT_MODE_MASK
                | UVD_CGC_CTRL__MPRD_MODE_MASK
                | UVD_CGC_CTRL__MPC_MODE_MASK
                | UVD_CGC_CTRL__LBSI_MODE_MASK
                | UVD_CGC_CTRL__LRBBM_MODE_MASK
                | UVD_CGC_CTRL__WCB_MODE_MASK
                | UVD_CGC_CTRL__VCPU_MODE_MASK
                | UVD_CGC_CTRL__SCPU_MODE_MASK);
        WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);
        data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_GATE);
        data |= (UVD_SUVD_CGC_GATE__SRE_MASK
                | UVD_SUVD_CGC_GATE__SIT_MASK
                | UVD_SUVD_CGC_GATE__SMP_MASK
                | UVD_SUVD_CGC_GATE__SCM_MASK
                | UVD_SUVD_CGC_GATE__SDB_MASK
                | UVD_SUVD_CGC_GATE__SRE_H264_MASK
                | UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
                | UVD_SUVD_CGC_GATE__SIT_H264_MASK
                | UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
                | UVD_SUVD_CGC_GATE__SCM_H264_MASK
                | UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
                | UVD_SUVD_CGC_GATE__SDB_H264_MASK
                | UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
                | UVD_SUVD_CGC_GATE__SCLR_MASK
                | UVD_SUVD_CGC_GATE__UVD_SC_MASK
                | UVD_SUVD_CGC_GATE__ENT_MASK
                | UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
                | UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK
                | UVD_SUVD_CGC_GATE__SITE_MASK
                | UVD_SUVD_CGC_GATE__SRE_VP9_MASK
                | UVD_SUVD_CGC_GATE__SCM_VP9_MASK
                | UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
                | UVD_SUVD_CGC_GATE__SDB_VP9_MASK
                | UVD_SUVD_CGC_GATE__IME_HEVC_MASK);
        WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_GATE, data);
        data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL);
        data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
                | UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
                | UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
                | UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
                | UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
                | UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
                | UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
                | UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
                | UVD_SUVD_CGC_CTRL__IME_MODE_MASK
                | UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
        WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data);
 * vcn_v1_0_enable_clock_gating - enable VCN clock gating
 * @adev: amdgpu_device pointer
 * @sw: enable SW clock gating
 * Enable clock gating for VCN block
static void vcn_v1_0_enable_clock_gating(struct amdgpu_device *adev, bool sw)
        /* enable JPEG CGC */
        data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL);
        if (sw)
                data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
        else
                data |= 0 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
        data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
        data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
        WREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL, data);
        data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE);
        data |= (JPEG_CGC_GATE__JPEG_MASK | JPEG_CGC_GATE__JPEG2_MASK);
        WREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE, data);
        data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
        if (sw)
                data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
        else
                data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
        data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
        data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
        WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);
        data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
        data |= (UVD_CGC_CTRL__UDEC_RE_MODE_MASK
                | UVD_CGC_CTRL__UDEC_CM_MODE_MASK
                | UVD_CGC_CTRL__UDEC_IT_MODE_MASK
                | UVD_CGC_CTRL__UDEC_DB_MODE_MASK
                | UVD_CGC_CTRL__UDEC_MP_MODE_MASK
                | UVD_CGC_CTRL__SYS_MODE_MASK
                | UVD_CGC_CTRL__UDEC_MODE_MASK
                | UVD_CGC_CTRL__MPEG2_MODE_MASK
                | UVD_CGC_CTRL__REGS_MODE_MASK
                | UVD_CGC_CTRL__RBC_MODE_MASK
                | UVD_CGC_CTRL__LMI_MC_MODE_MASK
                | UVD_CGC_CTRL__LMI_UMC_MODE_MASK
                | UVD_CGC_CTRL__IDCT_MODE_MASK
                | UVD_CGC_CTRL__MPRD_MODE_MASK
                | UVD_CGC_CTRL__MPC_MODE_MASK
                | UVD_CGC_CTRL__LBSI_MODE_MASK
                | UVD_CGC_CTRL__LRBBM_MODE_MASK
                | UVD_CGC_CTRL__WCB_MODE_MASK
                | UVD_CGC_CTRL__VCPU_MODE_MASK
                | UVD_CGC_CTRL__SCPU_MODE_MASK);
        WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);
        data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL);
        data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
                | UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
                | UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
                | UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
                | UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
                | UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
                | UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
                | UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
                | UVD_SUVD_CGC_CTRL__IME_MODE_MASK
                | UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
        WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data);
 * vcn_v1_0_start - start VCN block
 * @adev: amdgpu_device pointer
 * Setup and start the VCN block
static int vcn_v1_0_start(struct amdgpu_device *adev)
        struct amdgpu_ring *ring = &adev->vcn.ring_dec;
        uint32_t rb_bufsz, tmp;
        uint32_t lmi_swap_cntl;
        /* disable byte swapping */
        lmi_swap_cntl = 0;
        vcn_v1_0_mc_resume(adev);
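        /*
         * Bring-up order below: disable clock gating and the master
         * interrupt, stall the UMC, hold the engine in soft reset while the
         * LMI and MPC are programmed, then release the reset, enable the
         * VCPU clock and wait for the firmware to report ready via
         * UVD_STATUS.
         */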
        /* disable clock gating */
        vcn_v1_0_disable_clock_gating(adev, true);
        /* disable interrupt */
        WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), 0,
                ~UVD_MASTINT_EN__VCPU_EN_MASK);
        /* stall UMC and register bus before resetting VCPU */
        WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2),
                UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
                ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
        /* put LMI, VCPU, RBC etc... into reset */
        WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET,
                UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
                UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
                UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
                UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
                UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
                UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
                UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
                UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
        /* initialize VCN memory controller */
        WREG32_SOC15(UVD, 0, mmUVD_LMI_CTRL,
                (0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
                UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
                UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
                UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
                UVD_LMI_CTRL__REQ_MODE_MASK |
        /* swap (8 in 32) RB and IB */
        WREG32_SOC15(UVD, 0, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
        WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXA0, 0x40c2040);
        WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXA1, 0x0);
        WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXB0, 0x40c2040);
        WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXB1, 0x0);
        WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_ALU, 0);
        WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUX, 0x88);
        /* take all subblocks out of reset, except VCPU */
        WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET,
                UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
        /* enable VCPU clock */
        WREG32_SOC15(UVD, 0, mmUVD_VCPU_CNTL,
                UVD_VCPU_CNTL__CLK_EN_MASK);
        WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), 0,
                ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
        /* boot up the VCPU */
        WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET, 0);
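        /*
         * The firmware is now running; the loop below polls UVD_STATUS,
         * retrying up to ten times and pulsing the VCPU soft reset between
         * attempts before giving up.
         */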
        for (i = 0; i < 10; ++i) {
                for (j = 0; j < 100; ++j) {
                        status = RREG32_SOC15(UVD, 0, mmUVD_STATUS);
                DRM_ERROR("VCN decode not responding, trying to reset the VCPU!!!\n");
                WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
                        UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
                        ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
                WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0,
                        ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
                DRM_ERROR("VCN decode not responding, giving up!!!\n");
        /* enable master interrupt */
        WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN),
                (UVD_MASTINT_EN__VCPU_EN_MASK | UVD_MASTINT_EN__SYS_EN_MASK),
                ~(UVD_MASTINT_EN__VCPU_EN_MASK | UVD_MASTINT_EN__SYS_EN_MASK));
        /* clear bit 4 of VCN_STATUS */
        WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS), 0,
                ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
        /* force RBC into idle state */
        rb_bufsz = order_base_2(ring->ring_size);
        tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
        WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp);
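        /*
         * RB_NO_FETCH keeps the RBC from fetching commands until the ring
         * base and pointers have been programmed below, after which the bit
         * is cleared again so the ring can run.
         */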
        /* set the write pointer delay */
        WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR_CNTL, 0);
        /* set the wb address */
        WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR,
                (upper_32_bits(ring->gpu_addr) >> 2));
        /* program the RB_BASE for the ring buffer */
        WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
                lower_32_bits(ring->gpu_addr));
        WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
                upper_32_bits(ring->gpu_addr));
        /* Initialize the ring buffer's read and write pointers */
        WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR, 0);
        ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
        WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
                lower_32_bits(ring->wptr));
        WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), 0,
                ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);
        ring = &adev->vcn.ring_enc[0];
        WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
        WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
        WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
        WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
        WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);
        ring = &adev->vcn.ring_enc[1];
        WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
        WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
        WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
        WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
        WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
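        /*
         * ring->ring_size is in bytes; the division by 4 programs the
         * UVD_RB_SIZE registers in dwords for both encode rings.
         */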
 * vcn_v1_0_stop - stop VCN block
 * @adev: amdgpu_device pointer
static int vcn_v1_0_stop(struct amdgpu_device *adev)
        /* force RBC into idle state */
        WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, 0x11010101);
        /* Stall UMC and register bus before resetting VCPU */
        WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2),
                UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
                ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
        /* put VCPU into reset */
        WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET,
                UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
        /* disable VCPU clock */
        WREG32_SOC15(UVD, 0, mmUVD_VCPU_CNTL, 0x0);
        /* Unstall UMC and register bus */
        WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), 0,
                ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
        /* enable clock gating */
        vcn_v1_0_enable_clock_gating(adev, true);
static int vcn_v1_0_set_clockgating_state(void *handle,
                enum amd_clockgating_state state)
        /* needed for driver unload */
 * vcn_v1_0_dec_ring_get_rptr - get read pointer
 * @ring: amdgpu_ring pointer
 * Returns the current hardware read pointer
static uint64_t vcn_v1_0_dec_ring_get_rptr(struct amdgpu_ring *ring)
        struct amdgpu_device *adev = ring->adev;
        return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
 * vcn_v1_0_dec_ring_get_wptr - get write pointer
 * @ring: amdgpu_ring pointer
 * Returns the current hardware write pointer
static uint64_t vcn_v1_0_dec_ring_get_wptr(struct amdgpu_ring *ring)
        struct amdgpu_device *adev = ring->adev;
        return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR);
 * vcn_v1_0_dec_ring_set_wptr - set write pointer
 * @ring: amdgpu_ring pointer
 * Commits the write pointer to the hardware
static void vcn_v1_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
        struct amdgpu_device *adev = ring->adev;
        WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
 * vcn_v1_0_dec_ring_insert_start - insert a start command
 * @ring: amdgpu_ring pointer
 * Write a start command to the ring.
static void vcn_v1_0_dec_ring_insert_start(struct amdgpu_ring *ring)
        amdgpu_ring_write(ring,
                PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring,
                PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
        amdgpu_ring_write(ring, VCN_DEC_CMD_PACKET_START << 1);
 * vcn_v1_0_dec_ring_insert_end - insert an end command
 * @ring: amdgpu_ring pointer
 * Write an end command to the ring.
static void vcn_v1_0_dec_ring_insert_end(struct amdgpu_ring *ring)
        amdgpu_ring_write(ring,
                PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
        amdgpu_ring_write(ring, VCN_DEC_CMD_PACKET_END << 1);
 * vcn_v1_0_dec_ring_emit_fence - emit a fence & trap command
 * @ring: amdgpu_ring pointer
 * @fence: fence to emit
 * Write a fence and a trap command to the ring.
static void vcn_v1_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
                unsigned flags)
        WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
        amdgpu_ring_write(ring,
                PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0));
        amdgpu_ring_write(ring, seq);
        amdgpu_ring_write(ring,
                PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
        amdgpu_ring_write(ring, addr & 0xffffffff);
        amdgpu_ring_write(ring,
                PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
        amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
        amdgpu_ring_write(ring,
                PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
        amdgpu_ring_write(ring, VCN_DEC_CMD_FENCE << 1);
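        /*
         * Follow the fence write with a trap packet so the VCPU raises an
         * interrupt once the fence value has landed; the IRQ handler then
         * runs fence processing for this ring.
         */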
        amdgpu_ring_write(ring,
                PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring,
                PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring,
                PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
        amdgpu_ring_write(ring, VCN_DEC_CMD_TRAP << 1);
 * vcn_v1_0_dec_ring_emit_hdp_invalidate - emit an HDP invalidate
 * @ring: amdgpu_ring pointer
 * Emits an HDP invalidate.
static void vcn_v1_0_dec_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
        amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 0));
        amdgpu_ring_write(ring, 1);
 * vcn_v1_0_dec_ring_emit_ib - execute indirect buffer
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to execute
 * Write ring commands to execute the indirect buffer
static void vcn_v1_0_dec_ring_emit_ib(struct amdgpu_ring *ring,
                struct amdgpu_ib *ib,
                unsigned vm_id, bool ctx_switch)
        amdgpu_ring_write(ring,
                PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_VMID), 0));
        amdgpu_ring_write(ring, vm_id);
        amdgpu_ring_write(ring,
                PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_64BIT_BAR_LOW), 0));
        amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
        amdgpu_ring_write(ring,
                PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH), 0));
        amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
        amdgpu_ring_write(ring,
                PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_IB_SIZE), 0));
        amdgpu_ring_write(ring, ib->length_dw);
static void vcn_v1_0_dec_vm_reg_write(struct amdgpu_ring *ring,
                uint32_t data0, uint32_t data1)
        amdgpu_ring_write(ring,
                PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
        amdgpu_ring_write(ring, data0);
        amdgpu_ring_write(ring,
                PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
        amdgpu_ring_write(ring, data1);
        amdgpu_ring_write(ring,
                PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
        amdgpu_ring_write(ring, VCN_DEC_CMD_WRITE_REG << 1);
static void vcn_v1_0_dec_vm_reg_wait(struct amdgpu_ring *ring,
                uint32_t data0, uint32_t data1, uint32_t mask)
        amdgpu_ring_write(ring,
                PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
        amdgpu_ring_write(ring, data0);
        amdgpu_ring_write(ring,
                PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
        amdgpu_ring_write(ring, data1);
        amdgpu_ring_write(ring,
                PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GP_SCRATCH8), 0));
        amdgpu_ring_write(ring, mask);
        amdgpu_ring_write(ring,
                PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
        amdgpu_ring_write(ring, VCN_DEC_CMD_REG_READ_COND_WAIT << 1);
static void vcn_v1_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
                unsigned vm_id, uint64_t pd_addr)
        struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
        uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
        uint32_t data0, data1, mask;
        unsigned eng = ring->vm_inv_eng;
        pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
        pd_addr |= AMDGPU_PTE_VALID;
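        /*
         * Emit the flush as register writes through the VCPU: write the page
         * directory address into the hub's ctx0 page-table-base registers,
         * wait for the low half to read back, then kick the invalidation
         * request for this ring's engine and wait for its ack.
         */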
        data0 = (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2;
        data1 = upper_32_bits(pd_addr);
        vcn_v1_0_dec_vm_reg_write(ring, data0, data1);
        data0 = (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2;
        data1 = lower_32_bits(pd_addr);
        vcn_v1_0_dec_vm_reg_write(ring, data0, data1);
        data0 = (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2;
        data1 = lower_32_bits(pd_addr);
        vcn_v1_0_dec_vm_reg_wait(ring, data0, data1, mask);
        data0 = (hub->vm_inv_eng0_req + eng) << 2;
        vcn_v1_0_dec_vm_reg_write(ring, data0, data1);
        data0 = (hub->vm_inv_eng0_ack + eng) << 2;
        vcn_v1_0_dec_vm_reg_wait(ring, data0, data1, mask);
 * vcn_v1_0_enc_ring_get_rptr - get enc read pointer
 * @ring: amdgpu_ring pointer
 * Returns the current hardware enc read pointer
static uint64_t vcn_v1_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
        struct amdgpu_device *adev = ring->adev;
        if (ring == &adev->vcn.ring_enc[0])
                return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR);
        else
                return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2);
 * vcn_v1_0_enc_ring_get_wptr - get enc write pointer
 * @ring: amdgpu_ring pointer
 * Returns the current hardware enc write pointer
static uint64_t vcn_v1_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
        struct amdgpu_device *adev = ring->adev;
        if (ring == &adev->vcn.ring_enc[0])
                return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR);
        else
                return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2);
 * vcn_v1_0_enc_ring_set_wptr - set enc write pointer
 * @ring: amdgpu_ring pointer
 * Commits the enc write pointer to the hardware
static void vcn_v1_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
        struct amdgpu_device *adev = ring->adev;
        if (ring == &adev->vcn.ring_enc[0])
                WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR,
                        lower_32_bits(ring->wptr));
        else
                WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2,
                        lower_32_bits(ring->wptr));
 * vcn_v1_0_enc_ring_emit_fence - emit an enc fence & trap command
 * @ring: amdgpu_ring pointer
 * @fence: fence to emit
 * Write an enc fence and a trap command to the ring.
static void vcn_v1_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
                u64 seq, unsigned flags)
        WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
        amdgpu_ring_write(ring, VCN_ENC_CMD_FENCE);
        amdgpu_ring_write(ring, addr);
        amdgpu_ring_write(ring, upper_32_bits(addr));
        amdgpu_ring_write(ring, seq);
        amdgpu_ring_write(ring, VCN_ENC_CMD_TRAP);
static void vcn_v1_0_enc_ring_insert_end(struct amdgpu_ring *ring)
        amdgpu_ring_write(ring, VCN_ENC_CMD_END);
 * vcn_v1_0_enc_ring_emit_ib - enc execute indirect buffer
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to execute
 * Write enc ring commands to execute the indirect buffer
static void vcn_v1_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
                struct amdgpu_ib *ib, unsigned int vm_id, bool ctx_switch)
        amdgpu_ring_write(ring, VCN_ENC_CMD_IB);
        amdgpu_ring_write(ring, vm_id);
        amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
        amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
        amdgpu_ring_write(ring, ib->length_dw);
static void vcn_v1_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
                unsigned int vm_id, uint64_t pd_addr)
        struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
        uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
        unsigned eng = ring->vm_inv_eng;
        pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
        pd_addr |= AMDGPU_PTE_VALID;
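        /*
         * Same flush sequence as the decode ring, but emitted with the
         * direct VCN_ENC_CMD_REG_WRITE/REG_WAIT packets instead of going
         * through the GPCOM VCPU registers.
         */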
        amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE);
        amdgpu_ring_write(ring,
                (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2);
        amdgpu_ring_write(ring, upper_32_bits(pd_addr));
        amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE);
        amdgpu_ring_write(ring,
                (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2);
        amdgpu_ring_write(ring, lower_32_bits(pd_addr));
        amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WAIT);
        amdgpu_ring_write(ring,
                (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2);
        amdgpu_ring_write(ring, 0xffffffff);
        amdgpu_ring_write(ring, lower_32_bits(pd_addr));
        amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE);
        amdgpu_ring_write(ring, (hub->vm_inv_eng0_req + eng) << 2);
        amdgpu_ring_write(ring, req);
        /* wait for flush */
        amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WAIT);
        amdgpu_ring_write(ring, (hub->vm_inv_eng0_ack + eng) << 2);
        amdgpu_ring_write(ring, 1 << vm_id);
        amdgpu_ring_write(ring, 1 << vm_id);
static int vcn_v1_0_set_interrupt_state(struct amdgpu_device *adev,
                struct amdgpu_irq_src *source,
                enum amdgpu_interrupt_state state)
static int vcn_v1_0_process_interrupt(struct amdgpu_device *adev,
                struct amdgpu_irq_src *source,
                struct amdgpu_iv_entry *entry)
        DRM_DEBUG("IH: VCN TRAP\n");
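        /*
         * Dispatch on the interrupt source ID registered in sw_init: 124 for
         * the decode ring and 119/120 for the two encode rings, each of
         * which just runs fence processing for its ring.
         */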
        switch (entry->src_id) {
                amdgpu_fence_process(&adev->vcn.ring_dec);
                amdgpu_fence_process(&adev->vcn.ring_enc[0]);
                amdgpu_fence_process(&adev->vcn.ring_enc[1]);
                DRM_ERROR("Unhandled interrupt: %d %d\n",
                        entry->src_id, entry->src_data[0]);
static const struct amd_ip_funcs vcn_v1_0_ip_funcs = {
        .early_init = vcn_v1_0_early_init,
        .sw_init = vcn_v1_0_sw_init,
        .sw_fini = vcn_v1_0_sw_fini,
        .hw_init = vcn_v1_0_hw_init,
        .hw_fini = vcn_v1_0_hw_fini,
        .suspend = vcn_v1_0_suspend,
        .resume = vcn_v1_0_resume,
        .is_idle = NULL /* vcn_v1_0_is_idle */,
        .wait_for_idle = NULL /* vcn_v1_0_wait_for_idle */,
        .check_soft_reset = NULL /* vcn_v1_0_check_soft_reset */,
        .pre_soft_reset = NULL /* vcn_v1_0_pre_soft_reset */,
        .soft_reset = NULL /* vcn_v1_0_soft_reset */,
        .post_soft_reset = NULL /* vcn_v1_0_post_soft_reset */,
        .set_clockgating_state = vcn_v1_0_set_clockgating_state,
        .set_powergating_state = NULL /* vcn_v1_0_set_powergating_state */,
static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = {
        .type = AMDGPU_RING_TYPE_VCN_DEC,
        .nop = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP), 0),
        .support_64bit_ptrs = false,
        .vmhub = AMDGPU_MMHUB,
        .get_rptr = vcn_v1_0_dec_ring_get_rptr,
        .get_wptr = vcn_v1_0_dec_ring_get_wptr,
        .set_wptr = vcn_v1_0_dec_ring_set_wptr,
                2 + /* vcn_v1_0_dec_ring_emit_hdp_invalidate */
                34 + /* vcn_v1_0_dec_ring_emit_vm_flush */
                14 + 14 + /* vcn_v1_0_dec_ring_emit_fence x2 vm fence */
        .emit_ib_size = 8, /* vcn_v1_0_dec_ring_emit_ib */
        .emit_ib = vcn_v1_0_dec_ring_emit_ib,
        .emit_fence = vcn_v1_0_dec_ring_emit_fence,
        .emit_vm_flush = vcn_v1_0_dec_ring_emit_vm_flush,
        .emit_hdp_invalidate = vcn_v1_0_dec_ring_emit_hdp_invalidate,
        .test_ring = amdgpu_vcn_dec_ring_test_ring,
        .test_ib = amdgpu_vcn_dec_ring_test_ib,
        .insert_nop = amdgpu_ring_insert_nop,
        .insert_start = vcn_v1_0_dec_ring_insert_start,
        .insert_end = vcn_v1_0_dec_ring_insert_end,
        .pad_ib = amdgpu_ring_generic_pad_ib,
        .begin_use = amdgpu_vcn_ring_begin_use,
        .end_use = amdgpu_vcn_ring_end_use,
static const struct amdgpu_ring_funcs vcn_v1_0_enc_ring_vm_funcs = {
        .type = AMDGPU_RING_TYPE_VCN_ENC,
        .nop = VCN_ENC_CMD_NO_OP,
        .support_64bit_ptrs = false,
        .vmhub = AMDGPU_MMHUB,
        .get_rptr = vcn_v1_0_enc_ring_get_rptr,
        .get_wptr = vcn_v1_0_enc_ring_get_wptr,
        .set_wptr = vcn_v1_0_enc_ring_set_wptr,
                17 + /* vcn_v1_0_enc_ring_emit_vm_flush */
                5 + 5 + /* vcn_v1_0_enc_ring_emit_fence x2 vm fence */
                1, /* vcn_v1_0_enc_ring_insert_end */
        .emit_ib_size = 5, /* vcn_v1_0_enc_ring_emit_ib */
        .emit_ib = vcn_v1_0_enc_ring_emit_ib,
        .emit_fence = vcn_v1_0_enc_ring_emit_fence,
        .emit_vm_flush = vcn_v1_0_enc_ring_emit_vm_flush,
        .test_ring = amdgpu_vcn_enc_ring_test_ring,
        .test_ib = amdgpu_vcn_enc_ring_test_ib,
        .insert_nop = amdgpu_ring_insert_nop,
        .insert_end = vcn_v1_0_enc_ring_insert_end,
        .pad_ib = amdgpu_ring_generic_pad_ib,
        .begin_use = amdgpu_vcn_ring_begin_use,
        .end_use = amdgpu_vcn_ring_end_use,
static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev)
        adev->vcn.ring_dec.funcs = &vcn_v1_0_dec_ring_vm_funcs;
        DRM_INFO("VCN decode is enabled in VM mode\n");
static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev)
        for (i = 0; i < adev->vcn.num_enc_rings; ++i)
                adev->vcn.ring_enc[i].funcs = &vcn_v1_0_enc_ring_vm_funcs;
        DRM_INFO("VCN encode is enabled in VM mode\n");
static const struct amdgpu_irq_src_funcs vcn_v1_0_irq_funcs = {
        .set = vcn_v1_0_set_interrupt_state,
        .process = vcn_v1_0_process_interrupt,
static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev)
        adev->vcn.irq.num_types = adev->vcn.num_enc_rings + 1;
        adev->vcn.irq.funcs = &vcn_v1_0_irq_funcs;
const struct amdgpu_ip_block_version vcn_v1_0_ip_block =
        .type = AMD_IP_BLOCK_TYPE_VCN,
        .funcs = &vcn_v1_0_ip_funcs,