/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "vid.h"
#include "uvd/uvd_6_0_d.h"
#include "uvd/uvd_6_0_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
#include "smu/smu_7_1_3_d.h"
#include "smu/smu_7_1_3_sh_mask.h"
#include "bif/bif_5_1_d.h"
#include "gmc/gmc_8_1_d.h"
#include "vi.h"

static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v6_0_start(struct amdgpu_device *adev);
static void uvd_v6_0_stop(struct amdgpu_device *adev);
static void uvd_v6_0_set_sw_clock_gating(struct amdgpu_device *adev);
static int uvd_v6_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state);
static void uvd_v6_0_enable_mgcg(struct amdgpu_device *adev,
				 bool enable);

/**
 * uvd_v6_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t uvd_v6_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_RPTR);
}

/**
 * uvd_v6_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t uvd_v6_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_WPTR);
}

/**
 * uvd_v6_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void uvd_v6_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}

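/**
 * uvd_v6_0_early_init - set ring and irq callbacks
 *
 * @handle: amdgpu_device pointer
 *
 * Set the ring and irq function pointers for the UVD block.
 */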
static int uvd_v6_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	uvd_v6_0_set_ring_funcs(adev);
	uvd_v6_0_set_irq_funcs(adev);

	return 0;
}

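/**
 * uvd_v6_0_sw_init - sw init for UVD block
 *
 * @handle: amdgpu_device pointer
 *
 * Register the UVD trap interrupt, initialize common UVD support
 * (firmware and BO handling) and create the UVD ring.
 */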
static int uvd_v6_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* UVD TRAP */
	r = amdgpu_irq_add_id(adev, 124, &adev->uvd.irq);
	if (r)
		return r;

	r = amdgpu_uvd_sw_init(adev);
	if (r)
		return r;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	ring = &adev->uvd.ring;
	sprintf(ring->name, "uvd");
	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);

	return r;
}

static int uvd_v6_0_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_suspend(adev);
	if (r)
		return r;

	r = amdgpu_uvd_sw_fini(adev);
	if (r)
		return r;

	return r;
}

/**
 * uvd_v6_0_hw_init - start and test UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int uvd_v6_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.ring;
	uint32_t tmp;
	int r;

	amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
	uvd_v6_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
	uvd_v6_0_enable_mgcg(adev, true);

	ring->ready = true;
	r = amdgpu_ring_test_ring(ring);
	if (r) {
		ring->ready = false;
		goto done;
	}

	r = amdgpu_ring_alloc(ring, 10);
	if (r) {
		DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
		goto done;
	}

	tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	/* Clear timeout status bits */
	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
	amdgpu_ring_write(ring, 0x8);

	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
	amdgpu_ring_write(ring, 3);

	amdgpu_ring_commit(ring);

done:
	if (!r)
		DRM_INFO("UVD initialized successfully.\n");

	return r;
}

/**
 * uvd_v6_0_hw_fini - stop the hardware block
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the UVD block, mark ring as not ready any more
 */
static int uvd_v6_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.ring;

	if (RREG32(mmUVD_STATUS) != 0)
		uvd_v6_0_stop(adev);

	ring->ready = false;

	return 0;
}

static int uvd_v6_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = uvd_v6_0_hw_fini(adev);
	if (r)
		return r;

	/* Skip this for APU for now */
	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_uvd_suspend(adev);
		if (r)
			return r;
	}

	return r;
}

static int uvd_v6_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* Skip this for APU for now */
	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_uvd_resume(adev);
		if (r)
			return r;
	}
	r = uvd_v6_0_hw_init(adev);
	if (r)
		return r;

	return r;
}

/**
 * uvd_v6_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the UVD memory controller know its offsets
 */
static void uvd_v6_0_mc_resume(struct amdgpu_device *adev)
{
	uint64_t offset;
	uint32_t size;

	/* program memory controller bits 0-27 */
	WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			lower_32_bits(adev->uvd.gpu_addr));
	WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			upper_32_bits(adev->uvd.gpu_addr));

	offset = AMDGPU_UVD_FIRMWARE_OFFSET;
	size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4);
	WREG32(mmUVD_VCPU_CACHE_OFFSET0, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE0, size);

	offset += size;
	size = AMDGPU_UVD_HEAP_SIZE;
	WREG32(mmUVD_VCPU_CACHE_OFFSET1, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE1, size);

	offset += size;
	size = AMDGPU_UVD_STACK_SIZE +
	       (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles);
	WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE2, size);

	WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);

	WREG32(mmUVD_GP_SCRATCH4, adev->uvd.max_handles);
}

#if 0
static void cz_set_uvd_clock_gating_branches(struct amdgpu_device *adev,
		bool enable)
{
	u32 data, data1;

	data = RREG32(mmUVD_CGC_GATE);
	data1 = RREG32(mmUVD_SUVD_CGC_GATE);
	if (enable) {
		data |= UVD_CGC_GATE__SYS_MASK |
				UVD_CGC_GATE__UDEC_MASK |
				UVD_CGC_GATE__MPEG2_MASK |
				UVD_CGC_GATE__RBC_MASK |
				UVD_CGC_GATE__LMI_MC_MASK |
				UVD_CGC_GATE__IDCT_MASK |
				UVD_CGC_GATE__MPRD_MASK |
				UVD_CGC_GATE__MPC_MASK |
				UVD_CGC_GATE__LBSI_MASK |
				UVD_CGC_GATE__LRBBM_MASK |
				UVD_CGC_GATE__UDEC_RE_MASK |
				UVD_CGC_GATE__UDEC_CM_MASK |
				UVD_CGC_GATE__UDEC_IT_MASK |
				UVD_CGC_GATE__UDEC_DB_MASK |
				UVD_CGC_GATE__UDEC_MP_MASK |
				UVD_CGC_GATE__WCB_MASK |
				UVD_CGC_GATE__VCPU_MASK |
				UVD_CGC_GATE__SCPU_MASK;
		data1 |= UVD_SUVD_CGC_GATE__SRE_MASK |
				UVD_SUVD_CGC_GATE__SIT_MASK |
				UVD_SUVD_CGC_GATE__SMP_MASK |
				UVD_SUVD_CGC_GATE__SCM_MASK |
				UVD_SUVD_CGC_GATE__SDB_MASK |
				UVD_SUVD_CGC_GATE__SRE_H264_MASK |
				UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
				UVD_SUVD_CGC_GATE__SIT_H264_MASK |
				UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
				UVD_SUVD_CGC_GATE__SCM_H264_MASK |
				UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
				UVD_SUVD_CGC_GATE__SDB_H264_MASK |
				UVD_SUVD_CGC_GATE__SDB_HEVC_MASK;
	} else {
		data &= ~(UVD_CGC_GATE__SYS_MASK |
				UVD_CGC_GATE__UDEC_MASK |
				UVD_CGC_GATE__MPEG2_MASK |
				UVD_CGC_GATE__RBC_MASK |
				UVD_CGC_GATE__LMI_MC_MASK |
				UVD_CGC_GATE__LMI_UMC_MASK |
				UVD_CGC_GATE__IDCT_MASK |
				UVD_CGC_GATE__MPRD_MASK |
				UVD_CGC_GATE__MPC_MASK |
				UVD_CGC_GATE__LBSI_MASK |
				UVD_CGC_GATE__LRBBM_MASK |
				UVD_CGC_GATE__UDEC_RE_MASK |
				UVD_CGC_GATE__UDEC_CM_MASK |
				UVD_CGC_GATE__UDEC_IT_MASK |
				UVD_CGC_GATE__UDEC_DB_MASK |
				UVD_CGC_GATE__UDEC_MP_MASK |
				UVD_CGC_GATE__WCB_MASK |
				UVD_CGC_GATE__VCPU_MASK |
				UVD_CGC_GATE__SCPU_MASK);
		data1 &= ~(UVD_SUVD_CGC_GATE__SRE_MASK |
				UVD_SUVD_CGC_GATE__SIT_MASK |
				UVD_SUVD_CGC_GATE__SMP_MASK |
				UVD_SUVD_CGC_GATE__SCM_MASK |
				UVD_SUVD_CGC_GATE__SDB_MASK |
				UVD_SUVD_CGC_GATE__SRE_H264_MASK |
				UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
				UVD_SUVD_CGC_GATE__SIT_H264_MASK |
				UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
				UVD_SUVD_CGC_GATE__SCM_H264_MASK |
				UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
				UVD_SUVD_CGC_GATE__SDB_H264_MASK |
				UVD_SUVD_CGC_GATE__SDB_HEVC_MASK);
	}
	WREG32(mmUVD_CGC_GATE, data);
	WREG32(mmUVD_SUVD_CGC_GATE, data1);
}
#endif

/**
 * uvd_v6_0_start - start UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the UVD block
 */
static int uvd_v6_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->uvd.ring;
	uint32_t rb_bufsz, tmp;
	uint32_t lmi_swap_cntl;
	uint32_t mp_swap_cntl;
	int i, j, r;

	/* disable DPG */
	WREG32_P(mmUVD_POWER_STATUS, 0, ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);

	/* disable byte swapping */
	lmi_swap_cntl = 0;
	mp_swap_cntl = 0;

	uvd_v6_0_mc_resume(adev);

	/* disable interrupt */
	WREG32_FIELD(UVD_MASTINT_EN, VCPU_EN, 0);

	/* stall UMC and register bus before resetting VCPU */
	WREG32_FIELD(UVD_LMI_CTRL2, STALL_ARB_UMC, 1);
	mdelay(1);

	/* put LMI, VCPU, RBC etc... into reset */
	WREG32(mmUVD_SOFT_RESET,
		UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
		UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
		UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
		UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
		UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
		UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
	mdelay(5);

	/* take UVD block out of reset */
	WREG32_FIELD(SRBM_SOFT_RESET, SOFT_RESET_UVD, 0);
	mdelay(5);

	/* initialize UVD memory controller */
	WREG32(mmUVD_LMI_CTRL,
		(0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
		UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__REQ_MODE_MASK |
		UVD_LMI_CTRL__DISABLE_ON_FWV_FAIL_MASK);

#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
	mp_swap_cntl = 0;
#endif
	WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
	WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);

	WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
	WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXB1, 0x0);
	WREG32(mmUVD_MPC_SET_ALU, 0);
	WREG32(mmUVD_MPC_SET_MUX, 0x88);

	/* take all subblocks out of reset, except VCPU */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* enable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, UVD_VCPU_CNTL__CLK_EN_MASK);

	/* enable UMC */
	WREG32_FIELD(UVD_LMI_CTRL2, STALL_ARB_UMC, 0);

	/* boot up the VCPU */
	WREG32(mmUVD_SOFT_RESET, 0);
	mdelay(10);

	for (i = 0; i < 10; ++i) {
		uint32_t status;

		for (j = 0; j < 100; ++j) {
			status = RREG32(mmUVD_STATUS);
			if (status & 2)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & 2)
			break;

		DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
		WREG32_FIELD(UVD_SOFT_RESET, VCPU_SOFT_RESET, 1);
		mdelay(10);
		WREG32_FIELD(UVD_SOFT_RESET, VCPU_SOFT_RESET, 0);
		mdelay(10);
		r = -1;
	}

	if (r) {
		DRM_ERROR("UVD not responding, giving up!!!\n");
		return r;
	}
	/* enable master interrupt */
	WREG32_P(mmUVD_MASTINT_EN,
		(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
		~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));

	/* clear the bit 4 of UVD_STATUS */
	WREG32_P(mmUVD_STATUS, 0, ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

	/* force RBC into idle state */
	rb_bufsz = order_base_2(ring->ring_size);
	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	WREG32(mmUVD_RBC_RB_CNTL, tmp);

	/* set the write pointer delay */
	WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* set the wb address */
	WREG32(mmUVD_RBC_RB_RPTR_ADDR, (upper_32_bits(ring->gpu_addr) >> 2));

	/* program the RB_BASE for ring buffer */
	WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
		lower_32_bits(ring->gpu_addr));
	WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
		upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32(mmUVD_RBC_RB_RPTR, 0);

	ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));

	WREG32_FIELD(UVD_RBC_RB_CNTL, RB_NO_FETCH, 0);

	return 0;
}

/**
 * uvd_v6_0_stop - stop UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * stop the UVD block
 */
static void uvd_v6_0_stop(struct amdgpu_device *adev)
{
	/* force RBC into idle state */
	WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

	/* Stall UMC and register bus before resetting VCPU */
	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
	mdelay(1);

	/* put VCPU into reset */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* disable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, 0x0);

	/* Unstall UMC and register bus */
	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

	WREG32(mmUVD_STATUS, 0);
}

/**
 * uvd_v6_0_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address the fence value is written to
 * @seq: sequence number to write
 * @flags: fence related flags
 *
 * Write a fence and a trap command to the ring.
 */
static void uvd_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				     unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 2);
}

/**
 * uvd_v6_0_ring_emit_hdp_flush - emit an HDP flush
 *
 * @ring: amdgpu_ring pointer
 *
 * Emits an HDP flush.
 */
static void uvd_v6_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, PACKET0(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0));
	amdgpu_ring_write(ring, 0);
}

/**
 * uvd_v6_0_ring_emit_hdp_invalidate - emit an HDP invalidate
 *
 * @ring: amdgpu_ring pointer
 *
 * Emits an HDP invalidate.
 */
static void uvd_v6_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, PACKET0(mmHDP_DEBUG0, 0));
	amdgpu_ring_write(ring, 1);
}

/**
 * uvd_v6_0_ring_test_ring - register write test
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully write to the context register
 */
static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(mmUVD_CONTEXT_ID);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n",
			 ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}
	return r;
}

/**
 * uvd_v6_0_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to execute
 *
 * Write ring commands to execute the indirect buffer
 */
static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_ib *ib,
				  unsigned vm_id, bool ctx_switch)
{
	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_VMID, 0));
	amdgpu_ring_write(ring, vm_id);

	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH, 0));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
	amdgpu_ring_write(ring, ib->length_dw);
}

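/**
 * uvd_v6_0_ring_emit_vm_flush - emit a VM page table flush
 *
 * @ring: amdgpu_ring pointer
 * @vm_id: the VM id to flush
 * @pd_addr: address of the page directory
 *
 * Update the page table base address for @vm_id and flush the VM TLB
 * by emitting VCPU register write and wait packets.
 */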
static void uvd_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
					unsigned vm_id, uint64_t pd_addr)
{
	uint32_t reg;

	if (vm_id < 8)
		reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id;
	else
		reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8;

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, pd_addr >> 12);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0x8);

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, 1 << vm_id);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0x8);

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0));
	amdgpu_ring_write(ring, 1 << vm_id); /* mask */
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0xC);
}

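/**
 * uvd_v6_0_ring_emit_pipeline_sync - emit a pipeline synchronization
 *
 * @ring: amdgpu_ring pointer
 *
 * Emit packets that make the ring wait until the last emitted fence
 * has signalled before the following commands are executed.
 */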
static void uvd_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0));
	amdgpu_ring_write(ring, 0xffffffff); /* mask */
	amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH9, 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0xE);
}

static bool uvd_v6_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}

static int uvd_v6_0_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (uvd_v6_0_is_idle(handle))
			return 0;
	}
	return -ETIMEDOUT;
}

#define AMDGPU_UVD_STATUS_BUSY_MASK    0xfd
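/**
 * uvd_v6_0_check_soft_reset - check whether UVD needs a soft reset
 *
 * @handle: amdgpu_device pointer
 *
 * Check the SRBM and UVD status registers and, if the block is still
 * busy, record the SRBM soft reset bits to use for the reset.
 */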
static bool uvd_v6_0_check_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset = 0;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (REG_GET_FIELD(tmp, SRBM_STATUS, UVD_RQ_PENDING) ||
	    REG_GET_FIELD(tmp, SRBM_STATUS, UVD_BUSY) ||
	    (RREG32(mmUVD_STATUS) & AMDGPU_UVD_STATUS_BUSY_MASK))
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);

	if (srbm_soft_reset) {
		adev->uvd.srbm_soft_reset = srbm_soft_reset;
		return true;
	} else {
		adev->uvd.srbm_soft_reset = 0;
		return false;
	}
}

static int uvd_v6_0_pre_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->uvd.srbm_soft_reset)
		return 0;

	uvd_v6_0_stop(adev);
	return 0;
}

static int uvd_v6_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset;

	if (!adev->uvd.srbm_soft_reset)
		return 0;
	srbm_soft_reset = adev->uvd.srbm_soft_reset;

	if (srbm_soft_reset) {
		u32 tmp;

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);
	}

	return 0;
}

static int uvd_v6_0_post_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->uvd.srbm_soft_reset)
		return 0;

	mdelay(5);

	return uvd_v6_0_start(adev);
}

static int uvd_v6_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	// TODO
	return 0;
}

static int uvd_v6_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: UVD TRAP\n");
	amdgpu_fence_process(&adev->uvd.ring);
	return 0;
}

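/**
 * uvd_v6_0_enable_clock_gating - toggle UVD sub-block clock gates
 *
 * @adev: amdgpu_device pointer
 * @enable: enable or disable the gates
 *
 * Program UVD_SUVD_CGC_GATE and UVD_CGC_GATE to enable or disable
 * gating of the UVD sub-block clocks.
 */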
static void uvd_v6_0_enable_clock_gating(struct amdgpu_device *adev, bool enable)
{
	uint32_t data1, data3;

	data1 = RREG32(mmUVD_SUVD_CGC_GATE);
	data3 = RREG32(mmUVD_CGC_GATE);

	data1 |= UVD_SUVD_CGC_GATE__SRE_MASK |
		UVD_SUVD_CGC_GATE__SIT_MASK |
		UVD_SUVD_CGC_GATE__SMP_MASK |
		UVD_SUVD_CGC_GATE__SCM_MASK |
		UVD_SUVD_CGC_GATE__SDB_MASK |
		UVD_SUVD_CGC_GATE__SRE_H264_MASK |
		UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
		UVD_SUVD_CGC_GATE__SIT_H264_MASK |
		UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
		UVD_SUVD_CGC_GATE__SCM_H264_MASK |
		UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
		UVD_SUVD_CGC_GATE__SDB_H264_MASK |
		UVD_SUVD_CGC_GATE__SDB_HEVC_MASK;

	if (enable) {
		data3 |= (UVD_CGC_GATE__SYS_MASK |
			UVD_CGC_GATE__UDEC_MASK |
			UVD_CGC_GATE__MPEG2_MASK |
			UVD_CGC_GATE__RBC_MASK |
			UVD_CGC_GATE__LMI_MC_MASK |
			UVD_CGC_GATE__LMI_UMC_MASK |
			UVD_CGC_GATE__IDCT_MASK |
			UVD_CGC_GATE__MPRD_MASK |
			UVD_CGC_GATE__MPC_MASK |
			UVD_CGC_GATE__LBSI_MASK |
			UVD_CGC_GATE__LRBBM_MASK |
			UVD_CGC_GATE__UDEC_RE_MASK |
			UVD_CGC_GATE__UDEC_CM_MASK |
			UVD_CGC_GATE__UDEC_IT_MASK |
			UVD_CGC_GATE__UDEC_DB_MASK |
			UVD_CGC_GATE__UDEC_MP_MASK |
			UVD_CGC_GATE__WCB_MASK |
			UVD_CGC_GATE__JPEG_MASK |
			UVD_CGC_GATE__SCPU_MASK |
			UVD_CGC_GATE__JPEG2_MASK);
		/* only when powergating is enabled can we gate the VCPU clock */
		if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
			data3 |= UVD_CGC_GATE__VCPU_MASK;

		data3 &= ~UVD_CGC_GATE__REGS_MASK;
	} else {
		data3 = 0;
	}

	WREG32(mmUVD_SUVD_CGC_GATE, data1);
	WREG32(mmUVD_CGC_GATE, data3);
}

static void uvd_v6_0_set_sw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data2;

	data = RREG32(mmUVD_CGC_CTRL);
	data2 = RREG32(mmUVD_SUVD_CGC_CTRL);

	data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
		  UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);

	data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
		(1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
		(4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));

	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
			UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
			UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
			UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
			UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
			UVD_CGC_CTRL__SYS_MODE_MASK |
			UVD_CGC_CTRL__UDEC_MODE_MASK |
			UVD_CGC_CTRL__MPEG2_MODE_MASK |
			UVD_CGC_CTRL__REGS_MODE_MASK |
			UVD_CGC_CTRL__RBC_MODE_MASK |
			UVD_CGC_CTRL__LMI_MC_MODE_MASK |
			UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
			UVD_CGC_CTRL__IDCT_MODE_MASK |
			UVD_CGC_CTRL__MPRD_MODE_MASK |
			UVD_CGC_CTRL__MPC_MODE_MASK |
			UVD_CGC_CTRL__LBSI_MODE_MASK |
			UVD_CGC_CTRL__LRBBM_MODE_MASK |
			UVD_CGC_CTRL__WCB_MODE_MASK |
			UVD_CGC_CTRL__VCPU_MODE_MASK |
			UVD_CGC_CTRL__JPEG_MODE_MASK |
			UVD_CGC_CTRL__SCPU_MODE_MASK |
			UVD_CGC_CTRL__JPEG2_MODE_MASK);
	data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
			UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
			UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
			UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
			UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);

	WREG32(mmUVD_CGC_CTRL, data);
	WREG32(mmUVD_SUVD_CGC_CTRL, data2);
}

#if 0
static void uvd_v6_0_set_hw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data1, cgc_flags, suvd_flags;

	data = RREG32(mmUVD_CGC_GATE);
	data1 = RREG32(mmUVD_SUVD_CGC_GATE);

	cgc_flags = UVD_CGC_GATE__SYS_MASK |
		UVD_CGC_GATE__UDEC_MASK |
		UVD_CGC_GATE__MPEG2_MASK |
		UVD_CGC_GATE__RBC_MASK |
		UVD_CGC_GATE__LMI_MC_MASK |
		UVD_CGC_GATE__IDCT_MASK |
		UVD_CGC_GATE__MPRD_MASK |
		UVD_CGC_GATE__MPC_MASK |
		UVD_CGC_GATE__LBSI_MASK |
		UVD_CGC_GATE__LRBBM_MASK |
		UVD_CGC_GATE__UDEC_RE_MASK |
		UVD_CGC_GATE__UDEC_CM_MASK |
		UVD_CGC_GATE__UDEC_IT_MASK |
		UVD_CGC_GATE__UDEC_DB_MASK |
		UVD_CGC_GATE__UDEC_MP_MASK |
		UVD_CGC_GATE__WCB_MASK |
		UVD_CGC_GATE__VCPU_MASK |
		UVD_CGC_GATE__SCPU_MASK |
		UVD_CGC_GATE__JPEG_MASK |
		UVD_CGC_GATE__JPEG2_MASK;

	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
		UVD_SUVD_CGC_GATE__SIT_MASK |
		UVD_SUVD_CGC_GATE__SMP_MASK |
		UVD_SUVD_CGC_GATE__SCM_MASK |
		UVD_SUVD_CGC_GATE__SDB_MASK;

	data |= cgc_flags;
	data1 |= suvd_flags;

	WREG32(mmUVD_CGC_GATE, data);
	WREG32(mmUVD_SUVD_CGC_GATE, data1);
}
#endif

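/**
 * uvd_v6_0_enable_mgcg - enable/disable medium grain clock gating
 *
 * @adev: amdgpu_device pointer
 * @enable: enable or disable MGCG
 *
 * Toggle the UVD memory clock gates and the dynamic clock mode bit
 * depending on whether MGCG is supported and requested.
 */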
static void uvd_v6_0_enable_mgcg(struct amdgpu_device *adev,
				 bool enable)
{
	u32 orig, data;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data |= 0xfff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	} else {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data &= ~0xfff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	}
}

static int uvd_v6_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE) ? true : false;

	if (enable) {
		/* wait for STATUS to clear */
		if (uvd_v6_0_wait_for_idle(handle))
			return -EBUSY;
		uvd_v6_0_enable_clock_gating(adev, true);
		/* enable HW gates because UVD is idle */
/*		uvd_v6_0_set_hw_clock_gating(adev); */
	} else {
		/* disable HW gating and enable SW gating */
		uvd_v6_0_enable_clock_gating(adev, false);
	}
	uvd_v6_0_set_sw_clock_gating(adev);
	return 0;
}

static int uvd_v6_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the UVD block.
	 * That's done in the dpm code via the SMC.  This
	 * just re-inits the block as necessary.  The actual
	 * gating still happens in the dpm code.  We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret = 0;

	WREG32(mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK);

	if (state == AMD_PG_STATE_GATE) {
		uvd_v6_0_stop(adev);
	} else {
		ret = uvd_v6_0_start(adev);
		if (ret)
			goto out;
	}

out:
	return ret;
}

static void uvd_v6_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	mutex_lock(&adev->pm.mutex);

	if (RREG32_SMC(ixCURRENT_PG_STATUS) &
				CURRENT_PG_STATUS__UVD_PG_STATUS_MASK) {
		DRM_INFO("Cannot get clockgating state when UVD is powergated.\n");
		goto out;
	}

	/* AMD_CG_SUPPORT_UVD_MGCG */
	data = RREG32(mmUVD_CGC_CTRL);
	if (data & UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK)
		*flags |= AMD_CG_SUPPORT_UVD_MGCG;

out:
	mutex_unlock(&adev->pm.mutex);
}

static const struct amd_ip_funcs uvd_v6_0_ip_funcs = {
	.name = "uvd_v6_0",
	.early_init = uvd_v6_0_early_init,
	.late_init = NULL,
	.sw_init = uvd_v6_0_sw_init,
	.sw_fini = uvd_v6_0_sw_fini,
	.hw_init = uvd_v6_0_hw_init,
	.hw_fini = uvd_v6_0_hw_fini,
	.suspend = uvd_v6_0_suspend,
	.resume = uvd_v6_0_resume,
	.is_idle = uvd_v6_0_is_idle,
	.wait_for_idle = uvd_v6_0_wait_for_idle,
	.check_soft_reset = uvd_v6_0_check_soft_reset,
	.pre_soft_reset = uvd_v6_0_pre_soft_reset,
	.soft_reset = uvd_v6_0_soft_reset,
	.post_soft_reset = uvd_v6_0_post_soft_reset,
	.set_clockgating_state = uvd_v6_0_set_clockgating_state,
	.set_powergating_state = uvd_v6_0_set_powergating_state,
	.get_clockgating_state = uvd_v6_0_get_clockgating_state,
};

static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
	.type = AMDGPU_RING_TYPE_UVD,
	.align_mask = 0xf,
	.nop = PACKET0(mmUVD_NO_OP, 0),
	.support_64bit_ptrs = false,
	.get_rptr = uvd_v6_0_ring_get_rptr,
	.get_wptr = uvd_v6_0_ring_get_wptr,
	.set_wptr = uvd_v6_0_ring_set_wptr,
	.parse_cs = amdgpu_uvd_ring_parse_cs,
	.emit_frame_size =
		2 + /* uvd_v6_0_ring_emit_hdp_flush */
		2 + /* uvd_v6_0_ring_emit_hdp_invalidate */
		10 + /* uvd_v6_0_ring_emit_pipeline_sync */
		14, /* uvd_v6_0_ring_emit_fence x1 no user fence */
	.emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */
	.emit_ib = uvd_v6_0_ring_emit_ib,
	.emit_fence = uvd_v6_0_ring_emit_fence,
	.emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
	.emit_hdp_invalidate = uvd_v6_0_ring_emit_hdp_invalidate,
	.test_ring = uvd_v6_0_ring_test_ring,
	.test_ib = amdgpu_uvd_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
};

static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_UVD,
	.align_mask = 0xf,
	.nop = PACKET0(mmUVD_NO_OP, 0),
	.support_64bit_ptrs = false,
	.get_rptr = uvd_v6_0_ring_get_rptr,
	.get_wptr = uvd_v6_0_ring_get_wptr,
	.set_wptr = uvd_v6_0_ring_set_wptr,
	.emit_frame_size =
		2 + /* uvd_v6_0_ring_emit_hdp_flush */
		2 + /* uvd_v6_0_ring_emit_hdp_invalidate */
		10 + /* uvd_v6_0_ring_emit_pipeline_sync */
		20 + /* uvd_v6_0_ring_emit_vm_flush */
		14 + 14, /* uvd_v6_0_ring_emit_fence x2 vm fence */
	.emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */
	.emit_ib = uvd_v6_0_ring_emit_ib,
	.emit_fence = uvd_v6_0_ring_emit_fence,
	.emit_vm_flush = uvd_v6_0_ring_emit_vm_flush,
	.emit_pipeline_sync = uvd_v6_0_ring_emit_pipeline_sync,
	.emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
	.emit_hdp_invalidate = uvd_v6_0_ring_emit_hdp_invalidate,
	.test_ring = uvd_v6_0_ring_test_ring,
	.test_ib = amdgpu_uvd_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
};

static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev)
{
	if (adev->asic_type >= CHIP_POLARIS10) {
		adev->uvd.ring.funcs = &uvd_v6_0_ring_vm_funcs;
		DRM_INFO("UVD is enabled in VM mode\n");
	} else {
		adev->uvd.ring.funcs = &uvd_v6_0_ring_phys_funcs;
		DRM_INFO("UVD is enabled in physical mode\n");
	}
}

static const struct amdgpu_irq_src_funcs uvd_v6_0_irq_funcs = {
	.set = uvd_v6_0_set_interrupt_state,
	.process = uvd_v6_0_process_interrupt,
};

static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->uvd.irq.num_types = 1;
	adev->uvd.irq.funcs = &uvd_v6_0_irq_funcs;
}

const struct amdgpu_ip_block_version uvd_v6_0_ip_block =
{
		.type = AMD_IP_BLOCK_TYPE_UVD,
		.major = 6,
		.minor = 0,
		.rev = 0,
		.funcs = &uvd_v6_0_ip_funcs,
};

const struct amdgpu_ip_block_version uvd_v6_2_ip_block =
{
		.type = AMD_IP_BLOCK_TYPE_UVD,
		.major = 6,
		.minor = 2,
		.rev = 0,
		.funcs = &uvd_v6_0_ip_funcs,
};

const struct amdgpu_ip_block_version uvd_v6_3_ip_block =
{
		.type = AMD_IP_BLOCK_TYPE_UVD,
		.major = 6,
		.minor = 3,
		.rev = 0,
		.funcs = &uvd_v6_0_ip_funcs,
};