/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "vid.h"
#include "uvd/uvd_6_0_d.h"
#include "uvd/uvd_6_0_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
#include "vi.h"
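
/*
 * Register access throughout this file uses the amdgpu MMIO helpers:
 * RREG32()/WREG32() read and write a 32-bit register by its mm* offset,
 * and WREG32_P(reg, val, mask) does a read-modify-write that keeps the
 * register bits selected by mask and takes the remaining bits from val.
 * PACKET0(reg, n) builds a type-0 ring packet header that makes the UVD
 * ring controller write the following n + 1 DWORDs to consecutive
 * registers starting at reg.
 */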

static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v6_0_start(struct amdgpu_device *adev);
static void uvd_v6_0_stop(struct amdgpu_device *adev);
static void uvd_v6_0_set_sw_clock_gating(struct amdgpu_device *adev);

/**
 * uvd_v6_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint32_t uvd_v6_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_RPTR);
}

/**
 * uvd_v6_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint32_t uvd_v6_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_WPTR);
}

/**
 * uvd_v6_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void uvd_v6_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	WREG32(mmUVD_RBC_RB_WPTR, ring->wptr);
}
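
/*
 * Note that the RBC read and write pointers live in UVD registers rather
 * than in system memory, so the three ring helpers above are plain MMIO
 * accesses; committing new work to the ring amounts to nothing more than
 * publishing the updated write pointer through mmUVD_RBC_RB_WPTR.
 */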

static int uvd_v6_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	uvd_v6_0_set_ring_funcs(adev);
	uvd_v6_0_set_irq_funcs(adev);

	return 0;
}

static int uvd_v6_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* UVD TRAP */
	r = amdgpu_irq_add_id(adev, 124, &adev->uvd.irq);
	if (r)
		return r;

	r = amdgpu_uvd_sw_init(adev);
	if (r)
		return r;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	ring = &adev->uvd.ring;
	sprintf(ring->name, "uvd");
	r = amdgpu_ring_init(adev, ring, 512, CP_PACKET2, 0xf,
			     &adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD);

	return r;
}
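
/*
 * The UVD trap interrupt uses source id 124 on VI parts. The single UVD
 * ring is initialized with CP_PACKET2 (a type-2 NOP) as its filler dword
 * and an alignment mask of 0xf, so ring submissions are padded out to
 * 16-DWORD boundaries.
 */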

static int uvd_v6_0_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_suspend(adev);
	if (r)
		return r;

	return amdgpu_uvd_sw_fini(adev);
}

/**
 * uvd_v6_0_hw_init - start and test UVD block
 *
 * @handle: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int uvd_v6_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.ring;
	uint32_t tmp;
	int r;

	r = uvd_v6_0_start(adev);
	if (r)
		goto done;

	ring->ready = true;
	r = amdgpu_ring_test_ring(ring);
	if (r) {
		ring->ready = false;
		goto done;
	}

	r = amdgpu_ring_alloc(ring, 10);
	if (r) {
		DRM_ERROR("amdgpu: failed to lock UVD ring (%d).\n", r);
		goto done;
	}

	tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	/* Clear timeout status bits */
	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
	amdgpu_ring_write(ring, 0x8);

	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
	amdgpu_ring_write(ring, 3);

	amdgpu_ring_commit(ring);

done:
	if (!r)
		DRM_INFO("UVD initialized successfully.\n");

	return r;
}

/**
 * uvd_v6_0_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the UVD block, mark the ring as not ready any more
 */
static int uvd_v6_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.ring;

	uvd_v6_0_stop(adev);
	ring->ready = false;

	return 0;
}

static int uvd_v6_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = uvd_v6_0_hw_fini(adev);
	if (r)
		return r;

	/* Skip this for APU for now */
	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_uvd_suspend(adev);
		if (r)
			return r;
	}

	return r;
}

static int uvd_v6_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* Skip this for APU for now */
	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_uvd_resume(adev);
		if (r)
			return r;
	}

	return uvd_v6_0_hw_init(adev);
}

/**
 * uvd_v6_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the UVD memory controller know its offsets
 */
static void uvd_v6_0_mc_resume(struct amdgpu_device *adev)
{
	uint64_t offset;
	uint32_t size;

	/* program memory controller bits 0-27 */
	WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			lower_32_bits(adev->uvd.gpu_addr));
	WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			upper_32_bits(adev->uvd.gpu_addr));

	offset = AMDGPU_UVD_FIRMWARE_OFFSET;
	size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4);
	WREG32(mmUVD_VCPU_CACHE_OFFSET0, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE0, size);

	offset += size;
	size = AMDGPU_UVD_HEAP_SIZE;
	WREG32(mmUVD_VCPU_CACHE_OFFSET1, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE1, size);

	offset += size;
	size = AMDGPU_UVD_STACK_SIZE +
	       (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles);
	WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE2, size);

	WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);

	WREG32(mmUVD_GP_SCRATCH4, adev->uvd.max_handles);
}
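
/*
 * Layout of the VCPU cache window programmed above: the firmware image
 * sits at AMDGPU_UVD_FIRMWARE_OFFSET, followed by the decoder heap and
 * then the stack plus one session block per handle. The OFFSETn
 * registers are programmed in 8-byte units (hence the ">> 3") while the
 * SIZEn values are written in bytes as computed.
 */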

#if 0
static void cz_set_uvd_clock_gating_branches(struct amdgpu_device *adev,
		bool enable)
{
	u32 data, data1;

	data = RREG32(mmUVD_CGC_GATE);
	data1 = RREG32(mmUVD_SUVD_CGC_GATE);
	if (enable) {
		data |= UVD_CGC_GATE__SYS_MASK |
				UVD_CGC_GATE__UDEC_MASK |
				UVD_CGC_GATE__MPEG2_MASK |
				UVD_CGC_GATE__RBC_MASK |
				UVD_CGC_GATE__LMI_MC_MASK |
				UVD_CGC_GATE__IDCT_MASK |
				UVD_CGC_GATE__MPRD_MASK |
				UVD_CGC_GATE__MPC_MASK |
				UVD_CGC_GATE__LBSI_MASK |
				UVD_CGC_GATE__LRBBM_MASK |
				UVD_CGC_GATE__UDEC_RE_MASK |
				UVD_CGC_GATE__UDEC_CM_MASK |
				UVD_CGC_GATE__UDEC_IT_MASK |
				UVD_CGC_GATE__UDEC_DB_MASK |
				UVD_CGC_GATE__UDEC_MP_MASK |
				UVD_CGC_GATE__WCB_MASK |
				UVD_CGC_GATE__VCPU_MASK |
				UVD_CGC_GATE__SCPU_MASK;
		data1 |= UVD_SUVD_CGC_GATE__SRE_MASK |
				UVD_SUVD_CGC_GATE__SIT_MASK |
				UVD_SUVD_CGC_GATE__SMP_MASK |
				UVD_SUVD_CGC_GATE__SCM_MASK |
				UVD_SUVD_CGC_GATE__SDB_MASK |
				UVD_SUVD_CGC_GATE__SRE_H264_MASK |
				UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
				UVD_SUVD_CGC_GATE__SIT_H264_MASK |
				UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
				UVD_SUVD_CGC_GATE__SCM_H264_MASK |
				UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
				UVD_SUVD_CGC_GATE__SDB_H264_MASK |
				UVD_SUVD_CGC_GATE__SDB_HEVC_MASK;
	} else {
		data &= ~(UVD_CGC_GATE__SYS_MASK |
				UVD_CGC_GATE__UDEC_MASK |
				UVD_CGC_GATE__MPEG2_MASK |
				UVD_CGC_GATE__RBC_MASK |
				UVD_CGC_GATE__LMI_MC_MASK |
				UVD_CGC_GATE__LMI_UMC_MASK |
				UVD_CGC_GATE__IDCT_MASK |
				UVD_CGC_GATE__MPRD_MASK |
				UVD_CGC_GATE__MPC_MASK |
				UVD_CGC_GATE__LBSI_MASK |
				UVD_CGC_GATE__LRBBM_MASK |
				UVD_CGC_GATE__UDEC_RE_MASK |
				UVD_CGC_GATE__UDEC_CM_MASK |
				UVD_CGC_GATE__UDEC_IT_MASK |
				UVD_CGC_GATE__UDEC_DB_MASK |
				UVD_CGC_GATE__UDEC_MP_MASK |
				UVD_CGC_GATE__WCB_MASK |
				UVD_CGC_GATE__VCPU_MASK |
				UVD_CGC_GATE__SCPU_MASK);
		data1 &= ~(UVD_SUVD_CGC_GATE__SRE_MASK |
				UVD_SUVD_CGC_GATE__SIT_MASK |
				UVD_SUVD_CGC_GATE__SMP_MASK |
				UVD_SUVD_CGC_GATE__SCM_MASK |
				UVD_SUVD_CGC_GATE__SDB_MASK |
				UVD_SUVD_CGC_GATE__SRE_H264_MASK |
				UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
				UVD_SUVD_CGC_GATE__SIT_H264_MASK |
				UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
				UVD_SUVD_CGC_GATE__SCM_H264_MASK |
				UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
				UVD_SUVD_CGC_GATE__SDB_H264_MASK |
				UVD_SUVD_CGC_GATE__SDB_HEVC_MASK);
	}
	WREG32(mmUVD_CGC_GATE, data);
	WREG32(mmUVD_SUVD_CGC_GATE, data1);
}
#endif
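
/*
 * uvd_v6_0_start() below follows the usual bring-up pattern for this
 * block: stall the LMI/UMC, pulse the soft resets, program the memory
 * controller and ring registers, then poll UVD_STATUS for the running
 * bit (bit 1, hence the "status & 2" checks). The poll is retried up to
 * 10 times, with a VCPU soft-reset pulse between attempts, before the
 * driver gives up.
 */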

/**
 * uvd_v6_0_start - start UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the UVD block
 */
static int uvd_v6_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->uvd.ring;
	uint32_t rb_bufsz, tmp;
	uint32_t lmi_swap_cntl;
	uint32_t mp_swap_cntl;
	int i, j, r;

	/* disable DPG */
	WREG32_P(mmUVD_POWER_STATUS, 0, ~(1 << 2));

	/* disable byte swapping */
	lmi_swap_cntl = 0;
	mp_swap_cntl = 0;

	uvd_v6_0_mc_resume(adev);

	/* Set dynamic clock gating in S/W control mode */
	if (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG) {
		uvd_v6_0_set_sw_clock_gating(adev);
	} else {
		/* disable clock gating */
		uint32_t data = RREG32(mmUVD_CGC_CTRL);
		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		WREG32(mmUVD_CGC_CTRL, data);
	}

	/* disable interrupt */
	WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));

	/* stall UMC and register bus before resetting VCPU */
	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
	mdelay(1);

	/* put LMI, VCPU, RBC etc... into reset */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK | UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__RBC_SOFT_RESET_MASK | UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
		UVD_SOFT_RESET__CXW_SOFT_RESET_MASK | UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
		UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
	mdelay(5);

	/* take UVD block out of reset */
	WREG32_P(mmSRBM_SOFT_RESET, 0, ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
	mdelay(5);

	/* initialize UVD memory controller */
	WREG32(mmUVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
			     (1 << 21) | (1 << 9) | (1 << 20));

#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
	mp_swap_cntl = 0;
#endif
	WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
	WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);

	WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
	WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXB1, 0x0);
	WREG32(mmUVD_MPC_SET_ALU, 0);
	WREG32(mmUVD_MPC_SET_MUX, 0x88);

	/* take all subblocks out of reset, except VCPU */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* enable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, 1 << 9);

	/* enable UMC */
	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

	/* boot up the VCPU */
	WREG32(mmUVD_SOFT_RESET, 0);
	mdelay(10);

	for (i = 0; i < 10; ++i) {
		uint32_t status;

		for (j = 0; j < 100; ++j) {
			status = RREG32(mmUVD_STATUS);
			if (status & 2)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & 2)
			break;

		DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
		WREG32_P(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
				~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		WREG32_P(mmUVD_SOFT_RESET, 0,
				~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		r = -1;
	}

	if (r) {
		DRM_ERROR("UVD not responding, giving up!!!\n");
		return r;
	}
	/* enable master interrupt */
	WREG32_P(mmUVD_MASTINT_EN, 3 << 1, ~(3 << 1));

	/* clear bit 4 of UVD_STATUS */
	WREG32_P(mmUVD_STATUS, 0, ~(2 << 1));

	rb_bufsz = order_base_2(ring->ring_size);
	tmp = 0;
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	/* force RBC into idle state */
	WREG32(mmUVD_RBC_RB_CNTL, tmp);

	/* set the write pointer delay */
	WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* set the wb address */
	WREG32(mmUVD_RBC_RB_RPTR_ADDR, (upper_32_bits(ring->gpu_addr) >> 2));

	/* program the RB_BASE for ring buffer */
	WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
			lower_32_bits(ring->gpu_addr));
	WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
			upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32(mmUVD_RBC_RB_RPTR, 0);

	ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
	WREG32(mmUVD_RBC_RB_WPTR, ring->wptr);

	WREG32_P(mmUVD_RBC_RB_CNTL, 0, ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);

	return 0;
}

/**
 * uvd_v6_0_stop - stop UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the UVD block
 */
static void uvd_v6_0_stop(struct amdgpu_device *adev)
{
	/* force RBC into idle state */
	WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

	/* Stall UMC and register bus before resetting VCPU */
	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
	mdelay(1);

	/* put VCPU into reset */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* disable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, 0x0);

	/* Unstall UMC and register bus */
	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));
}

/**
 * uvd_v6_0_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: GPU address to write the fence value to
 * @seq: sequence number of the fence
 * @flags: fence flags
 *
 * Write a fence and a trap command to the ring.
 */
static void uvd_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				     unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 2);
}
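
/*
 * The fence sequence above stores the sequence number in
 * mmUVD_CONTEXT_ID, points the VCPU at the fence address through the
 * GPCOM data registers, and then issues two GPCOM commands: command 0,
 * which has the firmware write the fence value to memory, and command 2,
 * which raises the trap interrupt that uvd_v6_0_process_interrupt()
 * turns into an amdgpu_fence_process() call.
 */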

/**
 * uvd_v6_0_ring_test_ring - register write test
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully write to the context register
 */
static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r) {
		DRM_ERROR("amdgpu: failed to lock UVD ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(mmUVD_CONTEXT_ID);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n",
			 ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}
	return r;
}
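
/*
 * The ring test works by scribbling 0xCAFEDEAD into mmUVD_CONTEXT_ID
 * directly, then asking the ring to overwrite it with 0xDEADBEEF via a
 * PACKET0 write; seeing 0xDEADBEEF come back proves the engine is
 * actually fetching and executing ring contents rather than the CPU
 * merely reading back its own register write.
 */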

/**
 * uvd_v6_0_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to execute
 *
 * Write ring commands to execute the indirect buffer
 */
static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_ib *ib)
{
	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH, 0));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
	amdgpu_ring_write(ring, ib->length_dw);
}

/**
 * uvd_v6_0_ring_test_ib - test ib execution
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully execute an IB
 */
static int uvd_v6_0_ring_test_ib(struct amdgpu_ring *ring)
{
	struct fence *fence = NULL;
	int r;

	r = amdgpu_uvd_get_create_msg(ring, 1, NULL);
	if (r) {
		DRM_ERROR("amdgpu: failed to get create msg (%d).\n", r);
		goto error;
	}

	r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence);
	if (r) {
		DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r);
		goto error;
	}

	r = fence_wait(fence, false);
	if (r) {
		DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
		goto error;
	}
	DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
error:
	fence_put(fence);
	return r;
}
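
/*
 * The IB test drives a minimal decoder session: a create message
 * followed by a destroy message, each submitted as an indirect buffer.
 * Successfully waiting on the fence of the destroy message confirms
 * that the VCPU parsed and executed both IBs.
 */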

static bool uvd_v6_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}

static int uvd_v6_0_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK))
			return 0;
	}
	return -ETIMEDOUT;
}

static int uvd_v6_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	uvd_v6_0_stop(adev);

	WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK,
			~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
	mdelay(5);

	return uvd_v6_0_start(adev);
}

static int uvd_v6_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	/* TODO */
	return 0;
}

static int uvd_v6_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: UVD TRAP\n");
	amdgpu_fence_process(&adev->uvd.ring);
	return 0;
}

static void uvd_v6_0_set_sw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data1, data2, suvd_flags;

	data = RREG32(mmUVD_CGC_CTRL);
	data1 = RREG32(mmUVD_SUVD_CGC_GATE);
	data2 = RREG32(mmUVD_SUVD_CGC_CTRL);

	data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
		  UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);

	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
		     UVD_SUVD_CGC_GATE__SIT_MASK |
		     UVD_SUVD_CGC_GATE__SMP_MASK |
		     UVD_SUVD_CGC_GATE__SCM_MASK |
		     UVD_SUVD_CGC_GATE__SDB_MASK;

	data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
		(1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
		(4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));

	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
			UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
			UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
			UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
			UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
			UVD_CGC_CTRL__SYS_MODE_MASK |
			UVD_CGC_CTRL__UDEC_MODE_MASK |
			UVD_CGC_CTRL__MPEG2_MODE_MASK |
			UVD_CGC_CTRL__REGS_MODE_MASK |
			UVD_CGC_CTRL__RBC_MODE_MASK |
			UVD_CGC_CTRL__LMI_MC_MODE_MASK |
			UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
			UVD_CGC_CTRL__IDCT_MODE_MASK |
			UVD_CGC_CTRL__MPRD_MODE_MASK |
			UVD_CGC_CTRL__MPC_MODE_MASK |
			UVD_CGC_CTRL__LBSI_MODE_MASK |
			UVD_CGC_CTRL__LRBBM_MODE_MASK |
			UVD_CGC_CTRL__WCB_MODE_MASK |
			UVD_CGC_CTRL__VCPU_MODE_MASK |
			UVD_CGC_CTRL__JPEG_MODE_MASK |
			UVD_CGC_CTRL__SCPU_MODE_MASK |
			UVD_CGC_CTRL__JPEG2_MODE_MASK);
	data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
			UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
			UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
			UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
			UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);
	data1 |= suvd_flags;

	WREG32(mmUVD_CGC_CTRL, data);
	WREG32(mmUVD_CGC_GATE, 0);
	WREG32(mmUVD_SUVD_CGC_GATE, data1);
	WREG32(mmUVD_SUVD_CGC_CTRL, data2);
}
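
/*
 * uvd_v6_0_set_sw_clock_gating() puts the block's clock gates under
 * dynamic software control: it sets DYN_CLOCK_MODE with a gate delay
 * timer of 1 and an off delay of 4, clears all of the per-block *_MODE
 * bits in UVD_CGC_CTRL and UVD_SUVD_CGC_CTRL, writes 0 to UVD_CGC_GATE
 * and sets the SRE/SIT/SMP/SCM/SDB bits in UVD_SUVD_CGC_GATE.
 */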

#if 0
static void uvd_v6_0_set_hw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data1, cgc_flags, suvd_flags;

	data = RREG32(mmUVD_CGC_GATE);
	data1 = RREG32(mmUVD_SUVD_CGC_GATE);

	cgc_flags = UVD_CGC_GATE__SYS_MASK |
		UVD_CGC_GATE__UDEC_MASK |
		UVD_CGC_GATE__MPEG2_MASK |
		UVD_CGC_GATE__RBC_MASK |
		UVD_CGC_GATE__LMI_MC_MASK |
		UVD_CGC_GATE__IDCT_MASK |
		UVD_CGC_GATE__MPRD_MASK |
		UVD_CGC_GATE__MPC_MASK |
		UVD_CGC_GATE__LBSI_MASK |
		UVD_CGC_GATE__LRBBM_MASK |
		UVD_CGC_GATE__UDEC_RE_MASK |
		UVD_CGC_GATE__UDEC_CM_MASK |
		UVD_CGC_GATE__UDEC_IT_MASK |
		UVD_CGC_GATE__UDEC_DB_MASK |
		UVD_CGC_GATE__UDEC_MP_MASK |
		UVD_CGC_GATE__WCB_MASK |
		UVD_CGC_GATE__VCPU_MASK |
		UVD_CGC_GATE__SCPU_MASK |
		UVD_CGC_GATE__JPEG_MASK |
		UVD_CGC_GATE__JPEG2_MASK;

	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
		UVD_SUVD_CGC_GATE__SIT_MASK |
		UVD_SUVD_CGC_GATE__SMP_MASK |
		UVD_SUVD_CGC_GATE__SCM_MASK |
		UVD_SUVD_CGC_GATE__SDB_MASK;

	data |= cgc_flags;
	data1 |= suvd_flags;

	WREG32(mmUVD_CGC_GATE, data);
	WREG32(mmUVD_SUVD_CGC_GATE, data1);
}
#endif

static int uvd_v6_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
	static int curstate = -1;

	if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
		return 0;

	if (curstate == state)
		return 0;

	curstate = state;
	if (enable) {
		/* disable HW gating and enable SW gating */
		uvd_v6_0_set_sw_clock_gating(adev);
	} else {
		/* wait for STATUS to clear */
		if (uvd_v6_0_wait_for_idle(handle))
			return -EBUSY;

		/* enable HW gates because UVD is idle */
		/* uvd_v6_0_set_hw_clock_gating(adev); */
	}

	return 0;
}
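
/*
 * One caveat in set_clockgating_state above: curstate is function-local
 * but static, so the cached state is shared by every amdgpu device in
 * the system. That is harmless with a single UVD instance but the cache
 * would need to move into struct amdgpu_device to support more than one.
 */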

static int uvd_v6_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the UVD block.
	 * That's done in the dpm code via the SMC.  This
	 * just re-inits the block as necessary.  The actual
	 * gating still happens in the dpm code.  We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks.
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
		return 0;

	if (state == AMD_PG_STATE_GATE) {
		uvd_v6_0_stop(adev);
		return 0;
	} else {
		return uvd_v6_0_start(adev);
	}
}

const struct amd_ip_funcs uvd_v6_0_ip_funcs = {
	.early_init = uvd_v6_0_early_init,
	.late_init = NULL,
	.sw_init = uvd_v6_0_sw_init,
	.sw_fini = uvd_v6_0_sw_fini,
	.hw_init = uvd_v6_0_hw_init,
	.hw_fini = uvd_v6_0_hw_fini,
	.suspend = uvd_v6_0_suspend,
	.resume = uvd_v6_0_resume,
	.is_idle = uvd_v6_0_is_idle,
	.wait_for_idle = uvd_v6_0_wait_for_idle,
	.soft_reset = uvd_v6_0_soft_reset,
	.set_clockgating_state = uvd_v6_0_set_clockgating_state,
	.set_powergating_state = uvd_v6_0_set_powergating_state,
};

static const struct amdgpu_ring_funcs uvd_v6_0_ring_funcs = {
	.get_rptr = uvd_v6_0_ring_get_rptr,
	.get_wptr = uvd_v6_0_ring_get_wptr,
	.set_wptr = uvd_v6_0_ring_set_wptr,
	.parse_cs = amdgpu_uvd_ring_parse_cs,
	.emit_ib = uvd_v6_0_ring_emit_ib,
	.emit_fence = uvd_v6_0_ring_emit_fence,
	.test_ring = uvd_v6_0_ring_test_ring,
	.test_ib = uvd_v6_0_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
};

static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev)
{
	adev->uvd.ring.funcs = &uvd_v6_0_ring_funcs;
}

static const struct amdgpu_irq_src_funcs uvd_v6_0_irq_funcs = {
	.set = uvd_v6_0_set_interrupt_state,
	.process = uvd_v6_0_process_interrupt,
};

static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->uvd.irq.num_types = 1;
	adev->uvd.irq.funcs = &uvd_v6_0_irq_funcs;
}