/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "vid.h"
#include "uvd/uvd_5_0_d.h"
#include "uvd/uvd_5_0_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
#include "bif/bif_5_0_d.h"
#include "vi.h"
#include "smu/smu_7_1_2_d.h"
#include "smu/smu_7_1_2_sh_mask.h"
#include "ivsrcid/ivsrcid_vislands30.h"

static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v5_0_start(struct amdgpu_device *adev);
static void uvd_v5_0_stop(struct amdgpu_device *adev);
static int uvd_v5_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state);
static void uvd_v5_0_enable_mgcg(struct amdgpu_device *adev,
				 bool enable);
/**
 * uvd_v5_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t uvd_v5_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_RPTR);
}

/**
 * uvd_v5_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t uvd_v5_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_WPTR);
}

/**
 * uvd_v5_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void uvd_v5_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}

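/**
 * uvd_v5_0_early_init - early hardware init
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Set the number of UVD instances and register the ring and
 * interrupt callbacks
 */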
static int uvd_v5_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	adev->uvd.num_uvd_inst = 1;

	uvd_v5_0_set_ring_funcs(adev);
	uvd_v5_0_set_irq_funcs(adev);

	return 0;
}

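/**
 * uvd_v5_0_sw_init - software init
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Register the UVD interrupt source, set up the common UVD state
 * and initialize the UVD ring
 */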
static int uvd_v5_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	/* UVD TRAP */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_UVD_SYSTEM_MESSAGE, &adev->uvd.inst->irq);
	if (r)
		return r;

	r = amdgpu_uvd_sw_init(adev);
	if (r)
		return r;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	ring = &adev->uvd.inst->ring;
	sprintf(ring->name, "uvd");
	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
	if (r)
		return r;

	r = amdgpu_uvd_entity_init(adev);

	return r;
}

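/**
 * uvd_v5_0_sw_fini - software teardown
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Suspend the UVD block and free the common UVD software state
 */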
static int uvd_v5_0_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_suspend(adev);
	if (r)
		return r;

	return amdgpu_uvd_sw_fini(adev);
}

/**
 * uvd_v5_0_hw_init - start and test UVD block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int uvd_v5_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
	uint32_t tmp;
	int r;

	amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
	uvd_v5_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
	uvd_v5_0_enable_mgcg(adev, true);

	ring->ready = true;
	r = amdgpu_ring_test_ring(ring);
	if (r) {
		ring->ready = false;
		goto done;
	}

	r = amdgpu_ring_alloc(ring, 10);
	if (r) {
		DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
		goto done;
	}

	tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	/* Clear timeout status bits */
	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
	amdgpu_ring_write(ring, 0x8);

	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
	amdgpu_ring_write(ring, 3);

	amdgpu_ring_commit(ring);

done:
	if (!r)
		DRM_INFO("UVD initialized successfully.\n");

	return r;
}

/**
 * uvd_v5_0_hw_fini - stop the hardware block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Stop the UVD block, mark ring as not ready any more
 */
static int uvd_v5_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.inst->ring;

	if (RREG32(mmUVD_STATUS) != 0)
		uvd_v5_0_stop(adev);

	ring->ready = false;

	return 0;
}

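/**
 * uvd_v5_0_suspend - suspend the UVD block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Stop the hardware, enable clock gating and suspend the
 * common UVD state
 */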
static int uvd_v5_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = uvd_v5_0_hw_fini(adev);
	if (r)
		return r;
	uvd_v5_0_set_clockgating_state(adev, AMD_CG_STATE_GATE);

	return amdgpu_uvd_suspend(adev);
}

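/**
 * uvd_v5_0_resume - resume the UVD block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Resume the common UVD state and re-initialize the hardware
 */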
static int uvd_v5_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	return uvd_v5_0_hw_init(adev);
}

/**
 * uvd_v5_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the UVD memory controller know its offsets
 */
static void uvd_v5_0_mc_resume(struct amdgpu_device *adev)
{
	uint64_t offset;
	uint32_t size;

	/* program memory controller bits 0-27 */
	WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			lower_32_bits(adev->uvd.inst->gpu_addr));
	WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			upper_32_bits(adev->uvd.inst->gpu_addr));

	offset = AMDGPU_UVD_FIRMWARE_OFFSET;
	size = AMDGPU_UVD_FIRMWARE_SIZE(adev);
	WREG32(mmUVD_VCPU_CACHE_OFFSET0, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE0, size);

	offset += size;
	size = AMDGPU_UVD_HEAP_SIZE;
	WREG32(mmUVD_VCPU_CACHE_OFFSET1, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE1, size);

	offset += size;
	size = AMDGPU_UVD_STACK_SIZE +
	       (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles);
	WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE2, size);

	WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
}

/**
 * uvd_v5_0_start - start UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the UVD block
 */
static int uvd_v5_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
	uint32_t rb_bufsz, tmp;
	uint32_t lmi_swap_cntl;
	uint32_t mp_swap_cntl;
	int i, j, r;

	/* disable DPG */
	WREG32_P(mmUVD_POWER_STATUS, 0, ~(1 << 2));

	/* disable byte swapping */
	lmi_swap_cntl = 0;
	mp_swap_cntl = 0;

	uvd_v5_0_mc_resume(adev);

	/* disable interrupt */
	WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));

	/* stall UMC and register bus before resetting VCPU */
	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
	mdelay(1);

	/* put LMI, VCPU, RBC etc... into reset */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK | UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__RBC_SOFT_RESET_MASK | UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
		UVD_SOFT_RESET__CXW_SOFT_RESET_MASK | UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
		UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
	mdelay(5);

	/* take UVD block out of reset */
	WREG32_P(mmSRBM_SOFT_RESET, 0, ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
	mdelay(5);

	/* initialize UVD memory controller */
	WREG32(mmUVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
			     (1 << 21) | (1 << 9) | (1 << 20));

#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
	mp_swap_cntl = 0;
#endif
	WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
	WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);

	WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
	WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXB1, 0x0);
	WREG32(mmUVD_MPC_SET_ALU, 0);
	WREG32(mmUVD_MPC_SET_MUX, 0x88);

	/* take all subblocks out of reset, except VCPU */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* enable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, 1 << 9);

	/* enable UMC */
	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

	/* boot up the VCPU */
	WREG32(mmUVD_SOFT_RESET, 0);
	mdelay(10);

	for (i = 0; i < 10; ++i) {
		uint32_t status;
		for (j = 0; j < 100; ++j) {
			status = RREG32(mmUVD_STATUS);
			if (status & 2)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & 2)
			break;

		DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
		WREG32_P(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
				~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		r = -1;
	}

	if (r) {
		DRM_ERROR("UVD not responding, giving up!!!\n");
		return r;
	}
	/* enable master interrupt */
	WREG32_P(mmUVD_MASTINT_EN, 3 << 1, ~(3 << 1));

	/* clear the bit 4 of UVD_STATUS */
	WREG32_P(mmUVD_STATUS, 0, ~(2 << 1));

	rb_bufsz = order_base_2(ring->ring_size);
	tmp = 0;
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	/* force RBC into idle state */
	WREG32(mmUVD_RBC_RB_CNTL, tmp);

	/* set the write pointer delay */
	WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* set the wb address */
	WREG32(mmUVD_RBC_RB_RPTR_ADDR, (upper_32_bits(ring->gpu_addr) >> 2));

	/* program the RB_BASE for ring buffer */
	WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
			lower_32_bits(ring->gpu_addr));
	WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
			upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32(mmUVD_RBC_RB_RPTR, 0);

	ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));

	WREG32_P(mmUVD_RBC_RB_CNTL, 0, ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);

	return 0;
}

/**
 * uvd_v5_0_stop - stop UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * stop the UVD block
 */
static void uvd_v5_0_stop(struct amdgpu_device *adev)
{
	/* force RBC into idle state */
	WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

	/* Stall UMC and register bus before resetting VCPU */
	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
	mdelay(1);

	/* put VCPU into reset */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* disable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, 0x0);

	/* Unstall UMC and register bus */
	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

	WREG32(mmUVD_STATUS, 0);
}

/**
 * uvd_v5_0_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: fence address
 * @seq: sequence number to emit
 * @flags: fence flags
 *
 * Write a fence and a trap command to the ring.
 */
static void uvd_v5_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				     unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 2);
}

/**
 * uvd_v5_0_ring_test_ring - register write test
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully write to the context register
 */
static int uvd_v5_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(mmUVD_CONTEXT_ID);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
			  ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}
	return r;
}

/**
 * uvd_v5_0_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to execute
 *
 * Write ring commands to execute the indirect buffer
 */
static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_ib *ib,
				  unsigned vmid, bool ctx_switch)
{
	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH, 0));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
	amdgpu_ring_write(ring, ib->length_dw);
}

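/**
 * uvd_v5_0_ring_insert_nop - insert padding NOPs
 *
 * @ring: amdgpu_ring pointer
 * @count: number of dwords of padding to insert
 *
 * UVD NOP packets are written in pairs, so both the current write
 * pointer and @count are expected to be even
 */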
static void uvd_v5_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	int i;

	WARN_ON(ring->wptr % 2 || count % 2);

	for (i = 0; i < count / 2; i++) {
		amdgpu_ring_write(ring, PACKET0(mmUVD_NO_OP, 0));
		amdgpu_ring_write(ring, 0);
	}
}

static bool uvd_v5_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}

static int uvd_v5_0_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK))
			return 0;
	}
	return -ETIMEDOUT;
}

static int uvd_v5_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	uvd_v5_0_stop(adev);

	WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK,
			~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
	mdelay(5);

	return uvd_v5_0_start(adev);
}

static int uvd_v5_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	// TODO
	return 0;
}

static int uvd_v5_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: UVD TRAP\n");
	amdgpu_fence_process(&adev->uvd.inst->ring);
	return 0;
}

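/**
 * uvd_v5_0_enable_clock_gating - toggle the UVD clock gates
 *
 * @adev: amdgpu_device pointer
 * @enable: enable or disable the gates
 *
 * Program UVD_CGC_GATE and UVD_SUVD_CGC_GATE; the VCPU gate is only
 * set when UVD powergating is supported
 */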
static void uvd_v5_0_enable_clock_gating(struct amdgpu_device *adev, bool enable)
{
	uint32_t data1, data3, suvd_flags;

	data1 = RREG32(mmUVD_SUVD_CGC_GATE);
	data3 = RREG32(mmUVD_CGC_GATE);

	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
		     UVD_SUVD_CGC_GATE__SIT_MASK |
		     UVD_SUVD_CGC_GATE__SMP_MASK |
		     UVD_SUVD_CGC_GATE__SCM_MASK |
		     UVD_SUVD_CGC_GATE__SDB_MASK;

	if (enable) {
		data3 |= (UVD_CGC_GATE__SYS_MASK |
			UVD_CGC_GATE__UDEC_MASK |
			UVD_CGC_GATE__MPEG2_MASK |
			UVD_CGC_GATE__RBC_MASK |
			UVD_CGC_GATE__LMI_MC_MASK |
			UVD_CGC_GATE__IDCT_MASK |
			UVD_CGC_GATE__MPRD_MASK |
			UVD_CGC_GATE__MPC_MASK |
			UVD_CGC_GATE__LBSI_MASK |
			UVD_CGC_GATE__LRBBM_MASK |
			UVD_CGC_GATE__UDEC_RE_MASK |
			UVD_CGC_GATE__UDEC_CM_MASK |
			UVD_CGC_GATE__UDEC_IT_MASK |
			UVD_CGC_GATE__UDEC_DB_MASK |
			UVD_CGC_GATE__UDEC_MP_MASK |
			UVD_CGC_GATE__WCB_MASK |
			UVD_CGC_GATE__JPEG_MASK |
			UVD_CGC_GATE__SCPU_MASK);
		/* only when UVD powergating is supported can we gate the VCPU clock */
		if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
			data3 |= UVD_CGC_GATE__VCPU_MASK;
		data3 &= ~UVD_CGC_GATE__REGS_MASK;
		data1 |= suvd_flags;
	} else {
		data3 = 0;
		data1 = 0;
	}

	WREG32(mmUVD_SUVD_CGC_GATE, data1);
	WREG32(mmUVD_CGC_GATE, data3);
}

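/**
 * uvd_v5_0_set_sw_clock_gating - program software clock gating
 *
 * @adev: amdgpu_device pointer
 *
 * Select the dynamic clock mode, program the gating delay timers and
 * clear the per-block *_MODE bits in UVD_CGC_CTRL and UVD_SUVD_CGC_CTRL
 */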
static void uvd_v5_0_set_sw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data2;

	data = RREG32(mmUVD_CGC_CTRL);
	data2 = RREG32(mmUVD_SUVD_CGC_CTRL);

	data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
		  UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);

	data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
		(1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
		(4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));

	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
		  UVD_CGC_CTRL__SYS_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_MODE_MASK |
		  UVD_CGC_CTRL__MPEG2_MODE_MASK |
		  UVD_CGC_CTRL__REGS_MODE_MASK |
		  UVD_CGC_CTRL__RBC_MODE_MASK |
		  UVD_CGC_CTRL__LMI_MC_MODE_MASK |
		  UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
		  UVD_CGC_CTRL__IDCT_MODE_MASK |
		  UVD_CGC_CTRL__MPRD_MODE_MASK |
		  UVD_CGC_CTRL__MPC_MODE_MASK |
		  UVD_CGC_CTRL__LBSI_MODE_MASK |
		  UVD_CGC_CTRL__LRBBM_MODE_MASK |
		  UVD_CGC_CTRL__WCB_MODE_MASK |
		  UVD_CGC_CTRL__VCPU_MODE_MASK |
		  UVD_CGC_CTRL__JPEG_MODE_MASK |
		  UVD_CGC_CTRL__SCPU_MODE_MASK);
	data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);

	WREG32(mmUVD_CGC_CTRL, data);
	WREG32(mmUVD_SUVD_CGC_CTRL, data2);
}

#if 0
static void uvd_v5_0_set_hw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data1, cgc_flags, suvd_flags;

	data = RREG32(mmUVD_CGC_GATE);
	data1 = RREG32(mmUVD_SUVD_CGC_GATE);

	cgc_flags = UVD_CGC_GATE__SYS_MASK |
		    UVD_CGC_GATE__UDEC_MASK |
		    UVD_CGC_GATE__MPEG2_MASK |
		    UVD_CGC_GATE__RBC_MASK |
		    UVD_CGC_GATE__LMI_MC_MASK |
		    UVD_CGC_GATE__IDCT_MASK |
		    UVD_CGC_GATE__MPRD_MASK |
		    UVD_CGC_GATE__MPC_MASK |
		    UVD_CGC_GATE__LBSI_MASK |
		    UVD_CGC_GATE__LRBBM_MASK |
		    UVD_CGC_GATE__UDEC_RE_MASK |
		    UVD_CGC_GATE__UDEC_CM_MASK |
		    UVD_CGC_GATE__UDEC_IT_MASK |
		    UVD_CGC_GATE__UDEC_DB_MASK |
		    UVD_CGC_GATE__UDEC_MP_MASK |
		    UVD_CGC_GATE__WCB_MASK |
		    UVD_CGC_GATE__VCPU_MASK |
		    UVD_CGC_GATE__SCPU_MASK;

	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
		     UVD_SUVD_CGC_GATE__SIT_MASK |
		     UVD_SUVD_CGC_GATE__SMP_MASK |
		     UVD_SUVD_CGC_GATE__SCM_MASK |
		     UVD_SUVD_CGC_GATE__SDB_MASK;

	data |= cgc_flags;
	data1 |= suvd_flags;

	WREG32(mmUVD_CGC_GATE, data);
	WREG32(mmUVD_SUVD_CGC_GATE, data1);
}
#endif

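/**
 * uvd_v5_0_enable_mgcg - enable medium grain clock gating
 *
 * @adev: amdgpu_device pointer
 * @enable: enable or disable MGCG
 *
 * Toggle the memory clock gating bits in UVD_CGC_MEM_CTRL and the
 * dynamic clock mode in UVD_CGC_CTRL
 */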
static void uvd_v5_0_enable_mgcg(struct amdgpu_device *adev,
				 bool enable)
{
	u32 orig, data;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data |= 0xfff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	} else {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data &= ~0xfff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	}
}

static int uvd_v5_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE) ? true : false;

	if (enable) {
		/* wait for STATUS to clear */
		if (uvd_v5_0_wait_for_idle(handle))
			return -EBUSY;
		uvd_v5_0_enable_clock_gating(adev, true);

		/* enable HW gates because UVD is idle */
		/* uvd_v5_0_set_hw_clock_gating(adev); */
	} else {
		uvd_v5_0_enable_clock_gating(adev, false);
	}

	uvd_v5_0_set_sw_clock_gating(adev);
	return 0;
}

static int uvd_v5_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the UVD block.
	 * That's done in the dpm code via the SMC.  This
	 * just re-inits the block as necessary.  The actual
	 * gating still happens in the dpm code.  We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret = 0;

	if (state == AMD_PG_STATE_GATE) {
		uvd_v5_0_stop(adev);
	} else {
		ret = uvd_v5_0_start(adev);
		if (ret)
			goto out;
	}

out:
	return ret;
}

static void uvd_v5_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	mutex_lock(&adev->pm.mutex);

	if (RREG32_SMC(ixCURRENT_PG_STATUS) &
				CURRENT_PG_STATUS__UVD_PG_STATUS_MASK) {
		DRM_INFO("Cannot get clockgating state when UVD is powergated.\n");
		goto out;
	}

	/* AMD_CG_SUPPORT_UVD_MGCG */
	data = RREG32(mmUVD_CGC_CTRL);
	if (data & UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK)
		*flags |= AMD_CG_SUPPORT_UVD_MGCG;

out:
	mutex_unlock(&adev->pm.mutex);
}

static const struct amd_ip_funcs uvd_v5_0_ip_funcs = {
	.name = "uvd_v5_0",
	.early_init = uvd_v5_0_early_init,
	.late_init = NULL,
	.sw_init = uvd_v5_0_sw_init,
	.sw_fini = uvd_v5_0_sw_fini,
	.hw_init = uvd_v5_0_hw_init,
	.hw_fini = uvd_v5_0_hw_fini,
	.suspend = uvd_v5_0_suspend,
	.resume = uvd_v5_0_resume,
	.is_idle = uvd_v5_0_is_idle,
	.wait_for_idle = uvd_v5_0_wait_for_idle,
	.soft_reset = uvd_v5_0_soft_reset,
	.set_clockgating_state = uvd_v5_0_set_clockgating_state,
	.set_powergating_state = uvd_v5_0_set_powergating_state,
	.get_clockgating_state = uvd_v5_0_get_clockgating_state,
};

static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = {
	.type = AMDGPU_RING_TYPE_UVD,
	.align_mask = 0xf,
	.support_64bit_ptrs = false,
	.get_rptr = uvd_v5_0_ring_get_rptr,
	.get_wptr = uvd_v5_0_ring_get_wptr,
	.set_wptr = uvd_v5_0_ring_set_wptr,
	.parse_cs = amdgpu_uvd_ring_parse_cs,
	.emit_frame_size =
		14, /* uvd_v5_0_ring_emit_fence x1 no user fence */
	.emit_ib_size = 6, /* uvd_v5_0_ring_emit_ib */
	.emit_ib = uvd_v5_0_ring_emit_ib,
	.emit_fence = uvd_v5_0_ring_emit_fence,
	.test_ring = uvd_v5_0_ring_test_ring,
	.test_ib = amdgpu_uvd_ring_test_ib,
	.insert_nop = uvd_v5_0_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
};

static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev)
{
	adev->uvd.inst->ring.funcs = &uvd_v5_0_ring_funcs;
}

static const struct amdgpu_irq_src_funcs uvd_v5_0_irq_funcs = {
	.set = uvd_v5_0_set_interrupt_state,
	.process = uvd_v5_0_process_interrupt,
};

static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->uvd.inst->irq.num_types = 1;
	adev->uvd.inst->irq.funcs = &uvd_v5_0_irq_funcs;
}

const struct amdgpu_ip_block_version uvd_v5_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_UVD,
	.major = 5,
	.minor = 0,
	.rev = 0,
	.funcs = &uvd_v5_0_ip_funcs,
};