/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "vid.h"
#include "uvd/uvd_6_0_d.h"
#include "uvd/uvd_6_0_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"

static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v6_0_start(struct amdgpu_device *adev);
static void uvd_v6_0_stop(struct amdgpu_device *adev);

/**
 * uvd_v6_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint32_t uvd_v6_0_ring_get_rptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        return RREG32(mmUVD_RBC_RB_RPTR);
}

/**
 * uvd_v6_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint32_t uvd_v6_0_ring_get_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        return RREG32(mmUVD_RBC_RB_WPTR);
}

/**
 * uvd_v6_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void uvd_v6_0_ring_set_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        WREG32(mmUVD_RBC_RB_WPTR, ring->wptr);
}

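/**
 * uvd_v6_0_early_init - set up ring and irq callbacks
 *
 * @handle: handle to the amdgpu_device
 *
 * Install the UVD ring and interrupt handler function tables.
 */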
static int uvd_v6_0_early_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        uvd_v6_0_set_ring_funcs(adev);
        uvd_v6_0_set_irq_funcs(adev);

        return 0;
}

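/**
 * uvd_v6_0_sw_init - software init
 *
 * @handle: handle to the amdgpu_device
 *
 * Register the UVD trap interrupt (source id 124), set up the common
 * UVD state via amdgpu_uvd_sw_init()/amdgpu_uvd_resume(), then create
 * the UVD ring.
 */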
static int uvd_v6_0_sw_init(void *handle)
{
        struct amdgpu_ring *ring;
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        /* UVD TRAP */
        r = amdgpu_irq_add_id(adev, 124, &adev->uvd.irq);
        if (r)
                return r;

        r = amdgpu_uvd_sw_init(adev);
        if (r)
                return r;

        r = amdgpu_uvd_resume(adev);
        if (r)
                return r;

        ring = &adev->uvd.ring;
        sprintf(ring->name, "uvd");
        r = amdgpu_ring_init(adev, ring, 4096, CP_PACKET2, 0xf,
                             &adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD);

        return r;
}

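/**
 * uvd_v6_0_sw_fini - software teardown
 *
 * @handle: handle to the amdgpu_device
 *
 * Suspend the UVD block and release the common UVD state.
 */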
static int uvd_v6_0_sw_fini(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = amdgpu_uvd_suspend(adev);
        if (r)
                return r;

        r = amdgpu_uvd_sw_fini(adev);
        if (r)
                return r;

        return r;
}

/**
 * uvd_v6_0_hw_init - start and test UVD block
 *
 * @handle: handle to the amdgpu_device
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int uvd_v6_0_hw_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct amdgpu_ring *ring = &adev->uvd.ring;
        uint32_t tmp;
        int r;

        r = uvd_v6_0_start(adev);
        if (r)
                goto done;

        ring->ready = true;
        r = amdgpu_ring_test_ring(ring);
        if (r) {
                ring->ready = false;
                goto done;
        }

        r = amdgpu_ring_lock(ring, 10);
        if (r) {
                DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
                goto done;
        }

        tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
        amdgpu_ring_write(ring, tmp);
        amdgpu_ring_write(ring, 0xFFFFF);

        tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
        amdgpu_ring_write(ring, tmp);
        amdgpu_ring_write(ring, 0xFFFFF);

        tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
        amdgpu_ring_write(ring, tmp);
        amdgpu_ring_write(ring, 0xFFFFF);

        /* Clear timeout status bits */
        amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
        amdgpu_ring_write(ring, 0x8);

        amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
        amdgpu_ring_write(ring, 3);

        amdgpu_ring_unlock_commit(ring);

done:
        if (!r)
                DRM_INFO("UVD initialized successfully.\n");

        return r;
}

/**
 * uvd_v6_0_hw_fini - stop the hardware block
 *
 * @handle: handle to the amdgpu_device
 *
 * Stop the UVD block, mark ring as not ready any more
 */
static int uvd_v6_0_hw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct amdgpu_ring *ring = &adev->uvd.ring;

        uvd_v6_0_stop(adev);
        ring->ready = false;

        return 0;
}

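/**
 * uvd_v6_0_suspend - suspend the UVD block
 *
 * @handle: handle to the amdgpu_device
 *
 * Save the UVD context (skipped on APUs for now) and stop the
 * hardware block.
 */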
static int uvd_v6_0_suspend(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        /* Skip this for APU for now */
        if (!(adev->flags & AMD_IS_APU)) {
                r = amdgpu_uvd_suspend(adev);
                if (r)
                        return r;
        }
        r = uvd_v6_0_hw_fini(adev);
        if (r)
                return r;

        return r;
}

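/**
 * uvd_v6_0_resume - resume the UVD block
 *
 * @handle: handle to the amdgpu_device
 *
 * Restore the UVD context (skipped on APUs for now) and re-initialize
 * the hardware block.
 */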
static int uvd_v6_0_resume(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        /* Skip this for APU for now */
        if (!(adev->flags & AMD_IS_APU)) {
                r = amdgpu_uvd_resume(adev);
                if (r)
                        return r;
        }
        r = uvd_v6_0_hw_init(adev);
        if (r)
                return r;

        return r;
}

/**
 * uvd_v6_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the UVD memory controller know its offsets
 */
static void uvd_v6_0_mc_resume(struct amdgpu_device *adev)
{
        uint64_t offset;
        uint32_t size;

        /* program memory controller bits 0-27 */
        WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
               lower_32_bits(adev->uvd.gpu_addr));
        WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
               upper_32_bits(adev->uvd.gpu_addr));

        offset = AMDGPU_UVD_FIRMWARE_OFFSET;
        size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4);
        WREG32(mmUVD_VCPU_CACHE_OFFSET0, offset >> 3);
        WREG32(mmUVD_VCPU_CACHE_SIZE0, size);

        offset += size;
        size = AMDGPU_UVD_STACK_SIZE;
        WREG32(mmUVD_VCPU_CACHE_OFFSET1, offset >> 3);
        WREG32(mmUVD_VCPU_CACHE_SIZE1, size);

        offset += size;
        size = AMDGPU_UVD_HEAP_SIZE;
        WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3);
        WREG32(mmUVD_VCPU_CACHE_SIZE2, size);
}

/**
 * uvd_v6_0_start - start UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the UVD block
 */
static int uvd_v6_0_start(struct amdgpu_device *adev)
{
        struct amdgpu_ring *ring = &adev->uvd.ring;
        uint32_t rb_bufsz, tmp;
        uint32_t lmi_swap_cntl;
        uint32_t mp_swap_cntl;
        int i, j, r;

        /* disable DPG */
        WREG32_P(mmUVD_POWER_STATUS, 0, ~(1 << 2));

        /* disable byte swapping */
        lmi_swap_cntl = 0;
        mp_swap_cntl = 0;

        uvd_v6_0_mc_resume(adev);

        /* disable clock gating */
        WREG32(mmUVD_CGC_GATE, 0);

        /* disable interrupt */
        WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));

        /* stall UMC and register bus before resetting VCPU */
        WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
        mdelay(1);

        /* put LMI, VCPU, RBC etc... into reset */
        WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
                UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK | UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
                UVD_SOFT_RESET__RBC_SOFT_RESET_MASK | UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
                UVD_SOFT_RESET__CXW_SOFT_RESET_MASK | UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
                UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
        mdelay(5);

        /* take UVD block out of reset */
        WREG32_P(mmSRBM_SOFT_RESET, 0, ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
        mdelay(5);

        /* initialize UVD memory controller */
        WREG32(mmUVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
               (1 << 21) | (1 << 9) | (1 << 20));

#ifdef __BIG_ENDIAN
        /* swap (8 in 32) RB and IB */
        lmi_swap_cntl = 0xa;
        mp_swap_cntl = 0;
#endif
        WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
        WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);

        WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
        WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
        WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040);
        WREG32(mmUVD_MPC_SET_MUXB1, 0x0);
        WREG32(mmUVD_MPC_SET_ALU, 0);
        WREG32(mmUVD_MPC_SET_MUX, 0x88);

        /* take all subblocks out of reset, except VCPU */
        WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
        mdelay(5);

        /* enable VCPU clock */
        WREG32(mmUVD_VCPU_CNTL, 1 << 9);

        /* enable UMC */
        WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

        /* boot up the VCPU */
        WREG32(mmUVD_SOFT_RESET, 0);
        mdelay(10);

        for (i = 0; i < 10; ++i) {
                uint32_t status;

                for (j = 0; j < 100; ++j) {
                        status = RREG32(mmUVD_STATUS);
                        if (status & 2)
                                break;
                        mdelay(10);
                }
                r = 0;
                if (status & 2)
                        break;

                DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
                WREG32_P(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
                         ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
                mdelay(10);
                WREG32_P(mmUVD_SOFT_RESET, 0,
                         ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
                mdelay(10);
                r = -1;
        }

        if (r) {
                DRM_ERROR("UVD not responding, giving up!!!\n");
                return r;
        }
        /* enable master interrupt */
        WREG32_P(mmUVD_MASTINT_EN, 3 << 1, ~(3 << 1));

        /* clear the bit 4 of UVD_STATUS */
        WREG32_P(mmUVD_STATUS, 0, ~(2 << 1));

        rb_bufsz = order_base_2(ring->ring_size);
        tmp = 0;
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
        /* force RBC into idle state */
        WREG32(mmUVD_RBC_RB_CNTL, tmp);

        /* set the write pointer delay */
        WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);

        /* set the wb address */
        WREG32(mmUVD_RBC_RB_RPTR_ADDR, (upper_32_bits(ring->gpu_addr) >> 2));

        /* program the RB_BASE for ring buffer */
        WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
               lower_32_bits(ring->gpu_addr));
        WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
               upper_32_bits(ring->gpu_addr));

        /* Initialize the ring buffer's read and write pointers */
        WREG32(mmUVD_RBC_RB_RPTR, 0);

        ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
        WREG32(mmUVD_RBC_RB_WPTR, ring->wptr);

        WREG32_P(mmUVD_RBC_RB_CNTL, 0, ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);

        return 0;
}

/**
 * uvd_v6_0_stop - stop UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * stop the UVD block
 */
static void uvd_v6_0_stop(struct amdgpu_device *adev)
{
        /* force RBC into idle state */
        WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

        /* Stall UMC and register bus before resetting VCPU */
        WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
        mdelay(1);

        /* put VCPU into reset */
        WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
        mdelay(5);

        /* disable VCPU clock */
        WREG32(mmUVD_VCPU_CNTL, 0x0);

        /* Unstall UMC and register bus */
        WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));
}

/**
 * uvd_v6_0_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: GPU address the fence sequence number is written to
 * @seq: sequence number to write
 * @flags: fence flags (AMDGPU_FENCE_FLAG_*)
 *
 * Write a fence and a trap command to the ring.
 */
static void uvd_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
                                     unsigned flags)
{
        WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

        amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
        amdgpu_ring_write(ring, seq);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
        amdgpu_ring_write(ring, addr & 0xffffffff);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
        amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
        amdgpu_ring_write(ring, 0);

        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
        amdgpu_ring_write(ring, 2);
}

/**
 * uvd_v6_0_ring_emit_semaphore - emit semaphore command
 *
 * @ring: amdgpu_ring pointer
 * @semaphore: semaphore to emit commands for
 * @emit_wait: true if we should emit a wait command
 *
 * Emit a semaphore command (either wait or signal) to the UVD ring.
 */
static bool uvd_v6_0_ring_emit_semaphore(struct amdgpu_ring *ring,
                                         struct amdgpu_semaphore *semaphore,
                                         bool emit_wait)
{
        uint64_t addr = semaphore->gpu_addr;

        amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_ADDR_LOW, 0));
        amdgpu_ring_write(ring, (addr >> 3) & 0x000FFFFF);

        amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_ADDR_HIGH, 0));
        amdgpu_ring_write(ring, (addr >> 23) & 0x000FFFFF);

        amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CMD, 0));
        amdgpu_ring_write(ring, 0x80 | (emit_wait ? 1 : 0));

        return true;
}

/**
 * uvd_v6_0_ring_test_ring - register write test
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully write to the context register
 */
static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        uint32_t tmp = 0;
        unsigned i;
        int r;

        WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
        r = amdgpu_ring_lock(ring, 3);
        if (r) {
                DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
                          ring->idx, r);
                return r;
        }
        amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
        amdgpu_ring_write(ring, 0xDEADBEEF);
        amdgpu_ring_unlock_commit(ring);
        for (i = 0; i < adev->usec_timeout; i++) {
                tmp = RREG32(mmUVD_CONTEXT_ID);
                if (tmp == 0xDEADBEEF)
                        break;
                DRM_UDELAY(1);
        }

        if (i < adev->usec_timeout) {
                DRM_INFO("ring test on %d succeeded in %d usecs\n",
                         ring->idx, i);
        } else {
                DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
                          ring->idx, tmp);
                r = -EINVAL;
        }
        return r;
}

/**
 * uvd_v6_0_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to execute
 *
 * Write ring commands to execute the indirect buffer
 */
static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
                                  struct amdgpu_ib *ib)
{
        amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
        amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
        amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH, 0));
        amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
        amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
        amdgpu_ring_write(ring, ib->length_dw);
}

/**
 * uvd_v6_0_ring_test_ib - test ib execution
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully execute an IB
 */
static int uvd_v6_0_ring_test_ib(struct amdgpu_ring *ring)
{
        struct fence *fence = NULL;
        int r;

        r = amdgpu_uvd_get_create_msg(ring, 1, NULL);
        if (r) {
                DRM_ERROR("amdgpu: failed to get create msg (%d).\n", r);
                goto error;
        }

        r = amdgpu_uvd_get_destroy_msg(ring, 1, &fence);
        if (r) {
                DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r);
                goto error;
        }

        r = fence_wait(fence, false);
        if (r) {
                DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
                goto error;
        }
        DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
error:
        fence_put(fence);
        return r;
}

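/**
 * uvd_v6_0_is_idle - check UVD idle status
 *
 * @handle: handle to the amdgpu_device
 *
 * Returns true if the UVD block reports idle in SRBM_STATUS.
 */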
static bool uvd_v6_0_is_idle(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}

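/**
 * uvd_v6_0_wait_for_idle - wait for UVD to idle
 *
 * @handle: handle to the amdgpu_device
 *
 * Poll SRBM_STATUS until the UVD busy bit clears or the timeout expires.
 */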
static int uvd_v6_0_wait_for_idle(void *handle)
{
        unsigned i;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        for (i = 0; i < adev->usec_timeout; i++) {
                if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK))
                        return 0;
        }
        return -ETIMEDOUT;
}

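/**
 * uvd_v6_0_soft_reset - soft reset the UVD block
 *
 * @handle: handle to the amdgpu_device
 *
 * Stop UVD, toggle the SRBM soft reset bit for UVD and start the
 * block again.
 */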
static int uvd_v6_0_soft_reset(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        uvd_v6_0_stop(adev);

        WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK,
                 ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
        mdelay(5);

        return uvd_v6_0_start(adev);
}

static void uvd_v6_0_print_status(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        dev_info(adev->dev, "UVD 6.0 registers\n");
        dev_info(adev->dev, " UVD_SEMA_ADDR_LOW=0x%08X\n",
                 RREG32(mmUVD_SEMA_ADDR_LOW));
        dev_info(adev->dev, " UVD_SEMA_ADDR_HIGH=0x%08X\n",
                 RREG32(mmUVD_SEMA_ADDR_HIGH));
        dev_info(adev->dev, " UVD_SEMA_CMD=0x%08X\n",
                 RREG32(mmUVD_SEMA_CMD));
        dev_info(adev->dev, " UVD_GPCOM_VCPU_CMD=0x%08X\n",
                 RREG32(mmUVD_GPCOM_VCPU_CMD));
        dev_info(adev->dev, " UVD_GPCOM_VCPU_DATA0=0x%08X\n",
                 RREG32(mmUVD_GPCOM_VCPU_DATA0));
        dev_info(adev->dev, " UVD_GPCOM_VCPU_DATA1=0x%08X\n",
                 RREG32(mmUVD_GPCOM_VCPU_DATA1));
        dev_info(adev->dev, " UVD_ENGINE_CNTL=0x%08X\n",
                 RREG32(mmUVD_ENGINE_CNTL));
        dev_info(adev->dev, " UVD_UDEC_ADDR_CONFIG=0x%08X\n",
                 RREG32(mmUVD_UDEC_ADDR_CONFIG));
        dev_info(adev->dev, " UVD_UDEC_DB_ADDR_CONFIG=0x%08X\n",
                 RREG32(mmUVD_UDEC_DB_ADDR_CONFIG));
        dev_info(adev->dev, " UVD_UDEC_DBW_ADDR_CONFIG=0x%08X\n",
                 RREG32(mmUVD_UDEC_DBW_ADDR_CONFIG));
        dev_info(adev->dev, " UVD_SEMA_CNTL=0x%08X\n",
                 RREG32(mmUVD_SEMA_CNTL));
        dev_info(adev->dev, " UVD_LMI_EXT40_ADDR=0x%08X\n",
                 RREG32(mmUVD_LMI_EXT40_ADDR));
        dev_info(adev->dev, " UVD_CTX_INDEX=0x%08X\n",
                 RREG32(mmUVD_CTX_INDEX));
        dev_info(adev->dev, " UVD_CTX_DATA=0x%08X\n",
                 RREG32(mmUVD_CTX_DATA));
        dev_info(adev->dev, " UVD_CGC_GATE=0x%08X\n",
                 RREG32(mmUVD_CGC_GATE));
        dev_info(adev->dev, " UVD_CGC_CTRL=0x%08X\n",
                 RREG32(mmUVD_CGC_CTRL));
        dev_info(adev->dev, " UVD_LMI_CTRL2=0x%08X\n",
                 RREG32(mmUVD_LMI_CTRL2));
        dev_info(adev->dev, " UVD_MASTINT_EN=0x%08X\n",
                 RREG32(mmUVD_MASTINT_EN));
        dev_info(adev->dev, " UVD_LMI_ADDR_EXT=0x%08X\n",
                 RREG32(mmUVD_LMI_ADDR_EXT));
        dev_info(adev->dev, " UVD_LMI_CTRL=0x%08X\n",
                 RREG32(mmUVD_LMI_CTRL));
        dev_info(adev->dev, " UVD_LMI_SWAP_CNTL=0x%08X\n",
                 RREG32(mmUVD_LMI_SWAP_CNTL));
        dev_info(adev->dev, " UVD_MP_SWAP_CNTL=0x%08X\n",
                 RREG32(mmUVD_MP_SWAP_CNTL));
        dev_info(adev->dev, " UVD_MPC_SET_MUXA0=0x%08X\n",
                 RREG32(mmUVD_MPC_SET_MUXA0));
        dev_info(adev->dev, " UVD_MPC_SET_MUXA1=0x%08X\n",
                 RREG32(mmUVD_MPC_SET_MUXA1));
        dev_info(adev->dev, " UVD_MPC_SET_MUXB0=0x%08X\n",
                 RREG32(mmUVD_MPC_SET_MUXB0));
        dev_info(adev->dev, " UVD_MPC_SET_MUXB1=0x%08X\n",
                 RREG32(mmUVD_MPC_SET_MUXB1));
        dev_info(adev->dev, " UVD_MPC_SET_MUX=0x%08X\n",
                 RREG32(mmUVD_MPC_SET_MUX));
        dev_info(adev->dev, " UVD_MPC_SET_ALU=0x%08X\n",
                 RREG32(mmUVD_MPC_SET_ALU));
        dev_info(adev->dev, " UVD_VCPU_CACHE_OFFSET0=0x%08X\n",
                 RREG32(mmUVD_VCPU_CACHE_OFFSET0));
        dev_info(adev->dev, " UVD_VCPU_CACHE_SIZE0=0x%08X\n",
                 RREG32(mmUVD_VCPU_CACHE_SIZE0));
        dev_info(adev->dev, " UVD_VCPU_CACHE_OFFSET1=0x%08X\n",
                 RREG32(mmUVD_VCPU_CACHE_OFFSET1));
        dev_info(adev->dev, " UVD_VCPU_CACHE_SIZE1=0x%08X\n",
                 RREG32(mmUVD_VCPU_CACHE_SIZE1));
        dev_info(adev->dev, " UVD_VCPU_CACHE_OFFSET2=0x%08X\n",
                 RREG32(mmUVD_VCPU_CACHE_OFFSET2));
        dev_info(adev->dev, " UVD_VCPU_CACHE_SIZE2=0x%08X\n",
                 RREG32(mmUVD_VCPU_CACHE_SIZE2));
        dev_info(adev->dev, " UVD_VCPU_CNTL=0x%08X\n",
                 RREG32(mmUVD_VCPU_CNTL));
        dev_info(adev->dev, " UVD_SOFT_RESET=0x%08X\n",
                 RREG32(mmUVD_SOFT_RESET));
        dev_info(adev->dev, " UVD_RBC_IB_SIZE=0x%08X\n",
                 RREG32(mmUVD_RBC_IB_SIZE));
        dev_info(adev->dev, " UVD_RBC_RB_RPTR=0x%08X\n",
                 RREG32(mmUVD_RBC_RB_RPTR));
        dev_info(adev->dev, " UVD_RBC_RB_WPTR=0x%08X\n",
                 RREG32(mmUVD_RBC_RB_WPTR));
        dev_info(adev->dev, " UVD_RBC_RB_WPTR_CNTL=0x%08X\n",
                 RREG32(mmUVD_RBC_RB_WPTR_CNTL));
        dev_info(adev->dev, " UVD_RBC_RB_CNTL=0x%08X\n",
                 RREG32(mmUVD_RBC_RB_CNTL));
        dev_info(adev->dev, " UVD_STATUS=0x%08X\n",
                 RREG32(mmUVD_STATUS));
        dev_info(adev->dev, " UVD_SEMA_TIMEOUT_STATUS=0x%08X\n",
                 RREG32(mmUVD_SEMA_TIMEOUT_STATUS));
        dev_info(adev->dev, " UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL=0x%08X\n",
                 RREG32(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL));
        dev_info(adev->dev, " UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL=0x%08X\n",
                 RREG32(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL));
        dev_info(adev->dev, " UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL=0x%08X\n",
                 RREG32(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL));
        dev_info(adev->dev, " UVD_CONTEXT_ID=0x%08X\n",
                 RREG32(mmUVD_CONTEXT_ID));
}

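/**
 * uvd_v6_0_set_interrupt_state - enable/disable UVD interrupts
 *
 * @adev: amdgpu_device pointer
 * @source: irq source this call refers to
 * @type: interrupt type
 * @state: requested interrupt state
 *
 * Not implemented yet (see TODO); interrupt state changes are
 * currently ignored.
 */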
static int uvd_v6_0_set_interrupt_state(struct amdgpu_device *adev,
                                        struct amdgpu_irq_src *source,
                                        unsigned type,
                                        enum amdgpu_interrupt_state state)
{
        /* TODO */
        return 0;
}

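/**
 * uvd_v6_0_process_interrupt - handle a UVD trap interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: irq source the interrupt came from
 * @entry: interrupt vector entry
 *
 * Process fences on the UVD ring when the UVD trap fires.
 */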
static int uvd_v6_0_process_interrupt(struct amdgpu_device *adev,
                                      struct amdgpu_irq_src *source,
                                      struct amdgpu_iv_entry *entry)
{
        DRM_DEBUG("IH: UVD TRAP\n");
        amdgpu_fence_process(&adev->uvd.ring);
        return 0;
}

static int uvd_v6_0_set_clockgating_state(void *handle,
                                          enum amd_clockgating_state state)
{
        return 0;
}

static int uvd_v6_0_set_powergating_state(void *handle,
                                          enum amd_powergating_state state)
{
        /* This doesn't actually powergate the UVD block.
         * That's done in the dpm code via the SMC. This
         * just re-inits the block as necessary. The actual
         * gating still happens in the dpm code. We should
         * revisit this when there is a cleaner line between
         * the smc and the hw blocks
         */
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (state == AMD_PG_STATE_GATE) {
                uvd_v6_0_stop(adev);
                return 0;
        } else {
                return uvd_v6_0_start(adev);
        }
}

const struct amd_ip_funcs uvd_v6_0_ip_funcs = {
        .early_init = uvd_v6_0_early_init,
        .late_init = NULL,
        .sw_init = uvd_v6_0_sw_init,
        .sw_fini = uvd_v6_0_sw_fini,
        .hw_init = uvd_v6_0_hw_init,
        .hw_fini = uvd_v6_0_hw_fini,
        .suspend = uvd_v6_0_suspend,
        .resume = uvd_v6_0_resume,
        .is_idle = uvd_v6_0_is_idle,
        .wait_for_idle = uvd_v6_0_wait_for_idle,
        .soft_reset = uvd_v6_0_soft_reset,
        .print_status = uvd_v6_0_print_status,
        .set_clockgating_state = uvd_v6_0_set_clockgating_state,
        .set_powergating_state = uvd_v6_0_set_powergating_state,
};

static const struct amdgpu_ring_funcs uvd_v6_0_ring_funcs = {
        .get_rptr = uvd_v6_0_ring_get_rptr,
        .get_wptr = uvd_v6_0_ring_get_wptr,
        .set_wptr = uvd_v6_0_ring_set_wptr,
        .parse_cs = amdgpu_uvd_ring_parse_cs,
        .emit_ib = uvd_v6_0_ring_emit_ib,
        .emit_fence = uvd_v6_0_ring_emit_fence,
        .emit_semaphore = uvd_v6_0_ring_emit_semaphore,
        .test_ring = uvd_v6_0_ring_test_ring,
        .test_ib = uvd_v6_0_ring_test_ib,
        .insert_nop = amdgpu_ring_insert_nop,
};

static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev)
{
        adev->uvd.ring.funcs = &uvd_v6_0_ring_funcs;
}

static const struct amdgpu_irq_src_funcs uvd_v6_0_irq_funcs = {
        .set = uvd_v6_0_set_interrupt_state,
        .process = uvd_v6_0_process_interrupt,
};

static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev)
{
        adev->uvd.irq.num_types = 1;
        adev->uvd.irq.funcs = &uvd_v6_0_irq_funcs;
}