/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "vid.h"
#include "uvd/uvd_5_0_d.h"
#include "uvd/uvd_5_0_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
#include "bif/bif_5_0_d.h"
#include "vi.h"
#include "smu/smu_7_1_2_d.h"
#include "smu/smu_7_1_2_sh_mask.h"

static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v5_0_start(struct amdgpu_device *adev);
static void uvd_v5_0_stop(struct amdgpu_device *adev);
static int uvd_v5_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state);
static void uvd_v5_0_enable_mgcg(struct amdgpu_device *adev,
				 bool enable);

/**
 * uvd_v5_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t uvd_v5_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_RPTR);
}

/**
 * uvd_v5_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t uvd_v5_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_WPTR);
}

/**
 * uvd_v5_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void uvd_v5_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}

static int uvd_v5_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	uvd_v5_0_set_ring_funcs(adev);
	uvd_v5_0_set_irq_funcs(adev);

	return 0;
}

static int uvd_v5_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	/* UVD TRAP */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.irq);
	if (r)
		return r;

	r = amdgpu_uvd_sw_init(adev);
	if (r)
		return r;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	ring = &adev->uvd.ring;
	sprintf(ring->name, "uvd");
	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);

	return r;
}

static int uvd_v5_0_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_suspend(adev);
	if (r)
		return r;

	return amdgpu_uvd_sw_fini(adev);
}

/**
 * uvd_v5_0_hw_init - start and test UVD block
 *
 * @handle: handle used to identify the IP block
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int uvd_v5_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.ring;
	uint32_t tmp;
	int r;

	amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
	uvd_v5_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
	uvd_v5_0_enable_mgcg(adev, true);

	ring->ready = true;
	r = amdgpu_ring_test_ring(ring);
	if (r) {
		ring->ready = false;
		goto done;
	}

	r = amdgpu_ring_alloc(ring, 10);
	if (r) {
		DRM_ERROR("amdgpu: failed to lock UVD ring (%d).\n", r);
		goto done;
	}

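	/*
	 * PACKET0(reg, n) encodes a write of n + 1 dwords to consecutive
	 * registers starting at reg, so each pair of ring writes below
	 * programs a single semaphore timeout register with 0xFFFFF ticks.
	 */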
	tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	/* Clear timeout status bits */
	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
	amdgpu_ring_write(ring, 0x8);

	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
	amdgpu_ring_write(ring, 3);

	amdgpu_ring_commit(ring);

done:
	if (!r)
		DRM_INFO("UVD initialized successfully.\n");

	return r;
}

/**
 * uvd_v5_0_hw_fini - stop the hardware block
 *
 * @handle: handle used to identify the IP block
 *
 * Stop the UVD block and mark the ring as no longer ready
 */
static int uvd_v5_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.ring;

	if (RREG32(mmUVD_STATUS) != 0)
		uvd_v5_0_stop(adev);

	ring->ready = false;

	return 0;
}

static int uvd_v5_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = uvd_v5_0_hw_fini(adev);
	if (r)
		return r;
	uvd_v5_0_set_clockgating_state(adev, AMD_CG_STATE_GATE);

	return amdgpu_uvd_suspend(adev);
}

static int uvd_v5_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	return uvd_v5_0_hw_init(adev);
}

/**
 * uvd_v5_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the UVD memory controller know its offsets
 */
static void uvd_v5_0_mc_resume(struct amdgpu_device *adev)
{
	uint64_t offset;
	uint32_t size;

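	/*
	 * The VCPU sees the single UVD BO through three cache windows:
	 * firmware image, heap, and per-session stack. The OFFSETn
	 * registers appear to take the offset in 8-byte units, hence
	 * the ">> 3" below.
	 */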
	/* program memory controller bits 0-27 */
	WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			lower_32_bits(adev->uvd.gpu_addr));
	WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			upper_32_bits(adev->uvd.gpu_addr));

	offset = AMDGPU_UVD_FIRMWARE_OFFSET;
	size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4);
	WREG32(mmUVD_VCPU_CACHE_OFFSET0, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE0, size);

	offset += size;
	size = AMDGPU_UVD_HEAP_SIZE;
	WREG32(mmUVD_VCPU_CACHE_OFFSET1, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE1, size);

	offset += size;
	size = AMDGPU_UVD_STACK_SIZE +
	       (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles);
	WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE2, size);

	WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
}

/**
 * uvd_v5_0_start - start UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the UVD block
 */
static int uvd_v5_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->uvd.ring;
	uint32_t rb_bufsz, tmp;
	uint32_t lmi_swap_cntl;
	uint32_t mp_swap_cntl;
	int i, j, r;

	/* disable DPG */
	WREG32_P(mmUVD_POWER_STATUS, 0, ~(1 << 2));

	/* disable byte swapping */
	lmi_swap_cntl = 0;
	mp_swap_cntl = 0;

	uvd_v5_0_mc_resume(adev);

	/* disable interrupt */
	WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));

	/* stall UMC and register bus before resetting VCPU */
	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
	mdelay(1);

	/* put LMI, VCPU, RBC etc... into reset */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK | UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__RBC_SOFT_RESET_MASK | UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
		UVD_SOFT_RESET__CXW_SOFT_RESET_MASK | UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
		UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
	mdelay(5);

	/* take UVD block out of reset */
	WREG32_P(mmSRBM_SOFT_RESET, 0, ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
	mdelay(5);

	/* initialize UVD memory controller */
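	/*
	 * The magic value below appears to set a write-clean timer of 0x40
	 * plus a handful of enable/coherency bits; the individual field
	 * definitions live in uvd_5_0_sh_mask.h (UVD_LMI_CTRL__*).
	 */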
	WREG32(mmUVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
			     (1 << 21) | (1 << 9) | (1 << 20));

#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
	mp_swap_cntl = 0;
#endif
	WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
	WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);

	WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
	WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXB1, 0x0);
	WREG32(mmUVD_MPC_SET_ALU, 0);
	WREG32(mmUVD_MPC_SET_MUX, 0x88);

	/* take all subblocks out of reset, except VCPU */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* enable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, 1 << 9);

	/* enable UMC */
	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

	/* boot up the VCPU */
	WREG32(mmUVD_SOFT_RESET, 0);
	mdelay(10);

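	/*
	 * Bit 1 of UVD_STATUS is apparently set once the VCPU is up; poll
	 * for it and, if it never appears, soft-reset the VCPU and try
	 * again, up to 10 attempts in total.
	 */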
	for (i = 0; i < 10; ++i) {
		uint32_t status;
		for (j = 0; j < 100; ++j) {
			status = RREG32(mmUVD_STATUS);
			if (status & 2)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & 2)
			break;

		DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
		WREG32_P(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
				~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		r = -1;
	}

	if (r) {
		DRM_ERROR("UVD not responding, giving up!!!\n");
		return r;
	}
	/* enable master interrupt */
	WREG32_P(mmUVD_MASTINT_EN, 3 << 1, ~(3 << 1));

	/* clear bit 2 (value 0x4) of UVD_STATUS */
	WREG32_P(mmUVD_STATUS, 0, ~(2 << 1));

	rb_bufsz = order_base_2(ring->ring_size);
	tmp = 0;
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	/* force RBC into idle state */
	WREG32(mmUVD_RBC_RB_CNTL, tmp);

	/* set the write pointer delay */
	WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* set the wb address */
	WREG32(mmUVD_RBC_RB_RPTR_ADDR, (upper_32_bits(ring->gpu_addr) >> 2));

	/* program the RB_BASE for ring buffer */
	WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
			lower_32_bits(ring->gpu_addr));
	WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
			upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32(mmUVD_RBC_RB_RPTR, 0);

	ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));

	WREG32_P(mmUVD_RBC_RB_CNTL, 0, ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);

	return 0;
}

/**
 * uvd_v5_0_stop - stop UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the UVD block
 */
static void uvd_v5_0_stop(struct amdgpu_device *adev)
{
	/* force RBC into idle state */
	WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

	/* Stall UMC and register bus before resetting VCPU */
	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
	mdelay(1);

	/* put VCPU into reset */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* disable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, 0x0);

	/* Unstall UMC and register bus */
	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

	WREG32(mmUVD_STATUS, 0);
}

/**
 * uvd_v5_0_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address the fence value is written to
 * @seq: sequence number to write
 * @flags: fence flags
 *
 * Write a fence and a trap command to the ring.
 */
static void uvd_v5_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				     unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

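	/*
	 * Fence convention used here: seed UVD_CONTEXT_ID with the
	 * sequence number, point GPCOM_VCPU_DATA0/1 at the fence address
	 * and issue command 0 so the VCPU writes the value out; the
	 * second command (2) then raises a trap interrupt.
	 */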
	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 2);
}

/**
 * uvd_v5_0_ring_emit_hdp_flush - emit an HDP flush
 *
 * @ring: amdgpu_ring pointer
 *
 * Emits an HDP flush.
 */
static void uvd_v5_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, PACKET0(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0));
	amdgpu_ring_write(ring, 0);
}

/**
 * uvd_v5_0_ring_emit_hdp_invalidate - emit an HDP invalidate
 *
 * @ring: amdgpu_ring pointer
 *
 * Emits an HDP invalidate.
 */
static void uvd_v5_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, PACKET0(mmHDP_DEBUG0, 0));
	amdgpu_ring_write(ring, 1);
}

/**
 * uvd_v5_0_ring_test_ring - register write test
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully write to the context register
 */
static int uvd_v5_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

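	/*
	 * Scratch-style test: seed UVD_CONTEXT_ID with 0xCAFEDEAD from the
	 * CPU, ask the ring to overwrite it with 0xDEADBEEF, then poll
	 * until the new value shows up (or we time out).
	 */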
	WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(mmUVD_CONTEXT_ID);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n",
			 ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}
	return r;
}

/**
 * uvd_v5_0_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to execute
 * @vm_id: VM id, ignored by this engine
 * @ctx_switch: context switch flag, ignored by this engine
 *
 * Write ring commands to execute the indirect buffer
 */
static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_ib *ib,
				  unsigned vm_id, bool ctx_switch)
{
	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH, 0));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
	amdgpu_ring_write(ring, ib->length_dw);
}

static bool uvd_v5_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}

static int uvd_v5_0_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK))
			return 0;
	}
	return -ETIMEDOUT;
}

static int uvd_v5_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	uvd_v5_0_stop(adev);

	WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK,
		 ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
	mdelay(5);

	return uvd_v5_0_start(adev);
}

static int uvd_v5_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	/* TODO */
	return 0;
}

static int uvd_v5_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: UVD TRAP\n");
	amdgpu_fence_process(&adev->uvd.ring);
	return 0;
}

static void uvd_v5_0_enable_clock_gating(struct amdgpu_device *adev, bool enable)
{
	uint32_t data1, data3, suvd_flags;

	data1 = RREG32(mmUVD_SUVD_CGC_GATE);
	data3 = RREG32(mmUVD_CGC_GATE);

	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
		     UVD_SUVD_CGC_GATE__SIT_MASK |
		     UVD_SUVD_CGC_GATE__SMP_MASK |
		     UVD_SUVD_CGC_GATE__SCM_MASK |
		     UVD_SUVD_CGC_GATE__SDB_MASK;

	if (enable) {
		data3 |= (UVD_CGC_GATE__SYS_MASK |
			UVD_CGC_GATE__UDEC_MASK |
			UVD_CGC_GATE__MPEG2_MASK |
			UVD_CGC_GATE__RBC_MASK |
			UVD_CGC_GATE__LMI_MC_MASK |
			UVD_CGC_GATE__IDCT_MASK |
			UVD_CGC_GATE__MPRD_MASK |
			UVD_CGC_GATE__MPC_MASK |
			UVD_CGC_GATE__LBSI_MASK |
			UVD_CGC_GATE__LRBBM_MASK |
			UVD_CGC_GATE__UDEC_RE_MASK |
			UVD_CGC_GATE__UDEC_CM_MASK |
			UVD_CGC_GATE__UDEC_IT_MASK |
			UVD_CGC_GATE__UDEC_DB_MASK |
			UVD_CGC_GATE__UDEC_MP_MASK |
			UVD_CGC_GATE__WCB_MASK |
			UVD_CGC_GATE__JPEG_MASK |
			UVD_CGC_GATE__SCPU_MASK);
		/* the clock to the VCPU can only be gated when PG is enabled */
		if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
			data3 |= UVD_CGC_GATE__VCPU_MASK;
		data3 &= ~UVD_CGC_GATE__REGS_MASK;
		data1 |= suvd_flags;
	} else {
		data3 = 0;
		data1 = 0;
	}

	WREG32(mmUVD_SUVD_CGC_GATE, data1);
	WREG32(mmUVD_CGC_GATE, data3);
}

static void uvd_v5_0_set_sw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data2;

	data = RREG32(mmUVD_CGC_CTRL);
	data2 = RREG32(mmUVD_SUVD_CGC_CTRL);

	data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
		  UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);

	data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
		(1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
		(4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));

	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
			UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
			UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
			UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
			UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
			UVD_CGC_CTRL__SYS_MODE_MASK |
			UVD_CGC_CTRL__UDEC_MODE_MASK |
			UVD_CGC_CTRL__MPEG2_MODE_MASK |
			UVD_CGC_CTRL__REGS_MODE_MASK |
			UVD_CGC_CTRL__RBC_MODE_MASK |
			UVD_CGC_CTRL__LMI_MC_MODE_MASK |
			UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
			UVD_CGC_CTRL__IDCT_MODE_MASK |
			UVD_CGC_CTRL__MPRD_MODE_MASK |
			UVD_CGC_CTRL__MPC_MODE_MASK |
			UVD_CGC_CTRL__LBSI_MODE_MASK |
			UVD_CGC_CTRL__LRBBM_MODE_MASK |
			UVD_CGC_CTRL__WCB_MODE_MASK |
			UVD_CGC_CTRL__VCPU_MODE_MASK |
			UVD_CGC_CTRL__JPEG_MODE_MASK |
			UVD_CGC_CTRL__SCPU_MODE_MASK);
	data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
			UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
			UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
			UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
			UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);

	WREG32(mmUVD_CGC_CTRL, data);
	WREG32(mmUVD_SUVD_CGC_CTRL, data2);
}

#if 0
static void uvd_v5_0_set_hw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data1, cgc_flags, suvd_flags;

	data = RREG32(mmUVD_CGC_GATE);
	data1 = RREG32(mmUVD_SUVD_CGC_GATE);

	cgc_flags = UVD_CGC_GATE__SYS_MASK |
		    UVD_CGC_GATE__UDEC_MASK |
		    UVD_CGC_GATE__MPEG2_MASK |
		    UVD_CGC_GATE__RBC_MASK |
		    UVD_CGC_GATE__LMI_MC_MASK |
		    UVD_CGC_GATE__IDCT_MASK |
		    UVD_CGC_GATE__MPRD_MASK |
		    UVD_CGC_GATE__MPC_MASK |
		    UVD_CGC_GATE__LBSI_MASK |
		    UVD_CGC_GATE__LRBBM_MASK |
		    UVD_CGC_GATE__UDEC_RE_MASK |
		    UVD_CGC_GATE__UDEC_CM_MASK |
		    UVD_CGC_GATE__UDEC_IT_MASK |
		    UVD_CGC_GATE__UDEC_DB_MASK |
		    UVD_CGC_GATE__UDEC_MP_MASK |
		    UVD_CGC_GATE__WCB_MASK |
		    UVD_CGC_GATE__VCPU_MASK |
		    UVD_CGC_GATE__SCPU_MASK;

	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
		     UVD_SUVD_CGC_GATE__SIT_MASK |
		     UVD_SUVD_CGC_GATE__SMP_MASK |
		     UVD_SUVD_CGC_GATE__SCM_MASK |
		     UVD_SUVD_CGC_GATE__SDB_MASK;

	data |= cgc_flags;
	data1 |= suvd_flags;

	WREG32(mmUVD_CGC_GATE, data);
	WREG32(mmUVD_SUVD_CGC_GATE, data1);
}
#endif

static void uvd_v5_0_enable_mgcg(struct amdgpu_device *adev,
				 bool enable)
{
	u32 orig, data;

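	/*
	 * The low 12 bits of the indirect UVD_CGC_MEM_CTRL register look
	 * like per-memory clock gating enables; they are set or cleared
	 * wholesale here, together with the dynamic clock mode bit.
	 */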
	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data |= 0xfff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	} else {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data &= ~0xfff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	}
}

static int uvd_v5_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);

	if (enable) {
		/* wait for STATUS to clear */
		if (uvd_v5_0_wait_for_idle(handle))
			return -EBUSY;
		uvd_v5_0_enable_clock_gating(adev, true);

		/* enable HW gates because UVD is idle */
		/* uvd_v5_0_set_hw_clock_gating(adev); */
	} else {
		uvd_v5_0_enable_clock_gating(adev, false);
	}

	uvd_v5_0_set_sw_clock_gating(adev);
	return 0;
}

static int uvd_v5_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the UVD block.
	 * That's done in the dpm code via the SMC. This
	 * just re-inits the block as necessary. The actual
	 * gating still happens in the dpm code. We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret = 0;

	if (state == AMD_PG_STATE_GATE) {
		uvd_v5_0_stop(adev);
	} else {
		ret = uvd_v5_0_start(adev);
		if (ret)
			goto out;
	}

out:
	return ret;
}

static void uvd_v5_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	mutex_lock(&adev->pm.mutex);

	if (RREG32_SMC(ixCURRENT_PG_STATUS) &
	    CURRENT_PG_STATUS__UVD_PG_STATUS_MASK) {
		DRM_INFO("Cannot get clockgating state when UVD is powergated.\n");
		goto out;
	}

	/* AMD_CG_SUPPORT_UVD_MGCG */
	data = RREG32(mmUVD_CGC_CTRL);
	if (data & UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK)
		*flags |= AMD_CG_SUPPORT_UVD_MGCG;

out:
	mutex_unlock(&adev->pm.mutex);
}

static const struct amd_ip_funcs uvd_v5_0_ip_funcs = {
	.name = "uvd_v5_0",
	.early_init = uvd_v5_0_early_init,
	.late_init = NULL,
	.sw_init = uvd_v5_0_sw_init,
	.sw_fini = uvd_v5_0_sw_fini,
	.hw_init = uvd_v5_0_hw_init,
	.hw_fini = uvd_v5_0_hw_fini,
	.suspend = uvd_v5_0_suspend,
	.resume = uvd_v5_0_resume,
	.is_idle = uvd_v5_0_is_idle,
	.wait_for_idle = uvd_v5_0_wait_for_idle,
	.soft_reset = uvd_v5_0_soft_reset,
	.set_clockgating_state = uvd_v5_0_set_clockgating_state,
	.set_powergating_state = uvd_v5_0_set_powergating_state,
	.get_clockgating_state = uvd_v5_0_get_clockgating_state,
};

static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = {
	.type = AMDGPU_RING_TYPE_UVD,
	.align_mask = 0xf,
	.nop = PACKET0(mmUVD_NO_OP, 0),
	.support_64bit_ptrs = false,
	.get_rptr = uvd_v5_0_ring_get_rptr,
	.get_wptr = uvd_v5_0_ring_get_wptr,
	.set_wptr = uvd_v5_0_ring_set_wptr,
	.parse_cs = amdgpu_uvd_ring_parse_cs,
	.emit_frame_size =
		2 + /* uvd_v5_0_ring_emit_hdp_flush */
		2 + /* uvd_v5_0_ring_emit_hdp_invalidate */
		14, /* uvd_v5_0_ring_emit_fence x1 no user fence */
	.emit_ib_size = 6, /* uvd_v5_0_ring_emit_ib */
	.emit_ib = uvd_v5_0_ring_emit_ib,
	.emit_fence = uvd_v5_0_ring_emit_fence,
	.emit_hdp_flush = uvd_v5_0_ring_emit_hdp_flush,
	.emit_hdp_invalidate = uvd_v5_0_ring_emit_hdp_invalidate,
	.test_ring = uvd_v5_0_ring_test_ring,
	.test_ib = amdgpu_uvd_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
};

static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev)
{
	adev->uvd.ring.funcs = &uvd_v5_0_ring_funcs;
}

static const struct amdgpu_irq_src_funcs uvd_v5_0_irq_funcs = {
	.set = uvd_v5_0_set_interrupt_state,
	.process = uvd_v5_0_process_interrupt,
};

static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->uvd.irq.num_types = 1;
	adev->uvd.irq.funcs = &uvd_v5_0_irq_funcs;
}

const struct amdgpu_ip_block_version uvd_v5_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_UVD,
	.major = 5,
	.minor = 0,
	.rev = 0,
	.funcs = &uvd_v5_0_ip_funcs,
};
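
/*
 * A sketch of how this block version is consumed, assuming the VI SoC
 * setup code of this kernel tree: the SoC init appends it to the IP
 * block list, and the amdgpu core then drives uvd_v5_0_ip_funcs through
 * the usual early_init/sw_init/hw_init sequence, e.g. in vi.c:
 *
 *	amdgpu_ip_block_add(adev, &uvd_v5_0_ip_block);
 */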