/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/slab.h>

#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"

#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"

#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"

#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"

#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_sh_mask.h"

#include "smu/smu_7_1_1_d.h"
#include "smu/smu_7_1_1_sh_mask.h"

#include "uvd/uvd_5_0_d.h"
#include "uvd/uvd_5_0_sh_mask.h"

#include "vce/vce_3_0_d.h"
#include "vce/vce_3_0_sh_mask.h"

#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"

#include "sdma_v2_4.h"
#include "sdma_v3_0.h"
#include "dce_v10_0.h"
#include "dce_v11_0.h"
#include "iceland_ih.h"

#include "amdgpu_powerplay.h"
#if defined(CONFIG_DRM_AMD_ACP)
#include "amdgpu_acp.h"
#endif
#include "dce_virtual.h"
/*
 * Indirect registers accessor
 */
static u32 vi_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(mmPCIE_INDEX, reg);
	(void)RREG32(mmPCIE_INDEX);
	r = RREG32(mmPCIE_DATA);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}

static void vi_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(mmPCIE_INDEX, reg);
	(void)RREG32(mmPCIE_INDEX);
	WREG32(mmPCIE_DATA, v);
	(void)RREG32(mmPCIE_DATA);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}
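/*
 * Usage sketch (illustrative only, not part of the original file): once
 * vi_common_early_init() installs these callbacks in adev->pcie_rreg and
 * adev->pcie_wreg, indirect PCIE registers are reached through the generic
 * helpers, e.g.:
 *
 *	u32 v = RREG32_PCIE(ixPCIE_CNTL2);
 *	WREG32_PCIE(ixPCIE_CNTL2, v | PCIE_CNTL2__SLV_MEM_LS_EN_MASK);
 *
 * The pcie_idx_lock serializes the INDEX/DATA pair so that concurrent
 * callers cannot interleave index and data accesses.
 */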
static u32 vi_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmSMC_IND_INDEX_11, (reg));
	r = RREG32(mmSMC_IND_DATA_11);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return r;
}

static void vi_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmSMC_IND_INDEX_11, (reg));
	WREG32(mmSMC_IND_DATA_11, (v));
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}

#define mmMP0PUB_IND_INDEX	0x180
#define mmMP0PUB_IND_DATA	0x181

static u32 cz_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmMP0PUB_IND_INDEX, (reg));
	r = RREG32(mmMP0PUB_IND_DATA);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return r;
}

static void cz_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmMP0PUB_IND_INDEX, (reg));
	WREG32(mmMP0PUB_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}

static u32 vi_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
	r = RREG32(mmUVD_CTX_DATA);
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
	return r;
}

static void vi_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
	WREG32(mmUVD_CTX_DATA, (v));
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
}

static u32 vi_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(mmDIDT_IND_INDEX, (reg));
	r = RREG32(mmDIDT_IND_DATA);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
	return r;
}

static void vi_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(mmDIDT_IND_INDEX, (reg));
	WREG32(mmDIDT_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}

static u32 vi_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32(mmGC_CAC_IND_INDEX, (reg));
	r = RREG32(mmGC_CAC_IND_DATA);
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
	return r;
}

static void vi_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32(mmGC_CAC_IND_INDEX, (reg));
	WREG32(mmGC_CAC_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
}
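/*
 * The *_mgcg_cgcg_init tables below are consumed by
 * amdgpu_program_register_sequence() as {register, AND mask, OR value}
 * triplets: a mask of 0xffffffff writes the value verbatim, otherwise the
 * masked bits are cleared from the current register contents and the OR
 * value is applied on top before the register is written back.
 */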
static const u32 tonga_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 fiji_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 iceland_mgcg_cgcg_init[] =
{
	mmPCIE_INDEX, 0xffffffff, ixPCIE_CNTL2,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, ixCGTT_ROM_CLK_CTRL0,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 cz_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 stoney_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xffffffff, 0x00000104,
	mmHDP_HOST_PATH_CNTL, 0xffffffff, 0x0f000027,
};
static void vi_init_golden_registers(struct amdgpu_device *adev)
{
	/* Some of the registers might be dependent on GRBM_GFX_INDEX */
	mutex_lock(&adev->grbm_idx_mutex);

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		amdgpu_program_register_sequence(adev,
						 iceland_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
		break;
	case CHIP_FIJI:
		amdgpu_program_register_sequence(adev,
						 fiji_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init));
		break;
	case CHIP_TONGA:
		amdgpu_program_register_sequence(adev,
						 tonga_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(tonga_mgcg_cgcg_init));
		break;
	case CHIP_CARRIZO:
		amdgpu_program_register_sequence(adev,
						 cz_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(cz_mgcg_cgcg_init));
		break;
	case CHIP_STONEY:
		amdgpu_program_register_sequence(adev,
						 stoney_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(stoney_mgcg_cgcg_init));
		break;
	default:
		break;
	}
	mutex_unlock(&adev->grbm_idx_mutex);
}
/**
 * vi_get_xclk - get the xclk
 *
 * @adev: amdgpu_device pointer
 *
 * Returns the reference clock used by the gfx engine
 * (VI).
 */
static u32 vi_get_xclk(struct amdgpu_device *adev)
{
	u32 reference_clock = adev->clock.spll.reference_freq;
	u32 tmp;

	if (adev->flags & AMD_IS_APU)
		return reference_clock;

	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL_2);
	if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK))
		return 1000;

	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL);
	if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL, XTALIN_DIVIDE))
		return reference_clock / 4;

	return reference_clock;
}
/**
 * vi_srbm_select - select specific register instances
 *
 * @adev: amdgpu_device pointer
 * @me: selected ME (micro engine)
 * @pipe: pipe
 * @queue: queue
 * @vmid: VMID
 *
 * Switches the currently active register instances.  Some
 * registers are instanced per VMID, others are instanced per
 * me/pipe/queue combination.
 */
void vi_srbm_select(struct amdgpu_device *adev,
		    u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 srbm_gfx_cntl = 0;
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, PIPEID, pipe);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, MEID, me);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, VMID, vmid);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, QUEUEID, queue);
	WREG32(mmSRBM_GFX_CNTL, srbm_gfx_cntl);
}
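/*
 * Typical call pattern (illustrative sketch only): gfx/compute code brackets
 * per-queue register programming with a select/deselect pair under
 * srbm_mutex, e.g.:
 *
 *	mutex_lock(&adev->srbm_mutex);
 *	vi_srbm_select(adev, me, pipe, queue, 0);
 *	... program per-queue registers ...
 *	vi_srbm_select(adev, 0, 0, 0, 0);
 *	mutex_unlock(&adev->srbm_mutex);
 */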
static void vi_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}
static bool vi_read_disabled_bios(struct amdgpu_device *adev)
{
	u32 bus_cntl;
	u32 d1vga_control = 0;
	u32 d2vga_control = 0;
	u32 vga_render_control = 0;
	u32 rom_cntl;
	bool r;

	bus_cntl = RREG32(mmBUS_CNTL);
	if (adev->mode_info.num_crtc) {
		d1vga_control = RREG32(mmD1VGA_CONTROL);
		d2vga_control = RREG32(mmD2VGA_CONTROL);
		vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
	}
	rom_cntl = RREG32_SMC(ixROM_CNTL);

	/* enable the rom */
	WREG32(mmBUS_CNTL, (bus_cntl & ~BUS_CNTL__BIOS_ROM_DIS_MASK));
	if (adev->mode_info.num_crtc) {
		/* Disable VGA mode */
		WREG32(mmD1VGA_CONTROL,
		       (d1vga_control & ~(D1VGA_CONTROL__D1VGA_MODE_ENABLE_MASK |
					  D1VGA_CONTROL__D1VGA_TIMING_SELECT_MASK)));
		WREG32(mmD2VGA_CONTROL,
		       (d2vga_control & ~(D2VGA_CONTROL__D2VGA_MODE_ENABLE_MASK |
					  D2VGA_CONTROL__D2VGA_TIMING_SELECT_MASK)));
		WREG32(mmVGA_RENDER_CONTROL,
		       (vga_render_control & ~VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK));
	}
	WREG32_SMC(ixROM_CNTL, rom_cntl | ROM_CNTL__SCK_OVERWRITE_MASK);

	r = amdgpu_read_bios(adev);

	/* restore regs */
	WREG32(mmBUS_CNTL, bus_cntl);
	if (adev->mode_info.num_crtc) {
		WREG32(mmD1VGA_CONTROL, d1vga_control);
		WREG32(mmD2VGA_CONTROL, d2vga_control);
		WREG32(mmVGA_RENDER_CONTROL, vga_render_control);
	}
	WREG32_SMC(ixROM_CNTL, rom_cntl);
	return r;
}
static bool vi_read_bios_from_rom(struct amdgpu_device *adev,
				  u8 *bios, u32 length_bytes)
{
	u32 *dw_ptr;
	unsigned long flags;
	u32 i, length_dw;

	if (length_bytes == 0)
		return false;
	/* APU vbios image is part of sbios image */
	if (adev->flags & AMD_IS_APU)
		return false;

	dw_ptr = (u32 *)bios;
	length_dw = ALIGN(length_bytes, 4) / 4;
	/* take the smc lock since we are using the smc index */
	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	/* set rom index to 0 */
	WREG32(mmSMC_IND_INDEX_11, ixROM_INDEX);
	WREG32(mmSMC_IND_DATA_11, 0);
	/* set index to data for continuous read */
	WREG32(mmSMC_IND_INDEX_11, ixROM_DATA);
	for (i = 0; i < length_dw; i++)
		dw_ptr[i] = RREG32(mmSMC_IND_DATA_11);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);

	return true;
}
static void vi_detect_hw_virtualization(struct amdgpu_device *adev)
{
	uint32_t reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
	/* bit0: 0 means pf and 1 means vf */
	/* bit31: 0 means disable IOV and 1 means enable */
	if (reg & 1)
		adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;

	if (reg & 0x80000000)
		adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;

	if (reg == 0)
		if (is_virtual_machine()) /* passthrough mode excludes sr-iov mode */
			adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
}
static const struct amdgpu_allowed_register_entry tonga_allowed_read_registers[] = {
	{mmGB_MACROTILE_MODE7, true},
};

static const struct amdgpu_allowed_register_entry cz_allowed_read_registers[] = {
	{mmGB_TILE_MODE7, true},
	{mmGB_TILE_MODE12, true},
	{mmGB_TILE_MODE17, true},
	{mmGB_TILE_MODE23, true},
	{mmGB_MACROTILE_MODE7, true},
};
static const struct amdgpu_allowed_register_entry vi_allowed_read_registers[] = {
	{mmGRBM_STATUS, false},
	{mmGRBM_STATUS2, false},
	{mmGRBM_STATUS_SE0, false},
	{mmGRBM_STATUS_SE1, false},
	{mmGRBM_STATUS_SE2, false},
	{mmGRBM_STATUS_SE3, false},
	{mmSRBM_STATUS, false},
	{mmSRBM_STATUS2, false},
	{mmSRBM_STATUS3, false},
	{mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET, false},
	{mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET, false},
	{mmCP_STALLED_STAT1, false},
	{mmCP_STALLED_STAT2, false},
	{mmCP_STALLED_STAT3, false},
	{mmCP_CPF_BUSY_STAT, false},
	{mmCP_CPF_STALLED_STAT1, false},
	{mmCP_CPF_STATUS, false},
	{mmCP_CPC_BUSY_STAT, false},
	{mmCP_CPC_STALLED_STAT1, false},
	{mmCP_CPC_STATUS, false},
	{mmGB_ADDR_CONFIG, false},
	{mmMC_ARB_RAMCFG, false},
	{mmGB_TILE_MODE0, false},
	{mmGB_TILE_MODE1, false},
	{mmGB_TILE_MODE2, false},
	{mmGB_TILE_MODE3, false},
	{mmGB_TILE_MODE4, false},
	{mmGB_TILE_MODE5, false},
	{mmGB_TILE_MODE6, false},
	{mmGB_TILE_MODE7, false},
	{mmGB_TILE_MODE8, false},
	{mmGB_TILE_MODE9, false},
	{mmGB_TILE_MODE10, false},
	{mmGB_TILE_MODE11, false},
	{mmGB_TILE_MODE12, false},
	{mmGB_TILE_MODE13, false},
	{mmGB_TILE_MODE14, false},
	{mmGB_TILE_MODE15, false},
	{mmGB_TILE_MODE16, false},
	{mmGB_TILE_MODE17, false},
	{mmGB_TILE_MODE18, false},
	{mmGB_TILE_MODE19, false},
	{mmGB_TILE_MODE20, false},
	{mmGB_TILE_MODE21, false},
	{mmGB_TILE_MODE22, false},
	{mmGB_TILE_MODE23, false},
	{mmGB_TILE_MODE24, false},
	{mmGB_TILE_MODE25, false},
	{mmGB_TILE_MODE26, false},
	{mmGB_TILE_MODE27, false},
	{mmGB_TILE_MODE28, false},
	{mmGB_TILE_MODE29, false},
	{mmGB_TILE_MODE30, false},
	{mmGB_TILE_MODE31, false},
	{mmGB_MACROTILE_MODE0, false},
	{mmGB_MACROTILE_MODE1, false},
	{mmGB_MACROTILE_MODE2, false},
	{mmGB_MACROTILE_MODE3, false},
	{mmGB_MACROTILE_MODE4, false},
	{mmGB_MACROTILE_MODE5, false},
	{mmGB_MACROTILE_MODE6, false},
	{mmGB_MACROTILE_MODE7, false},
	{mmGB_MACROTILE_MODE8, false},
	{mmGB_MACROTILE_MODE9, false},
	{mmGB_MACROTILE_MODE10, false},
	{mmGB_MACROTILE_MODE11, false},
	{mmGB_MACROTILE_MODE12, false},
	{mmGB_MACROTILE_MODE13, false},
	{mmGB_MACROTILE_MODE14, false},
	{mmGB_MACROTILE_MODE15, false},
	{mmCC_RB_BACKEND_DISABLE, false, true},
	{mmGC_USER_RB_BACKEND_DISABLE, false, true},
	{mmGB_BACKEND_MAP, false, false},
	{mmPA_SC_RASTER_CONFIG, false, true},
	{mmPA_SC_RASTER_CONFIG_1, false, true},
};
static uint32_t vi_get_register_value(struct amdgpu_device *adev,
				      bool indexed, u32 se_num,
				      u32 sh_num, u32 reg_offset)
{
	if (indexed) {
		uint32_t val;
		unsigned se_idx = (se_num == 0xffffffff) ? 0 : se_num;
		unsigned sh_idx = (sh_num == 0xffffffff) ? 0 : sh_num;

		switch (reg_offset) {
		case mmCC_RB_BACKEND_DISABLE:
			return adev->gfx.config.rb_config[se_idx][sh_idx].rb_backend_disable;
		case mmGC_USER_RB_BACKEND_DISABLE:
			return adev->gfx.config.rb_config[se_idx][sh_idx].user_rb_backend_disable;
		case mmPA_SC_RASTER_CONFIG:
			return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config;
		case mmPA_SC_RASTER_CONFIG_1:
			return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config_1;
		}

		mutex_lock(&adev->grbm_idx_mutex);
		if (se_num != 0xffffffff || sh_num != 0xffffffff)
			amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

		val = RREG32(reg_offset);

		if (se_num != 0xffffffff || sh_num != 0xffffffff)
			amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
		mutex_unlock(&adev->grbm_idx_mutex);
		return val;
	} else {
		unsigned idx;

		switch (reg_offset) {
		case mmGB_ADDR_CONFIG:
			return adev->gfx.config.gb_addr_config;
		case mmMC_ARB_RAMCFG:
			return adev->gfx.config.mc_arb_ramcfg;
		case mmGB_TILE_MODE0:
		case mmGB_TILE_MODE1:
		case mmGB_TILE_MODE2:
		case mmGB_TILE_MODE3:
		case mmGB_TILE_MODE4:
		case mmGB_TILE_MODE5:
		case mmGB_TILE_MODE6:
		case mmGB_TILE_MODE7:
		case mmGB_TILE_MODE8:
		case mmGB_TILE_MODE9:
		case mmGB_TILE_MODE10:
		case mmGB_TILE_MODE11:
		case mmGB_TILE_MODE12:
		case mmGB_TILE_MODE13:
		case mmGB_TILE_MODE14:
		case mmGB_TILE_MODE15:
		case mmGB_TILE_MODE16:
		case mmGB_TILE_MODE17:
		case mmGB_TILE_MODE18:
		case mmGB_TILE_MODE19:
		case mmGB_TILE_MODE20:
		case mmGB_TILE_MODE21:
		case mmGB_TILE_MODE22:
		case mmGB_TILE_MODE23:
		case mmGB_TILE_MODE24:
		case mmGB_TILE_MODE25:
		case mmGB_TILE_MODE26:
		case mmGB_TILE_MODE27:
		case mmGB_TILE_MODE28:
		case mmGB_TILE_MODE29:
		case mmGB_TILE_MODE30:
		case mmGB_TILE_MODE31:
			idx = (reg_offset - mmGB_TILE_MODE0);
			return adev->gfx.config.tile_mode_array[idx];
		case mmGB_MACROTILE_MODE0:
		case mmGB_MACROTILE_MODE1:
		case mmGB_MACROTILE_MODE2:
		case mmGB_MACROTILE_MODE3:
		case mmGB_MACROTILE_MODE4:
		case mmGB_MACROTILE_MODE5:
		case mmGB_MACROTILE_MODE6:
		case mmGB_MACROTILE_MODE7:
		case mmGB_MACROTILE_MODE8:
		case mmGB_MACROTILE_MODE9:
		case mmGB_MACROTILE_MODE10:
		case mmGB_MACROTILE_MODE11:
		case mmGB_MACROTILE_MODE12:
		case mmGB_MACROTILE_MODE13:
		case mmGB_MACROTILE_MODE14:
		case mmGB_MACROTILE_MODE15:
			idx = (reg_offset - mmGB_MACROTILE_MODE0);
			return adev->gfx.config.macrotile_mode_array[idx];
		default:
			return RREG32(reg_offset);
		}
	}
}
static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
			    u32 sh_num, u32 reg_offset, u32 *value)
{
	const struct amdgpu_allowed_register_entry *asic_register_table = NULL;
	const struct amdgpu_allowed_register_entry *asic_register_entry;
	uint32_t size, i;

	*value = 0;
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		asic_register_table = tonga_allowed_read_registers;
		size = ARRAY_SIZE(tonga_allowed_read_registers);
		break;
	case CHIP_FIJI:
	case CHIP_TONGA:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		asic_register_table = cz_allowed_read_registers;
		size = ARRAY_SIZE(cz_allowed_read_registers);
		break;
	default:
		return -EINVAL;
	}

	if (asic_register_table) {
		for (i = 0; i < size; i++) {
			asic_register_entry = asic_register_table + i;
			if (reg_offset != asic_register_entry->reg_offset)
				continue;
			if (!asic_register_entry->untouched)
				*value = vi_get_register_value(adev,
							       asic_register_entry->grbm_indexed,
							       se_num, sh_num, reg_offset);
			return 0;
		}
	}

	for (i = 0; i < ARRAY_SIZE(vi_allowed_read_registers); i++) {
		if (reg_offset != vi_allowed_read_registers[i].reg_offset)
			continue;
		if (!vi_allowed_read_registers[i].untouched)
			*value = vi_get_register_value(adev,
						       vi_allowed_read_registers[i].grbm_indexed,
						       se_num, sh_num, reg_offset);
		return 0;
	}
	return -EINVAL;
}
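/*
 * vi_read_register() backs the register-read query of the AMDGPU_INFO
 * ioctl: only offsets present in the allowed-register tables above can be
 * read from user space, and "untouched" entries report 0 instead of a live
 * register value.
 */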
static int vi_gpu_pci_config_reset(struct amdgpu_device *adev)
{
	u32 i;

	dev_info(adev->dev, "GPU pci config reset\n");

	/* disable BM */
	pci_clear_master(adev->pdev);
	/* reset */
	amdgpu_pci_config_reset(adev);

	udelay(100);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) {
			/* enable BM */
			pci_set_master(adev->pdev);
			return 0;
		}
		udelay(1);
	}
	return -EINVAL;
}
/**
 * vi_asic_reset - soft reset GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Look up which blocks are hung and attempt
 * to reset them.
 * Returns 0 for success.
 */
static int vi_asic_reset(struct amdgpu_device *adev)
{
	int r;

	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	r = vi_gpu_pci_config_reset(adev);

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);

	return r;
}
static int vi_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
			    u32 cntl_reg, u32 status_reg)
{
	int r, i;
	struct atom_clock_dividers dividers;
	uint32_t tmp;

	r = amdgpu_atombios_get_clock_dividers(adev,
					       COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
					       clock, false, &dividers);
	if (r)
		return r;

	tmp = RREG32_SMC(cntl_reg);
	tmp &= ~(CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK |
		CG_DCLK_CNTL__DCLK_DIVIDER_MASK);
	tmp |= dividers.post_divider;
	WREG32_SMC(cntl_reg, tmp);

	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(status_reg) & CG_DCLK_STATUS__DCLK_STATUS_MASK)
			break;
		mdelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;

	return 0;
}

static int vi_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	int r;

	r = vi_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
	if (r)
		return r;

	r = vi_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
	if (r)
		return r;

	return 0;
}

static int vi_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	int r, i;
	struct atom_clock_dividers dividers;
	u32 tmp;

	r = amdgpu_atombios_get_clock_dividers(adev,
					       COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
					       ecclk, false, &dividers);
	if (r)
		return r;

	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(ixCG_ECLK_STATUS) & CG_ECLK_STATUS__ECLK_STATUS_MASK)
			break;
		mdelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;

	tmp = RREG32_SMC(ixCG_ECLK_CNTL);
	tmp &= ~(CG_ECLK_CNTL__ECLK_DIR_CNTL_EN_MASK |
		CG_ECLK_CNTL__ECLK_DIVIDER_MASK);
	tmp |= dividers.post_divider;
	WREG32_SMC(ixCG_ECLK_CNTL, tmp);

	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(ixCG_ECLK_STATUS) & CG_ECLK_STATUS__ECLK_STATUS_MASK)
			break;
		mdelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;

	return 0;
}
static void vi_pcie_gen3_enable(struct amdgpu_device *adev)
{
	if (pci_is_root_bus(adev->pdev->bus))
		return;

	if (amdgpu_pcie_gen2 == 0)
		return;

	if (adev->flags & AMD_IS_APU)
		return;

	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
		return;

	/* todo */
}

static void vi_program_aspm(struct amdgpu_device *adev)
{
	if (amdgpu_aspm == 0)
		return;

	/* todo */
}

static void vi_enable_doorbell_aperture(struct amdgpu_device *adev,
					bool enable)
{
	u32 tmp;

	/* not necessary on CZ */
	if (adev->flags & AMD_IS_APU)
		return;

	tmp = RREG32(mmBIF_DOORBELL_APER_EN);
	if (enable)
		tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 1);
	else
		tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 0);

	WREG32(mmBIF_DOORBELL_APER_EN, tmp);
}
#define ATI_REV_ID_FUSE_MACRO__ADDRESS      0xC0014044
#define ATI_REV_ID_FUSE_MACRO__SHIFT        9
#define ATI_REV_ID_FUSE_MACRO__MASK         0x00001E00

static uint32_t vi_get_rev_id(struct amdgpu_device *adev)
{
	if (adev->flags & AMD_IS_APU)
		return (RREG32_SMC(ATI_REV_ID_FUSE_MACRO__ADDRESS) & ATI_REV_ID_FUSE_MACRO__MASK)
			>> ATI_REV_ID_FUSE_MACRO__SHIFT;
	else
		return (RREG32(mmPCIE_EFUSE4) & PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID_MASK)
			>> PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID__SHIFT;
}

static const struct amdgpu_asic_funcs vi_asic_funcs =
{
	.read_disabled_bios = &vi_read_disabled_bios,
	.read_bios_from_rom = &vi_read_bios_from_rom,
	.read_register = &vi_read_register,
	.reset = &vi_asic_reset,
	.set_vga_state = &vi_vga_set_state,
	.get_xclk = &vi_get_xclk,
	.set_uvd_clocks = &vi_set_uvd_clocks,
	.set_vce_clocks = &vi_set_vce_clocks,
};
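/*
 * These callbacks are reached through the amdgpu_asic_*() wrappers once
 * vi_common_early_init() sets adev->asic_funcs; for example,
 * amdgpu_asic_reset(adev) ends up in vi_asic_reset() on VI parts.
 */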
static int vi_common_early_init(void *handle)
{
	bool smc_enabled = false;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->flags & AMD_IS_APU) {
		adev->smc_rreg = &cz_smc_rreg;
		adev->smc_wreg = &cz_smc_wreg;
	} else {
		adev->smc_rreg = &vi_smc_rreg;
		adev->smc_wreg = &vi_smc_wreg;
	}
	adev->pcie_rreg = &vi_pcie_rreg;
	adev->pcie_wreg = &vi_pcie_wreg;
	adev->uvd_ctx_rreg = &vi_uvd_ctx_rreg;
	adev->uvd_ctx_wreg = &vi_uvd_ctx_wreg;
	adev->didt_rreg = &vi_didt_rreg;
	adev->didt_wreg = &vi_didt_wreg;
	adev->gc_cac_rreg = &vi_gc_cac_rreg;
	adev->gc_cac_wreg = &vi_gc_cac_wreg;

	adev->asic_funcs = &vi_asic_funcs;

	if (amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_SMC) &&
	    (amdgpu_ip_block_mask & (1 << AMD_IP_BLOCK_TYPE_SMC)))
		smc_enabled = true;

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_init_setting(adev);

	adev->rev_id = vi_get_rev_id(adev);
	adev->external_rev_id = 0xFF;
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		adev->cg_flags = 0;
		adev->pg_flags = 0;
		adev->external_rev_id = 0x1;
		break;
	case CHIP_FIJI:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x3c;
		break;
	case CHIP_TONGA:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x14;
		break;
	case CHIP_POLARIS11:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x5A;
		break;
	case CHIP_POLARIS10:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x50;
		break;
	case CHIP_POLARIS12:
		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x64;
		break;
	case CHIP_CARRIZO:
		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_VCE_MGCG;
		/* rev0 hardware requires workarounds to support PG */
		adev->pg_flags = 0;
		if (adev->rev_id != 0x00) {
			adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
				AMD_PG_SUPPORT_GFX_SMG |
				AMD_PG_SUPPORT_GFX_PIPELINE |
				AMD_PG_SUPPORT_CP |
				AMD_PG_SUPPORT_UVD |
				AMD_PG_SUPPORT_VCE;
		}
		adev->external_rev_id = adev->rev_id + 0x1;
		break;
	case CHIP_STONEY:
		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_GFX_PG |
			AMD_PG_SUPPORT_GFX_SMG |
			AMD_PG_SUPPORT_GFX_PIPELINE |
			AMD_PG_SUPPORT_CP |
			AMD_PG_SUPPORT_UVD |
			AMD_PG_SUPPORT_VCE;
		adev->external_rev_id = adev->rev_id + 0x61;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	if (amdgpu_smc_load_fw && smc_enabled)
		adev->firmware.smu_load = true;

	amdgpu_get_pcie_info(adev);

	return 0;
}
static int vi_common_sw_init(void *handle)
{
	return 0;
}

static int vi_common_sw_fini(void *handle)
{
	return 0;
}

static int vi_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* move the golden regs per IP block */
	vi_init_golden_registers(adev);
	/* enable pcie gen2/3 link */
	vi_pcie_gen3_enable(adev);
	/* enable aspm */
	vi_program_aspm(adev);
	/* enable the doorbell aperture */
	vi_enable_doorbell_aperture(adev, true);

	return 0;
}
static int vi_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* disable the doorbell aperture */
	vi_enable_doorbell_aperture(adev, false);

	return 0;
}
static int vi_common_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return vi_common_hw_fini(adev);
}

static int vi_common_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return vi_common_hw_init(adev);
}

static bool vi_common_is_idle(void *handle)
{
	return true;
}

static int vi_common_wait_for_idle(void *handle)
{
	return 0;
}

static int vi_common_soft_reset(void *handle)
{
	return 0;
}
static void vi_update_bif_medium_grain_light_sleep(struct amdgpu_device *adev,
						   bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32_PCIE(ixPCIE_CNTL2);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS))
		data |= PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
				PCIE_CNTL2__MST_MEM_LS_EN_MASK |
				PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK;
	else
		data &= ~(PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
				PCIE_CNTL2__MST_MEM_LS_EN_MASK |
				PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);

	if (temp != data)
		WREG32_PCIE(ixPCIE_CNTL2, data);
}
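/*
 * vi_update_bif_medium_grain_light_sleep() above and the vi_update_*()
 * helpers below all follow the same read-modify-write pattern: read the
 * current value into temp/data, toggle the relevant bits in data according
 * to the requested state and the cg_flags mask, and only write the register
 * back when the value actually changed.
 */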
static void vi_update_hdp_medium_grain_clock_gating(struct amdgpu_device *adev,
						    bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32(mmHDP_HOST_PATH_CNTL);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
		data &= ~HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;
	else
		data |= HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;

	if (temp != data)
		WREG32(mmHDP_HOST_PATH_CNTL, data);
}

static void vi_update_hdp_light_sleep(struct amdgpu_device *adev,
				      bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32(mmHDP_MEM_POWER_LS);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
		data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
	else
		data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;

	if (temp != data)
		WREG32(mmHDP_MEM_POWER_LS, data);
}

static void vi_update_drm_light_sleep(struct amdgpu_device *adev,
				      bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32(0x157a);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS))
		data |= 0x1;
	else
		data &= 0xfffffffe;

	if (temp != data)
		WREG32(0x157a, data);
}

static void vi_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
						    bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32_SMC(ixCGTT_ROM_CLK_CTRL0);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG))
		data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
				CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK);
	else
		data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
				CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK;

	if (temp != data)
		WREG32_SMC(ixCGTT_ROM_CLK_CTRL0, data);
}
static int vi_common_set_clockgating_state_by_smu(void *handle,
					   enum amd_clockgating_state state)
{
	uint32_t msg_id, pp_state = 0;
	uint32_t pp_support_state = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	void *pp_handle = adev->powerplay.pp_handle;

	if (adev->cg_flags & (AMD_CG_SUPPORT_MC_LS | AMD_CG_SUPPORT_MC_MGCG)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_MC_LS) {
			pp_support_state = AMD_CG_SUPPORT_MC_LS;
			pp_state = PP_STATE_LS;
		}
		if (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG) {
			pp_support_state |= AMD_CG_SUPPORT_MC_MGCG;
			pp_state |= PP_STATE_CG;
		}
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
			       PP_BLOCK_SYS_MC,
			       pp_support_state,
			       pp_state);
		amd_set_clockgating_by_smu(pp_handle, msg_id);
	}

	if (adev->cg_flags & (AMD_CG_SUPPORT_SDMA_LS | AMD_CG_SUPPORT_SDMA_MGCG)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS) {
			pp_support_state = AMD_CG_SUPPORT_SDMA_LS;
			pp_state = PP_STATE_LS;
		}
		if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG) {
			pp_support_state |= AMD_CG_SUPPORT_SDMA_MGCG;
			pp_state |= PP_STATE_CG;
		}
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
			       PP_BLOCK_SYS_SDMA,
			       pp_support_state,
			       pp_state);
		amd_set_clockgating_by_smu(pp_handle, msg_id);
	}

	if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_MGCG)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
			pp_support_state = AMD_CG_SUPPORT_HDP_LS;
			pp_state = PP_STATE_LS;
		}
		if (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG) {
			pp_support_state |= AMD_CG_SUPPORT_HDP_MGCG;
			pp_state |= PP_STATE_CG;
		}
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
			       PP_BLOCK_SYS_HDP,
			       pp_support_state,
			       pp_state);
		amd_set_clockgating_by_smu(pp_handle, msg_id);
	}

	if (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS) {
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_LS;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
			       PP_BLOCK_SYS_BIF,
			       PP_STATE_SUPPORT_LS,
			       pp_state);
		amd_set_clockgating_by_smu(pp_handle, msg_id);
	}

	if (adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG) {
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_CG;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
			       PP_BLOCK_SYS_BIF,
			       PP_STATE_SUPPORT_CG,
			       pp_state);
		amd_set_clockgating_by_smu(pp_handle, msg_id);
	}

	if (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS) {
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_LS;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
			       PP_BLOCK_SYS_DRM,
			       PP_STATE_SUPPORT_LS,
			       pp_state);
		amd_set_clockgating_by_smu(pp_handle, msg_id);
	}

	if (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG) {
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_CG;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
			       PP_BLOCK_SYS_ROM,
			       PP_STATE_SUPPORT_CG,
			       pp_state);
		amd_set_clockgating_by_smu(pp_handle, msg_id);
	}
	return 0;
}
static int vi_common_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (adev->asic_type) {
	case CHIP_FIJI:
		vi_update_bif_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		vi_update_hdp_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		vi_update_hdp_light_sleep(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		vi_update_rom_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		break;
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		vi_update_bif_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		vi_update_hdp_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		vi_update_hdp_light_sleep(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		vi_update_drm_light_sleep(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		break;
	case CHIP_TONGA:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		vi_common_set_clockgating_state_by_smu(adev, state);
	default:
		break;
	}
	return 0;
}

static int vi_common_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	return 0;
}
static void vi_common_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	/* AMD_CG_SUPPORT_BIF_LS */
	data = RREG32_PCIE(ixPCIE_CNTL2);
	if (data & PCIE_CNTL2__SLV_MEM_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_BIF_LS;

	/* AMD_CG_SUPPORT_HDP_LS */
	data = RREG32(mmHDP_MEM_POWER_LS);
	if (data & HDP_MEM_POWER_LS__LS_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_LS;

	/* AMD_CG_SUPPORT_HDP_MGCG */
	data = RREG32(mmHDP_HOST_PATH_CNTL);
	if (!(data & HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK))
		*flags |= AMD_CG_SUPPORT_HDP_MGCG;

	/* AMD_CG_SUPPORT_ROM_MGCG */
	data = RREG32_SMC(ixCGTT_ROM_CLK_CTRL0);
	if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK))
		*flags |= AMD_CG_SUPPORT_ROM_MGCG;
}
static const struct amd_ip_funcs vi_common_ip_funcs = {
	.name = "vi_common",
	.early_init = vi_common_early_init,
	.late_init = NULL,
	.sw_init = vi_common_sw_init,
	.sw_fini = vi_common_sw_fini,
	.hw_init = vi_common_hw_init,
	.hw_fini = vi_common_hw_fini,
	.suspend = vi_common_suspend,
	.resume = vi_common_resume,
	.is_idle = vi_common_is_idle,
	.wait_for_idle = vi_common_wait_for_idle,
	.soft_reset = vi_common_soft_reset,
	.set_clockgating_state = vi_common_set_clockgating_state,
	.set_powergating_state = vi_common_set_powergating_state,
	.get_clockgating_state = vi_common_get_clockgating_state,
};

static const struct amdgpu_ip_block_version vi_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &vi_common_ip_funcs,
};
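/*
 * vi_common_ip_block is added first in vi_set_ip_blocks() below, so the
 * common hw_init (golden registers, PCIE/ASPM setup, doorbell aperture)
 * runs before the hw_init of the other IP blocks on the same ASIC.
 */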
int vi_set_ip_blocks(struct amdgpu_device *adev)
{
	/* in early init stage, vbios code won't work */
	vi_detect_hw_virtualization(adev);

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		/* topaz has no DCE, UVD, VCE */
		amdgpu_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_ip_block_add(adev, &gmc_v7_4_ip_block);
		amdgpu_ip_block_add(adev, &iceland_ih_ip_block);
		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
		amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_ip_block_add(adev, &sdma_v2_4_ip_block);
		break;
	case CHIP_FIJI:
		amdgpu_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_ip_block_add(adev, &gmc_v8_5_ip_block);
		amdgpu_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
		else
			amdgpu_ip_block_add(adev, &dce_v10_1_ip_block);
		amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block);
		if (!amdgpu_sriov_vf(adev)) {
			amdgpu_ip_block_add(adev, &uvd_v6_0_ip_block);
			amdgpu_ip_block_add(adev, &vce_v3_0_ip_block);
		}
		break;
	case CHIP_TONGA:
		amdgpu_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
		else
			amdgpu_ip_block_add(adev, &dce_v10_0_ip_block);
		amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block);
		if (!amdgpu_sriov_vf(adev)) {
			amdgpu_ip_block_add(adev, &uvd_v5_0_ip_block);
			amdgpu_ip_block_add(adev, &vce_v3_0_ip_block);
		}
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
		amdgpu_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_ip_block_add(adev, &gmc_v8_1_ip_block);
		amdgpu_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
		else
			amdgpu_ip_block_add(adev, &dce_v11_2_ip_block);
		amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_ip_block_add(adev, &sdma_v3_1_ip_block);
		amdgpu_ip_block_add(adev, &uvd_v6_3_ip_block);
		amdgpu_ip_block_add(adev, &vce_v3_4_ip_block);
		break;
	case CHIP_CARRIZO:
		amdgpu_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_ip_block_add(adev, &cz_ih_ip_block);
		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
		else
			amdgpu_ip_block_add(adev, &dce_v11_0_ip_block);
		amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_ip_block_add(adev, &uvd_v6_0_ip_block);
		amdgpu_ip_block_add(adev, &vce_v3_1_ip_block);
#if defined(CONFIG_DRM_AMD_ACP)
		amdgpu_ip_block_add(adev, &acp_ip_block);
#endif
		break;
	case CHIP_STONEY:
		amdgpu_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_ip_block_add(adev, &cz_ih_ip_block);
		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
		else
			amdgpu_ip_block_add(adev, &dce_v11_0_ip_block);
		amdgpu_ip_block_add(adev, &gfx_v8_1_ip_block);
		amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_ip_block_add(adev, &uvd_v6_2_ip_block);
		amdgpu_ip_block_add(adev, &vce_v3_4_ip_block);
#if defined(CONFIG_DRM_AMD_ACP)
		amdgpu_ip_block_add(adev, &acp_ip_block);
#endif
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	return 0;
}