/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/slab.h>
#include "drmP.h"
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amd_pcie.h"

#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"

#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"

#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"

#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_sh_mask.h"

#include "smu/smu_7_1_1_d.h"
#include "smu/smu_7_1_1_sh_mask.h"

#include "uvd/uvd_5_0_d.h"
#include "uvd/uvd_5_0_sh_mask.h"

#include "vce/vce_3_0_d.h"
#include "vce/vce_3_0_sh_mask.h"

#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"

#include "vid.h"
#include "vi.h"
#include "vi_dpm.h"
#include "gmc_v8_0.h"
#include "gmc_v7_0.h"
#include "gfx_v8_0.h"
#include "sdma_v2_4.h"
#include "sdma_v3_0.h"
#include "dce_v10_0.h"
#include "dce_v11_0.h"
#include "iceland_ih.h"
#include "tonga_ih.h"
#include "cz_ih.h"
#include "uvd_v5_0.h"
#include "uvd_v6_0.h"
#include "vce_v3_0.h"
#include "amdgpu_powerplay.h"
#if defined(CONFIG_DRM_AMD_ACP)
#include "amdgpu_acp.h"
#endif
#include "dce_virtual.h"
/*
 * Indirect registers accessor
 */
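/*
 * Each accessor below follows the same index/data pattern: take the
 * per-aperture spinlock, write the register offset to the INDEX register,
 * then read or write the DATA register.  The throwaway RREG32() after
 * writing mmPCIE_INDEX posts the index write before the data access.
 */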
static u32 vi_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(mmPCIE_INDEX, reg);
	(void)RREG32(mmPCIE_INDEX);
	r = RREG32(mmPCIE_DATA);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}
static void vi_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(mmPCIE_INDEX, reg);
	(void)RREG32(mmPCIE_INDEX);
	WREG32(mmPCIE_DATA, v);
	(void)RREG32(mmPCIE_DATA);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}
static u32 vi_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmSMC_IND_INDEX_11, (reg));
	r = RREG32(mmSMC_IND_DATA_11);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return r;
}
static void vi_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmSMC_IND_INDEX_11, (reg));
	WREG32(mmSMC_IND_DATA_11, (v));
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}
#define mmMP0PUB_IND_INDEX	0x180
#define mmMP0PUB_IND_DATA	0x181
static u32 cz_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmMP0PUB_IND_INDEX, (reg));
	r = RREG32(mmMP0PUB_IND_DATA);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return r;
}
static void cz_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmMP0PUB_IND_INDEX, (reg));
	WREG32(mmMP0PUB_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}
static u32 vi_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
	r = RREG32(mmUVD_CTX_DATA);
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
	return r;
}
static void vi_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
	WREG32(mmUVD_CTX_DATA, (v));
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
}
static u32 vi_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(mmDIDT_IND_INDEX, (reg));
	r = RREG32(mmDIDT_IND_DATA);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
	return r;
}
static void vi_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(mmDIDT_IND_INDEX, (reg));
	WREG32(mmDIDT_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}
static u32 vi_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32(mmGC_CAC_IND_INDEX, (reg));
	r = RREG32(mmGC_CAC_IND_DATA);
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
	return r;
}
static void vi_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32(mmGC_CAC_IND_INDEX, (reg));
	WREG32(mmGC_CAC_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
}
static const u32 tonga_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};
static const u32 fiji_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};
static const u32 iceland_mgcg_cgcg_init[] =
{
	mmPCIE_INDEX, 0xffffffff, ixPCIE_CNTL2,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, ixCGTT_ROM_CLK_CTRL0,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};
static const u32 cz_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};
static const u32 stoney_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xffffffff, 0x00000104,
	mmHDP_HOST_PATH_CNTL, 0xffffffff, 0x0f000027,
};
static void vi_init_golden_registers(struct amdgpu_device *adev)
{
	/* Some of the registers might be dependent on GRBM_GFX_INDEX */
	mutex_lock(&adev->grbm_idx_mutex);

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		amdgpu_program_register_sequence(adev,
						 iceland_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
		break;
	case CHIP_FIJI:
		amdgpu_program_register_sequence(adev,
						 fiji_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init));
		break;
	case CHIP_TONGA:
		amdgpu_program_register_sequence(adev,
						 tonga_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(tonga_mgcg_cgcg_init));
		break;
	case CHIP_CARRIZO:
		amdgpu_program_register_sequence(adev,
						 cz_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(cz_mgcg_cgcg_init));
		break;
	case CHIP_STONEY:
		amdgpu_program_register_sequence(adev,
						 stoney_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(stoney_mgcg_cgcg_init));
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	default:
		break;
	}
	mutex_unlock(&adev->grbm_idx_mutex);
}
/**
 * vi_get_xclk - get the xclk
 *
 * @adev: amdgpu_device pointer
 *
 * Returns the reference clock used by the gfx engine
 * (VI).
 */
static u32 vi_get_xclk(struct amdgpu_device *adev)
{
	u32 reference_clock = adev->clock.spll.reference_freq;
	u32 tmp;

	if (adev->flags & AMD_IS_APU)
		return reference_clock;

	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL_2);
	if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK))
		return 1000;

	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL);
	if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL, XTALIN_DIVIDE))
		return reference_clock / 4;

	return reference_clock;
}
/**
 * vi_srbm_select - select specific register instances
 *
 * @adev: amdgpu_device pointer
 * @me: selected ME (micro engine)
 * @pipe: pipe
 * @queue: queue
 * @vmid: VMID
 *
 * Switches the currently active registers instances.  Some
 * registers are instanced per VMID, others are instanced per
 * me/pipe/queue combination.
 */
void vi_srbm_select(struct amdgpu_device *adev,
		    u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 srbm_gfx_cntl = 0;
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, PIPEID, pipe);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, MEID, me);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, VMID, vmid);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, QUEUEID, queue);
	WREG32(mmSRBM_GFX_CNTL, srbm_gfx_cntl);
}
static void vi_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}
static bool vi_read_disabled_bios(struct amdgpu_device *adev)
{
	u32 bus_cntl;
	u32 d1vga_control = 0;
	u32 d2vga_control = 0;
	u32 vga_render_control = 0;
	u32 rom_cntl;
	bool r;

	bus_cntl = RREG32(mmBUS_CNTL);
	if (adev->mode_info.num_crtc) {
		d1vga_control = RREG32(mmD1VGA_CONTROL);
		d2vga_control = RREG32(mmD2VGA_CONTROL);
		vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
	}
	rom_cntl = RREG32_SMC(ixROM_CNTL);

	/* enable the rom */
	WREG32(mmBUS_CNTL, (bus_cntl & ~BUS_CNTL__BIOS_ROM_DIS_MASK));
	if (adev->mode_info.num_crtc) {
		/* Disable VGA mode */
		WREG32(mmD1VGA_CONTROL,
		       (d1vga_control & ~(D1VGA_CONTROL__D1VGA_MODE_ENABLE_MASK |
					  D1VGA_CONTROL__D1VGA_TIMING_SELECT_MASK)));
		WREG32(mmD2VGA_CONTROL,
		       (d2vga_control & ~(D2VGA_CONTROL__D2VGA_MODE_ENABLE_MASK |
					  D2VGA_CONTROL__D2VGA_TIMING_SELECT_MASK)));
		WREG32(mmVGA_RENDER_CONTROL,
		       (vga_render_control & ~VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK));
	}
	WREG32_SMC(ixROM_CNTL, rom_cntl | ROM_CNTL__SCK_OVERWRITE_MASK);

	r = amdgpu_read_bios(adev);

	/* restore regs */
	WREG32(mmBUS_CNTL, bus_cntl);
	if (adev->mode_info.num_crtc) {
		WREG32(mmD1VGA_CONTROL, d1vga_control);
		WREG32(mmD2VGA_CONTROL, d2vga_control);
		WREG32(mmVGA_RENDER_CONTROL, vga_render_control);
	}
	WREG32_SMC(ixROM_CNTL, rom_cntl);
	return r;
}
static bool vi_read_bios_from_rom(struct amdgpu_device *adev,
				  u8 *bios, u32 length_bytes)
{
	u32 *dw_ptr;
	unsigned long flags;
	u32 i, length_dw;

	if (bios == NULL)
		return false;
	if (length_bytes == 0)
		return false;
	/* APU vbios image is part of sbios image */
	if (adev->flags & AMD_IS_APU)
		return false;

	dw_ptr = (u32 *)bios;
	length_dw = ALIGN(length_bytes, 4) / 4;
	/* take the smc lock since we are using the smc index */
	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	/* set rom index to 0 */
	WREG32(mmSMC_IND_INDEX_11, ixROM_INDEX);
	WREG32(mmSMC_IND_DATA_11, 0);
	/* set index to data for continuous read */
	WREG32(mmSMC_IND_INDEX_11, ixROM_DATA);
	for (i = 0; i < length_dw; i++)
		dw_ptr[i] = RREG32(mmSMC_IND_DATA_11);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);

	return true;
}
static void vi_detect_hw_virtualization(struct amdgpu_device *adev)
{
	uint32_t reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
	/* bit0: 0 means pf and 1 means vf */
	/* bit31: 0 means disable IOV and 1 means enable */
	if (reg & 1)
		adev->virtualization.virtual_caps |= AMDGPU_SRIOV_CAPS_IS_VF;

	if (reg & 0x80000000)
		adev->virtualization.virtual_caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;

	if (reg == 0) {
		if (is_virtual_machine()) /* passthrough mode excludes sr-iov mode */
			adev->virtualization.virtual_caps |= AMDGPU_PASSTHROUGH_MODE;
	}
}
static const struct amdgpu_allowed_register_entry tonga_allowed_read_registers[] = {
	{mmGB_MACROTILE_MODE7, true},
};
static const struct amdgpu_allowed_register_entry cz_allowed_read_registers[] = {
	{mmGB_TILE_MODE7, true},
	{mmGB_TILE_MODE12, true},
	{mmGB_TILE_MODE17, true},
	{mmGB_TILE_MODE23, true},
	{mmGB_MACROTILE_MODE7, true},
};
static const struct amdgpu_allowed_register_entry vi_allowed_read_registers[] = {
	{mmGRBM_STATUS, false},
	{mmGRBM_STATUS2, false},
	{mmGRBM_STATUS_SE0, false},
	{mmGRBM_STATUS_SE1, false},
	{mmGRBM_STATUS_SE2, false},
	{mmGRBM_STATUS_SE3, false},
	{mmSRBM_STATUS, false},
	{mmSRBM_STATUS2, false},
	{mmSRBM_STATUS3, false},
	{mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET, false},
	{mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET, false},
	{mmCP_STAT, false},
	{mmCP_STALLED_STAT1, false},
	{mmCP_STALLED_STAT2, false},
	{mmCP_STALLED_STAT3, false},
	{mmCP_CPF_BUSY_STAT, false},
	{mmCP_CPF_STALLED_STAT1, false},
	{mmCP_CPF_STATUS, false},
	{mmCP_CPC_BUSY_STAT, false},
	{mmCP_CPC_STALLED_STAT1, false},
	{mmCP_CPC_STATUS, false},
	{mmGB_ADDR_CONFIG, false},
	{mmMC_ARB_RAMCFG, false},
	{mmGB_TILE_MODE0, false},
	{mmGB_TILE_MODE1, false},
	{mmGB_TILE_MODE2, false},
	{mmGB_TILE_MODE3, false},
	{mmGB_TILE_MODE4, false},
	{mmGB_TILE_MODE5, false},
	{mmGB_TILE_MODE6, false},
	{mmGB_TILE_MODE7, false},
	{mmGB_TILE_MODE8, false},
	{mmGB_TILE_MODE9, false},
	{mmGB_TILE_MODE10, false},
	{mmGB_TILE_MODE11, false},
	{mmGB_TILE_MODE12, false},
	{mmGB_TILE_MODE13, false},
	{mmGB_TILE_MODE14, false},
	{mmGB_TILE_MODE15, false},
	{mmGB_TILE_MODE16, false},
	{mmGB_TILE_MODE17, false},
	{mmGB_TILE_MODE18, false},
	{mmGB_TILE_MODE19, false},
	{mmGB_TILE_MODE20, false},
	{mmGB_TILE_MODE21, false},
	{mmGB_TILE_MODE22, false},
	{mmGB_TILE_MODE23, false},
	{mmGB_TILE_MODE24, false},
	{mmGB_TILE_MODE25, false},
	{mmGB_TILE_MODE26, false},
	{mmGB_TILE_MODE27, false},
	{mmGB_TILE_MODE28, false},
	{mmGB_TILE_MODE29, false},
	{mmGB_TILE_MODE30, false},
	{mmGB_TILE_MODE31, false},
	{mmGB_MACROTILE_MODE0, false},
	{mmGB_MACROTILE_MODE1, false},
	{mmGB_MACROTILE_MODE2, false},
	{mmGB_MACROTILE_MODE3, false},
	{mmGB_MACROTILE_MODE4, false},
	{mmGB_MACROTILE_MODE5, false},
	{mmGB_MACROTILE_MODE6, false},
	{mmGB_MACROTILE_MODE7, false},
	{mmGB_MACROTILE_MODE8, false},
	{mmGB_MACROTILE_MODE9, false},
	{mmGB_MACROTILE_MODE10, false},
	{mmGB_MACROTILE_MODE11, false},
	{mmGB_MACROTILE_MODE12, false},
	{mmGB_MACROTILE_MODE13, false},
	{mmGB_MACROTILE_MODE14, false},
	{mmGB_MACROTILE_MODE15, false},
	{mmCC_RB_BACKEND_DISABLE, false, true},
	{mmGC_USER_RB_BACKEND_DISABLE, false, true},
	{mmGB_BACKEND_MAP, false, false},
	{mmPA_SC_RASTER_CONFIG, false, true},
	{mmPA_SC_RASTER_CONFIG_1, false, true},
};
static uint32_t vi_get_register_value(struct amdgpu_device *adev,
				      bool indexed, u32 se_num,
				      u32 sh_num, u32 reg_offset)
{
	if (indexed) {
		uint32_t val;
		unsigned se_idx = (se_num == 0xffffffff) ? 0 : se_num;
		unsigned sh_idx = (sh_num == 0xffffffff) ? 0 : sh_num;

		switch (reg_offset) {
		case mmCC_RB_BACKEND_DISABLE:
			return adev->gfx.config.rb_config[se_idx][sh_idx].rb_backend_disable;
		case mmGC_USER_RB_BACKEND_DISABLE:
			return adev->gfx.config.rb_config[se_idx][sh_idx].user_rb_backend_disable;
		case mmPA_SC_RASTER_CONFIG:
			return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config;
		case mmPA_SC_RASTER_CONFIG_1:
			return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config_1;
		}

		mutex_lock(&adev->grbm_idx_mutex);
		if (se_num != 0xffffffff || sh_num != 0xffffffff)
			amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

		val = RREG32(reg_offset);

		if (se_num != 0xffffffff || sh_num != 0xffffffff)
			amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
		mutex_unlock(&adev->grbm_idx_mutex);
		return val;
	} else {
		unsigned idx;

		switch (reg_offset) {
		case mmGB_ADDR_CONFIG:
			return adev->gfx.config.gb_addr_config;
		case mmMC_ARB_RAMCFG:
			return adev->gfx.config.mc_arb_ramcfg;
		case mmGB_TILE_MODE0:
		case mmGB_TILE_MODE1:
		case mmGB_TILE_MODE2:
		case mmGB_TILE_MODE3:
		case mmGB_TILE_MODE4:
		case mmGB_TILE_MODE5:
		case mmGB_TILE_MODE6:
		case mmGB_TILE_MODE7:
		case mmGB_TILE_MODE8:
		case mmGB_TILE_MODE9:
		case mmGB_TILE_MODE10:
		case mmGB_TILE_MODE11:
		case mmGB_TILE_MODE12:
		case mmGB_TILE_MODE13:
		case mmGB_TILE_MODE14:
		case mmGB_TILE_MODE15:
		case mmGB_TILE_MODE16:
		case mmGB_TILE_MODE17:
		case mmGB_TILE_MODE18:
		case mmGB_TILE_MODE19:
		case mmGB_TILE_MODE20:
		case mmGB_TILE_MODE21:
		case mmGB_TILE_MODE22:
		case mmGB_TILE_MODE23:
		case mmGB_TILE_MODE24:
		case mmGB_TILE_MODE25:
		case mmGB_TILE_MODE26:
		case mmGB_TILE_MODE27:
		case mmGB_TILE_MODE28:
		case mmGB_TILE_MODE29:
		case mmGB_TILE_MODE30:
		case mmGB_TILE_MODE31:
			idx = (reg_offset - mmGB_TILE_MODE0);
			return adev->gfx.config.tile_mode_array[idx];
		case mmGB_MACROTILE_MODE0:
		case mmGB_MACROTILE_MODE1:
		case mmGB_MACROTILE_MODE2:
		case mmGB_MACROTILE_MODE3:
		case mmGB_MACROTILE_MODE4:
		case mmGB_MACROTILE_MODE5:
		case mmGB_MACROTILE_MODE6:
		case mmGB_MACROTILE_MODE7:
		case mmGB_MACROTILE_MODE8:
		case mmGB_MACROTILE_MODE9:
		case mmGB_MACROTILE_MODE10:
		case mmGB_MACROTILE_MODE11:
		case mmGB_MACROTILE_MODE12:
		case mmGB_MACROTILE_MODE13:
		case mmGB_MACROTILE_MODE14:
		case mmGB_MACROTILE_MODE15:
			idx = (reg_offset - mmGB_MACROTILE_MODE0);
			return adev->gfx.config.macrotile_mode_array[idx];
		default:
			return RREG32(reg_offset);
		}
	}
}
static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
			    u32 sh_num, u32 reg_offset, u32 *value)
{
	const struct amdgpu_allowed_register_entry *asic_register_table = NULL;
	const struct amdgpu_allowed_register_entry *asic_register_entry;
	uint32_t size, i;

	*value = 0;
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		asic_register_table = tonga_allowed_read_registers;
		size = ARRAY_SIZE(tonga_allowed_read_registers);
		break;
	case CHIP_FIJI:
	case CHIP_TONGA:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		asic_register_table = cz_allowed_read_registers;
		size = ARRAY_SIZE(cz_allowed_read_registers);
		break;
	default:
		break;
	}

	if (asic_register_table) {
		for (i = 0; i < size; i++) {
			asic_register_entry = asic_register_table + i;
			if (reg_offset != asic_register_entry->reg_offset)
				continue;
			if (!asic_register_entry->untouched)
				*value = vi_get_register_value(adev,
							       asic_register_entry->grbm_indexed,
							       se_num, sh_num, reg_offset);
			return 0;
		}
	}

	for (i = 0; i < ARRAY_SIZE(vi_allowed_read_registers); i++) {
		if (reg_offset != vi_allowed_read_registers[i].reg_offset)
			continue;
		if (!vi_allowed_read_registers[i].untouched)
			*value = vi_get_register_value(adev,
						       vi_allowed_read_registers[i].grbm_indexed,
						       se_num, sh_num, reg_offset);
		return 0;
	}
	return -EINVAL;
}
static int vi_gpu_pci_config_reset(struct amdgpu_device *adev)
{
	u32 i;

	dev_info(adev->dev, "GPU pci config reset\n");

	/* disable BM */
	pci_clear_master(adev->pdev);
	/* reset */
	amdgpu_pci_config_reset(adev);

	udelay(100);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) {
			/* enable BM */
			pci_set_master(adev->pdev);
			return 0;
		}
		udelay(1);
	}
	return -EINVAL;
}
/**
 * vi_asic_reset - soft reset GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Look up which blocks are hung and attempt
 * to reset them.
 * Returns 0 for success.
 */
static int vi_asic_reset(struct amdgpu_device *adev)
{
	int r;

	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	r = vi_gpu_pci_config_reset(adev);

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);

	return r;
}
static int vi_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
			    u32 cntl_reg, u32 status_reg)
{
	int r, i;
	struct atom_clock_dividers dividers;
	uint32_t tmp;

	r = amdgpu_atombios_get_clock_dividers(adev,
					       COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
					       clock, false, &dividers);
	if (r)
		return r;

	tmp = RREG32_SMC(cntl_reg);
	tmp &= ~(CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK |
		CG_DCLK_CNTL__DCLK_DIVIDER_MASK);
	tmp |= dividers.post_divider;
	WREG32_SMC(cntl_reg, tmp);

	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(status_reg) & CG_DCLK_STATUS__DCLK_STATUS_MASK)
			break;
		mdelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;

	return 0;
}
static int vi_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	int r;

	r = vi_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
	if (r)
		return r;

	r = vi_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
	if (r)
		return r;

	return 0;
}
static int vi_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	/* todo */

	return 0;
}
static void vi_pcie_gen3_enable(struct amdgpu_device *adev)
{
	if (pci_is_root_bus(adev->pdev->bus))
		return;

	if (amdgpu_pcie_gen2 == 0)
		return;

	if (adev->flags & AMD_IS_APU)
		return;

	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
		return;

	/* todo */
}
static void vi_program_aspm(struct amdgpu_device *adev)
{
	if (amdgpu_aspm == 0)
		return;

	/* todo */
}
static void vi_enable_doorbell_aperture(struct amdgpu_device *adev,
					bool enable)
{
	u32 tmp;

	/* not necessary on CZ */
	if (adev->flags & AMD_IS_APU)
		return;

	tmp = RREG32(mmBIF_DOORBELL_APER_EN);
	if (enable)
		tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 1);
	else
		tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 0);

	WREG32(mmBIF_DOORBELL_APER_EN, tmp);
}
#define ATI_REV_ID_FUSE_MACRO__ADDRESS	0xC0014044
#define ATI_REV_ID_FUSE_MACRO__SHIFT	9
#define ATI_REV_ID_FUSE_MACRO__MASK	0x00001E00
static uint32_t vi_get_rev_id(struct amdgpu_device *adev)
{
	if (adev->flags & AMD_IS_APU)
		return (RREG32_SMC(ATI_REV_ID_FUSE_MACRO__ADDRESS) & ATI_REV_ID_FUSE_MACRO__MASK)
			>> ATI_REV_ID_FUSE_MACRO__SHIFT;
	else
		return (RREG32(mmPCIE_EFUSE4) & PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID_MASK)
			>> PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID__SHIFT;
}
static const struct amdgpu_asic_funcs vi_asic_funcs =
{
	.read_disabled_bios = &vi_read_disabled_bios,
	.read_bios_from_rom = &vi_read_bios_from_rom,
	.detect_hw_virtualization = vi_detect_hw_virtualization,
	.read_register = &vi_read_register,
	.reset = &vi_asic_reset,
	.set_vga_state = &vi_vga_set_state,
	.get_xclk = &vi_get_xclk,
	.set_uvd_clocks = &vi_set_uvd_clocks,
	.set_vce_clocks = &vi_set_vce_clocks,
};
static int vi_common_early_init(void *handle)
{
	bool smc_enabled = false;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->flags & AMD_IS_APU) {
		adev->smc_rreg = &cz_smc_rreg;
		adev->smc_wreg = &cz_smc_wreg;
	} else {
		adev->smc_rreg = &vi_smc_rreg;
		adev->smc_wreg = &vi_smc_wreg;
	}
	adev->pcie_rreg = &vi_pcie_rreg;
	adev->pcie_wreg = &vi_pcie_wreg;
	adev->uvd_ctx_rreg = &vi_uvd_ctx_rreg;
	adev->uvd_ctx_wreg = &vi_uvd_ctx_wreg;
	adev->didt_rreg = &vi_didt_rreg;
	adev->didt_wreg = &vi_didt_wreg;
	adev->gc_cac_rreg = &vi_gc_cac_rreg;
	adev->gc_cac_wreg = &vi_gc_cac_wreg;

	adev->asic_funcs = &vi_asic_funcs;

	if (amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_SMC) &&
	    (amdgpu_ip_block_mask & (1 << AMD_IP_BLOCK_TYPE_SMC)))
		smc_enabled = true;

	adev->rev_id = vi_get_rev_id(adev);
	adev->external_rev_id = 0xFF;
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		adev->cg_flags = 0;
		adev->pg_flags = 0;
		adev->external_rev_id = 0x1;
		break;
	case CHIP_FIJI:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x3c;
		break;
	case CHIP_TONGA:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x14;
		break;
	case CHIP_POLARIS11:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x5A;
		break;
	case CHIP_POLARIS10:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x50;
		break;
	case CHIP_POLARIS12:
		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x64;
		break;
	case CHIP_CARRIZO:
		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_VCE_MGCG;
		/* rev0 hardware requires workarounds to support PG */
		adev->pg_flags = 0;
		if (adev->rev_id != 0x00) {
			adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
				AMD_PG_SUPPORT_GFX_SMG |
				AMD_PG_SUPPORT_GFX_PIPELINE |
				AMD_PG_SUPPORT_CP |
				AMD_PG_SUPPORT_UVD |
				AMD_PG_SUPPORT_VCE;
		}
		adev->external_rev_id = adev->rev_id + 0x1;
		break;
	case CHIP_STONEY:
		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_GFX_PG |
			AMD_PG_SUPPORT_GFX_SMG |
			AMD_PG_SUPPORT_GFX_PIPELINE |
			AMD_PG_SUPPORT_CP |
			AMD_PG_SUPPORT_UVD |
			AMD_PG_SUPPORT_VCE;
		adev->external_rev_id = adev->rev_id + 0x61;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	/* in early init stage, vbios code won't work */
	if (adev->asic_funcs->detect_hw_virtualization)
		amdgpu_asic_detect_hw_virtualization(adev);

	if (amdgpu_smc_load_fw && smc_enabled)
		adev->firmware.smu_load = true;

	amdgpu_get_pcie_info(adev);

	return 0;
}
static int vi_common_sw_init(void *handle)
{
	return 0;
}

static int vi_common_sw_fini(void *handle)
{
	return 0;
}
static int vi_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* move the golden regs per IP block */
	vi_init_golden_registers(adev);
	/* enable pcie gen2/3 link */
	vi_pcie_gen3_enable(adev);
	/* enable aspm */
	vi_program_aspm(adev);
	/* enable the doorbell aperture */
	vi_enable_doorbell_aperture(adev, true);

	return 0;
}
static int vi_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* disable the doorbell aperture */
	vi_enable_doorbell_aperture(adev, false);

	return 0;
}
static int vi_common_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return vi_common_hw_fini(adev);
}

static int vi_common_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return vi_common_hw_init(adev);
}
static bool vi_common_is_idle(void *handle)
{
	return true;
}

static int vi_common_wait_for_idle(void *handle)
{
	return 0;
}

static int vi_common_soft_reset(void *handle)
{
	return 0;
}
static void vi_update_bif_medium_grain_light_sleep(struct amdgpu_device *adev,
						   bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32_PCIE(ixPCIE_CNTL2);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS))
		data |= PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
				PCIE_CNTL2__MST_MEM_LS_EN_MASK |
				PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK;
	else
		data &= ~(PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
				PCIE_CNTL2__MST_MEM_LS_EN_MASK |
				PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);

	if (temp != data)
		WREG32_PCIE(ixPCIE_CNTL2, data);
}
static void vi_update_hdp_medium_grain_clock_gating(struct amdgpu_device *adev,
						    bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32(mmHDP_HOST_PATH_CNTL);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
		data &= ~HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;
	else
		data |= HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;

	if (temp != data)
		WREG32(mmHDP_HOST_PATH_CNTL, data);
}
static void vi_update_hdp_light_sleep(struct amdgpu_device *adev,
				      bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32(mmHDP_MEM_POWER_LS);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
		data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
	else
		data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;

	if (temp != data)
		WREG32(mmHDP_MEM_POWER_LS, data);
}
static void vi_update_drm_light_sleep(struct amdgpu_device *adev,
				      bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32(0x157a);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS))
		data |= 0x1;
	else
		data &= ~0x1;

	if (temp != data)
		WREG32(0x157a, data);
}
static void vi_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
						    bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32_SMC(ixCGTT_ROM_CLK_CTRL0);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG))
		data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
				CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK);
	else
		data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
				CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK;

	if (temp != data)
		WREG32_SMC(ixCGTT_ROM_CLK_CTRL0, data);
}
static int vi_common_set_clockgating_state_by_smu(void *handle,
					   enum amd_clockgating_state state)
{
	uint32_t msg_id, pp_state = 0;
	uint32_t pp_support_state = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	void *pp_handle = adev->powerplay.pp_handle;

	if (adev->cg_flags & (AMD_CG_SUPPORT_MC_LS | AMD_CG_SUPPORT_MC_MGCG)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_MC_LS) {
			pp_support_state = AMD_CG_SUPPORT_MC_LS;
			pp_state = PP_STATE_LS;
		}
		if (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG) {
			pp_support_state |= AMD_CG_SUPPORT_MC_MGCG;
			pp_state |= PP_STATE_CG;
		}
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
			       PP_BLOCK_SYS_MC,
			       pp_support_state,
			       pp_state);
		amd_set_clockgating_by_smu(pp_handle, msg_id);
	}
	if (adev->cg_flags & (AMD_CG_SUPPORT_SDMA_LS | AMD_CG_SUPPORT_SDMA_MGCG)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS) {
			pp_support_state = AMD_CG_SUPPORT_SDMA_LS;
			pp_state = PP_STATE_LS;
		}
		if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG) {
			pp_support_state |= AMD_CG_SUPPORT_SDMA_MGCG;
			pp_state |= PP_STATE_CG;
		}
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
			       PP_BLOCK_SYS_SDMA,
			       pp_support_state,
			       pp_state);
		amd_set_clockgating_by_smu(pp_handle, msg_id);
	}
	if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_MGCG)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
			pp_support_state = AMD_CG_SUPPORT_HDP_LS;
			pp_state = PP_STATE_LS;
		}
		if (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG) {
			pp_support_state |= AMD_CG_SUPPORT_HDP_MGCG;
			pp_state |= PP_STATE_CG;
		}
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
			       PP_BLOCK_SYS_HDP,
			       pp_support_state,
			       pp_state);
		amd_set_clockgating_by_smu(pp_handle, msg_id);
	}

	if (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS) {
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_LS;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
			       PP_BLOCK_SYS_BIF,
			       PP_STATE_SUPPORT_LS,
			       pp_state);
		amd_set_clockgating_by_smu(pp_handle, msg_id);
	}
	if (adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG) {
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_CG;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
			       PP_BLOCK_SYS_BIF,
			       PP_STATE_SUPPORT_CG,
			       pp_state);
		amd_set_clockgating_by_smu(pp_handle, msg_id);
	}

	if (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS) {
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_LS;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
			       PP_BLOCK_SYS_DRM,
			       PP_STATE_SUPPORT_LS,
			       pp_state);
		amd_set_clockgating_by_smu(pp_handle, msg_id);
	}

	if (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG) {
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_CG;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
			       PP_BLOCK_SYS_ROM,
			       PP_STATE_SUPPORT_CG,
			       pp_state);
		amd_set_clockgating_by_smu(pp_handle, msg_id);
	}
	return 0;
}
static int vi_common_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (adev->asic_type) {
	case CHIP_FIJI:
		vi_update_bif_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		vi_update_hdp_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		vi_update_hdp_light_sleep(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		vi_update_rom_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		break;
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		vi_update_bif_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		vi_update_hdp_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		vi_update_hdp_light_sleep(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		vi_update_drm_light_sleep(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		break;
	case CHIP_TONGA:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		vi_common_set_clockgating_state_by_smu(adev, state);
	default:
		break;
	}
	return 0;
}
static int vi_common_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	return 0;
}
static const struct amd_ip_funcs vi_common_ip_funcs = {
	.name = "vi_common",
	.early_init = vi_common_early_init,
	.late_init = NULL,
	.sw_init = vi_common_sw_init,
	.sw_fini = vi_common_sw_fini,
	.hw_init = vi_common_hw_init,
	.hw_fini = vi_common_hw_fini,
	.suspend = vi_common_suspend,
	.resume = vi_common_resume,
	.is_idle = vi_common_is_idle,
	.wait_for_idle = vi_common_wait_for_idle,
	.soft_reset = vi_common_soft_reset,
	.set_clockgating_state = vi_common_set_clockgating_state,
	.set_powergating_state = vi_common_set_powergating_state,
};
static const struct amdgpu_ip_block_version vi_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &vi_common_ip_funcs,
};
int vi_set_ip_blocks(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		/* topaz has no DCE, UVD, VCE */
		amdgpu_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_ip_block_add(adev, &gmc_v7_4_ip_block);
		amdgpu_ip_block_add(adev, &iceland_ih_ip_block);
		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
		amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_ip_block_add(adev, &sdma_v2_4_ip_block);
		break;
	case CHIP_FIJI:
		amdgpu_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_ip_block_add(adev, &gmc_v8_5_ip_block);
		amdgpu_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
		else
			amdgpu_ip_block_add(adev, &dce_v10_1_ip_block);
		amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_ip_block_add(adev, &uvd_v6_0_ip_block);
		amdgpu_ip_block_add(adev, &vce_v3_0_ip_block);
		break;
	case CHIP_TONGA:
		amdgpu_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
		else
			amdgpu_ip_block_add(adev, &dce_v10_0_ip_block);
		amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_ip_block_add(adev, &uvd_v5_0_ip_block);
		amdgpu_ip_block_add(adev, &vce_v3_0_ip_block);
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
		amdgpu_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_ip_block_add(adev, &gmc_v8_1_ip_block);
		amdgpu_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
		else
			amdgpu_ip_block_add(adev, &dce_v11_2_ip_block);
		amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_ip_block_add(adev, &sdma_v3_1_ip_block);
		amdgpu_ip_block_add(adev, &uvd_v6_3_ip_block);
		amdgpu_ip_block_add(adev, &vce_v3_4_ip_block);
		break;
	case CHIP_CARRIZO:
		amdgpu_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_ip_block_add(adev, &cz_ih_ip_block);
		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
		else
			amdgpu_ip_block_add(adev, &dce_v11_0_ip_block);
		amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_ip_block_add(adev, &uvd_v6_0_ip_block);
		amdgpu_ip_block_add(adev, &vce_v3_1_ip_block);
#if defined(CONFIG_DRM_AMD_ACP)
		amdgpu_ip_block_add(adev, &acp_ip_block);
#endif
		break;
	case CHIP_STONEY:
		amdgpu_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_ip_block_add(adev, &cz_ih_ip_block);
		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
		else
			amdgpu_ip_block_add(adev, &dce_v11_0_ip_block);
		amdgpu_ip_block_add(adev, &gfx_v8_1_ip_block);
		amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_ip_block_add(adev, &uvd_v6_2_ip_block);
		amdgpu_ip_block_add(adev, &vce_v3_4_ip_block);
#if defined(CONFIG_DRM_AMD_ACP)
		amdgpu_ip_block_add(adev, &acp_ip_block);
#endif
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	return 0;
}