/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "amdgpu_psp.h"

#include "uvd/uvd_7_0_offset.h"
#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
#include "sdma0/sdma0_4_0_offset.h"
#include "sdma1/sdma1_4_0_offset.h"
#include "hdp/hdp_4_0_offset.h"
#include "hdp/hdp_4_0_sh_mask.h"
#include "smuio/smuio_9_0_offset.h"
#include "smuio/smuio_9_0_sh_mask.h"
#include "nbio/nbio_7_0_default.h"
#include "nbio/nbio_7_0_offset.h"
#include "nbio/nbio_7_0_sh_mask.h"
#include "nbio/nbio_7_0_smn.h"
#include "mp/mp_9_0_offset.h"

#include "soc15.h"
#include "soc15_common.h"
#include "gfxhub_v1_0.h"
#include "mmhub_v1_0.h"
#include "nbio_v6_1.h"
#include "nbio_v7_0.h"
#include "nbio_v7_4.h"
#include "vega10_ih.h"
#include "sdma_v4_0.h"
#include "jpeg_v2_0.h"
#include "jpeg_v2_5.h"
#include "dce_virtual.h"
#include "amdgpu_smu.h"
#include "amdgpu_ras.h"
#include "amdgpu_xgmi.h"
#include <uapi/linux/kfd_ioctl.h>
#define mmMP0_MISC_CGTT_CTRL0				0x01b9
#define mmMP0_MISC_CGTT_CTRL0_BASE_IDX			0
#define mmMP0_MISC_LIGHT_SLEEP_CTRL			0x01ba
#define mmMP0_MISC_LIGHT_SLEEP_CTRL_BASE_IDX		0

/* for Vega20 register name change */
#define mmHDP_MEM_POWER_CTRL				0x00d4
#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK	0x00000001L
#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK	0x00000002L
#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK	0x00010000L
#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK	0x00020000L
#define mmHDP_MEM_POWER_CTRL_BASE_IDX			0
/* for Vega20/arcturus register offset change */
#define mmROM_INDEX_VG20				0x00e4
#define mmROM_INDEX_VG20_BASE_IDX			0
#define mmROM_DATA_VG20					0x00e5
#define mmROM_DATA_VG20_BASE_IDX			0
/*
 * Indirect registers accessor
 */
static u32 soc15_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg);
	(void)RREG32(address);
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}
static void soc15_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg);
	(void)RREG32(address);
	WREG32(data, v);
	(void)RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}
static u64 soc15_pcie_rreg64(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u64 r;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	/* read low 32 bit */
	WREG32(address, reg);
	(void)RREG32(address);
	r = RREG32(data);

	/* read high 32 bit */
	WREG32(address, reg + 4);
	(void)RREG32(address);
	r |= ((u64)RREG32(data) << 32);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}
static void soc15_pcie_wreg64(struct amdgpu_device *adev, u32 reg, u64 v)
{
	unsigned long flags, address, data;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	/* write low 32 bit */
	WREG32(address, reg);
	(void)RREG32(address);
	WREG32(data, (u32)(v & 0xffffffffULL));
	(void)RREG32(data);

	/* write high 32 bit */
	WREG32(address, reg + 4);
	(void)RREG32(address);
	WREG32(data, (u32)(v >> 32));
	(void)RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}
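/*
 * Note: the 64-bit PCIE accessors above reuse the same 32-bit index/data
 * window; each half is moved as a separate index write (read back to flush)
 * plus a data access, with the whole sequence held under pcie_idx_lock so
 * the two halves cannot be interleaved with another indirect access.
 */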
static u32 soc15_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_INDEX);
	data = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_DATA);

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(address, ((reg) & 0x1ff));
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
	return r;
}
static void soc15_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_INDEX);
	data = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_DATA);

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(address, ((reg) & 0x1ff));
	WREG32(data, (v));
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
}
static u32 soc15_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, (reg));
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
	return r;
}
static void soc15_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, (reg));
	WREG32(data, (v));
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}
static u32 soc15_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, (reg));
	r = RREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA);
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
	return r;
}
static void soc15_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, (reg));
	WREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
}
static u32 soc15_se_cac_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->se_cac_idx_lock, flags);
	WREG32_SOC15(GC, 0, mmSE_CAC_IND_INDEX, (reg));
	r = RREG32_SOC15(GC, 0, mmSE_CAC_IND_DATA);
	spin_unlock_irqrestore(&adev->se_cac_idx_lock, flags);
	return r;
}
static void soc15_se_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->se_cac_idx_lock, flags);
	WREG32_SOC15(GC, 0, mmSE_CAC_IND_INDEX, (reg));
	WREG32_SOC15(GC, 0, mmSE_CAC_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->se_cac_idx_lock, flags);
}
static u32 soc15_get_config_memsize(struct amdgpu_device *adev)
{
	return adev->nbio.funcs->get_memsize(adev);
}
static u32 soc15_get_xclk(struct amdgpu_device *adev)
{
	u32 reference_clock = adev->clock.spll.reference_freq;

	if (adev->asic_type == CHIP_RAVEN)
		return reference_clock / 4;

	return reference_clock;
}
void soc15_grbm_select(struct amdgpu_device *adev,
		       u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 grbm_gfx_cntl = 0;

	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, MEID, me);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);

	WREG32_SOC15_RLC_SHADOW(GC, 0, mmGRBM_GFX_CNTL, grbm_gfx_cntl);
}
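/*
 * soc15_grbm_select() programs GRBM_GFX_CNTL so that subsequent GC register
 * accesses are steered to the selected ME/pipe/queue/VMID instance; callers
 * typically hold adev->srbm_mutex around the access and restore the selection
 * to all zeroes when they are done.
 */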
static void soc15_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}
static bool soc15_read_disabled_bios(struct amdgpu_device *adev)
{
	/* todo */
	return false;
}
static bool soc15_read_bios_from_rom(struct amdgpu_device *adev,
				     u8 *bios, u32 length_bytes)
{
	u32 *dw_ptr;
	u32 i, length_dw;
	uint32_t rom_index_offset;
	uint32_t rom_data_offset;

	if (bios == NULL)
		return false;
	if (length_bytes == 0)
		return false;
	/* APU vbios image is part of sbios image */
	if (adev->flags & AMD_IS_APU)
		return false;

	dw_ptr = (u32 *)bios;
	length_dw = ALIGN(length_bytes, 4) / 4;

	switch (adev->asic_type) {
	case CHIP_VEGA20:
	case CHIP_ARCTURUS:
		rom_index_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX_VG20);
		rom_data_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA_VG20);
		break;
	default:
		rom_index_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX);
		rom_data_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA);
		break;
	}

	/* set rom index to 0 */
	WREG32(rom_index_offset, 0);
	/* read out the rom data */
	for (i = 0; i < length_dw; i++)
		dw_ptr[i] = RREG32(rom_data_offset);

	return true;
}
static struct soc15_allowed_register_entry soc15_allowed_read_registers[] = {
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE0)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE1)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE3)},
	{ SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_STATUS_REG)},
	{ SOC15_REG_ENTRY(SDMA1, 0, mmSDMA1_STATUS_REG)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT2)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT3)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)},
	{ SOC15_REG_ENTRY(GC, 0, mmDB_DEBUG2)},
};
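/*
 * Registers not listed in this whitelist are rejected by
 * soc15_read_register() below, which services userspace register-read
 * queries, so only this known-safe set can be sampled from user space.
 */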
static uint32_t soc15_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
					    u32 sh_num, u32 reg_offset)
{
	uint32_t val;

	mutex_lock(&adev->grbm_idx_mutex);
	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

	val = RREG32(reg_offset);

	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);
	return val;
}
static uint32_t soc15_get_register_value(struct amdgpu_device *adev,
					 bool indexed, u32 se_num,
					 u32 sh_num, u32 reg_offset)
{
	if (indexed) {
		return soc15_read_indexed_register(adev, se_num, sh_num, reg_offset);
	} else {
		if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG))
			return adev->gfx.config.gb_addr_config;
		else if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmDB_DEBUG2))
			return adev->gfx.config.db_debug2;
		return RREG32(reg_offset);
	}
}
static int soc15_read_register(struct amdgpu_device *adev, u32 se_num,
			       u32 sh_num, u32 reg_offset, u32 *value)
{
	uint32_t i;
	struct soc15_allowed_register_entry *en;

	*value = 0;
	for (i = 0; i < ARRAY_SIZE(soc15_allowed_read_registers); i++) {
		en = &soc15_allowed_read_registers[i];
		if (adev->reg_offset[en->hwip][en->inst] &&
		    reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
					+ en->reg_offset))
			continue;

		*value = soc15_get_register_value(adev,
						  soc15_allowed_read_registers[i].grbm_indexed,
						  se_num, sh_num, reg_offset);
		return 0;
	}
	return -EINVAL;
}
/**
 * soc15_program_register_sequence - program an array of registers.
 *
 * @adev: amdgpu_device pointer
 * @regs: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with AND and OR masks.
 * This is a helper for setting golden registers.
 */
void soc15_program_register_sequence(struct amdgpu_device *adev,
				     const struct soc15_reg_golden *regs,
				     const u32 array_size)
{
	const struct soc15_reg_golden *entry;
	u32 tmp, reg;
	int i;

	for (i = 0; i < array_size; ++i) {
		entry = &regs[i];
		reg = adev->reg_offset[entry->hwip][entry->instance][entry->segment] + entry->reg;

		if (entry->and_mask == 0xffffffff) {
			tmp = entry->or_mask;
		} else {
			tmp = RREG32(reg);
			tmp &= ~(entry->and_mask);
			tmp |= (entry->or_mask & entry->and_mask);
		}

		if (reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3) ||
		    reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE) ||
		    reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE_1) ||
		    reg == SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG))
			WREG32_RLC(reg, tmp);
		else
			WREG32(reg, tmp);
	}
}
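/*
 * Illustrative use only (the real golden-register tables live in the per-IP
 * files such as gfx_v9_0.c); entries are built with SOC15_REG_GOLDEN_VALUE
 * as (hwip, instance, register, and_mask, or_mask):
 *
 *	static const struct soc15_reg_golden golden_settings_example[] = {
 *		SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000400),
 *	};
 *
 *	soc15_program_register_sequence(adev, golden_settings_example,
 *					ARRAY_SIZE(golden_settings_example));
 */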
static int soc15_asic_mode1_reset(struct amdgpu_device *adev)
{
	u32 i;
	int ret = 0;

	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	dev_info(adev->dev, "GPU mode1 reset\n");

	/* disable BM */
	pci_clear_master(adev->pdev);

	pci_save_state(adev->pdev);

	ret = psp_gpu_reset(adev);
	if (ret)
		dev_err(adev->dev, "GPU mode1 reset failed\n");

	pci_restore_state(adev->pdev);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		u32 memsize = adev->nbio.funcs->get_memsize(adev);

		if (memsize != 0xffffffff)
			break;
		udelay(1);
	}

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);

	return ret;
}
static int soc15_asic_baco_reset(struct amdgpu_device *adev)
{
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
	int ret = 0;

	/* avoid NBIF getting stuck when doing RAS recovery in BACO reset */
	if (ras && ras->supported)
		adev->nbio.funcs->enable_doorbell_interrupt(adev, false);

	ret = amdgpu_dpm_baco_reset(adev);
	if (ret)
		return ret;

	/* re-enable doorbell interrupt after BACO exit */
	if (ras && ras->supported)
		adev->nbio.funcs->enable_doorbell_interrupt(adev, true);

	return 0;
}
static enum amd_reset_method
soc15_asic_reset_method(struct amdgpu_device *adev)
{
	bool baco_reset = false;
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);

	if (amdgpu_reset_method == AMD_RESET_METHOD_MODE1 ||
	    amdgpu_reset_method == AMD_RESET_METHOD_MODE2 ||
	    amdgpu_reset_method == AMD_RESET_METHOD_BACO)
		return amdgpu_reset_method;

	if (amdgpu_reset_method != -1)
		dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n",
			 amdgpu_reset_method);

	switch (adev->asic_type) {
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		return AMD_RESET_METHOD_MODE2;
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_ARCTURUS:
		baco_reset = amdgpu_dpm_is_baco_supported(adev);
		break;
	case CHIP_VEGA20:
		if (adev->psp.sos_fw_version >= 0x80067)
			baco_reset = amdgpu_dpm_is_baco_supported(adev);

		/*
		 * 1. PMFW version > 0x284300: all cases use baco
		 * 2. PMFW version <= 0x284300: only sGPU w/o RAS use baco
		 */
		if ((ras && ras->supported) && adev->pm.fw_version <= 0x283400)
			baco_reset = false;
		break;
	default:
		break;
	}

	if (baco_reset)
		return AMD_RESET_METHOD_BACO;
	else
		return AMD_RESET_METHOD_MODE1;
}
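/*
 * Summary of the selection above: a method forced through the
 * amdgpu_reset_method module parameter wins, APUs (Raven/Renoir) use mode2,
 * discrete parts use BACO when the DPM code reports it as supported (gated
 * on sOS/PMFW versions for Vega20), and everything else falls back to a PSP
 * mode1 reset.
 */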
static int soc15_asic_reset(struct amdgpu_device *adev)
{
	/* original raven doesn't have full asic reset */
	if ((adev->apu_flags & AMD_APU_IS_RAVEN) &&
	    !(adev->apu_flags & AMD_APU_IS_RAVEN2))
		return 0;

	switch (soc15_asic_reset_method(adev)) {
	case AMD_RESET_METHOD_BACO:
		return soc15_asic_baco_reset(adev);
	case AMD_RESET_METHOD_MODE2:
		return amdgpu_dpm_mode2_reset(adev);
	default:
		return soc15_asic_mode1_reset(adev);
	}
}
static bool soc15_supports_baco(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_ARCTURUS:
		return amdgpu_dpm_is_baco_supported(adev);
	case CHIP_VEGA20:
		if (adev->psp.sos_fw_version >= 0x80067)
			return amdgpu_dpm_is_baco_supported(adev);
		return false;
	default:
		return false;
	}
}
/*static int soc15_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
			u32 cntl_reg, u32 status_reg)
{
	return 0;
}*/

static int soc15_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	/*int r;

	r = soc15_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
	if (r)
		return r;

	r = soc15_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
	if (r)
		return r;
	*/
	return 0;
}

static int soc15_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	/* todo */
	return 0;
}
static void soc15_pcie_gen3_enable(struct amdgpu_device *adev)
{
	if (pci_is_root_bus(adev->pdev->bus))
		return;

	if (amdgpu_pcie_gen2 == 0)
		return;

	if (adev->flags & AMD_IS_APU)
		return;

	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
		return;

	/* todo */
}
static void soc15_program_aspm(struct amdgpu_device *adev)
{
	if (amdgpu_aspm == 0)
		return;

	/* todo */
}
static void soc15_enable_doorbell_aperture(struct amdgpu_device *adev,
					   bool enable)
{
	adev->nbio.funcs->enable_doorbell_aperture(adev, enable);
	adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable);
}
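/*
 * Both the main doorbell aperture and the self-ring aperture are toggled
 * together here through the NBIO callbacks, so a single call can bring the
 * whole doorbell path up or down around hw_init/hw_fini.
 */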
static const struct amdgpu_ip_block_version vega10_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 2,
	.minor = 0,
	.rev = 0,
	.funcs = &soc15_common_ip_funcs,
};
static uint32_t soc15_get_rev_id(struct amdgpu_device *adev)
{
	return adev->nbio.funcs->get_rev_id(adev);
}
static void soc15_reg_base_init(struct amdgpu_device *adev)
{
	int r;

	/* Set IP register base before any HW register access */
	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_RAVEN:
		vega10_reg_base_init(adev);
		break;
	case CHIP_RENOIR:
		/* It's safe to do ip discovery here for Renoir,
		 * it doesn't support SRIOV. */
		if (amdgpu_discovery) {
			r = amdgpu_discovery_reg_base_init(adev);
			if (r) {
				DRM_WARN("failed to init reg base from ip discovery table, "
					 "fallback to legacy init method\n");
				vega10_reg_base_init(adev);
			}
		}
		break;
	case CHIP_VEGA20:
		vega20_reg_base_init(adev);
		break;
	case CHIP_ARCTURUS:
		arct_reg_base_init(adev);
		break;
	default:
		DRM_ERROR("Unsupported asic type: %d!\n", adev->asic_type);
		break;
	}
}
void soc15_set_virt_ops(struct amdgpu_device *adev)
{
	adev->virt.ops = &xgpu_ai_virt_ops;

	/* init soc15 reg base early enough so we can
	 * request full access for sriov before set_ip_blocks. */
	soc15_reg_base_init(adev);
}
int soc15_set_ip_blocks(struct amdgpu_device *adev)
{
	/* for bare metal case */
	if (!amdgpu_sriov_vf(adev))
		soc15_reg_base_init(adev);

	if (adev->asic_type == CHIP_VEGA20 || adev->asic_type == CHIP_ARCTURUS)
		adev->gmc.xgmi.supported = true;

	if (adev->flags & AMD_IS_APU) {
		adev->nbio.funcs = &nbio_v7_0_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_0_hdp_flush_reg;
	} else if (adev->asic_type == CHIP_VEGA20 ||
		   adev->asic_type == CHIP_ARCTURUS) {
		adev->nbio.funcs = &nbio_v7_4_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg;
	} else {
		adev->nbio.funcs = &nbio_v6_1_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg;
	}

	if (adev->asic_type == CHIP_VEGA20 || adev->asic_type == CHIP_ARCTURUS)
		adev->df.funcs = &df_v3_6_funcs;
	else
		adev->df.funcs = &df_v1_7_funcs;

	adev->rev_id = soc15_get_rev_id(adev);

	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);

		/* For Vega10 SR-IOV, PSP need to be initialized before IH */
		if (amdgpu_sriov_vf(adev)) {
			if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
				if (adev->asic_type == CHIP_VEGA20)
					amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
				else
					amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
			}
			amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
		} else {
			amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
			if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
				if (adev->asic_type == CHIP_VEGA20)
					amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
				else
					amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
			}
		}
		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
		if (is_support_sw_smu(adev)) {
			if (!amdgpu_sriov_vf(adev))
				amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		} else {
			amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		}
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev))) {
			amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
			amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
		}
		break;
	case CHIP_RAVEN:
		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
			amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block);
		break;
	case CHIP_ARCTURUS:
		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);

		if (amdgpu_sriov_vf(adev)) {
			if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
				amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
			amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
		} else {
			amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
			if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
				amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		}

		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
		amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);

		if (amdgpu_sriov_vf(adev)) {
			if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
				amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
		} else {
			amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
		}
		if (!amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &jpeg_v2_5_ip_block);
		break;
	case CHIP_RENOIR:
		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
			amdgpu_device_ip_block_add(adev, &psp_v12_0_ip_block);
		amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
		amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
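/*
 * The ordering above is deliberate: common, GMC, IH and PSP blocks come
 * first so that firmware loading and interrupt handling are available, then
 * GFX/SDMA and the SMU, and finally display and multimedia blocks; under
 * SR-IOV the PSP block is moved ahead of IH, as noted for Vega10.
 */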
static void soc15_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
	adev->nbio.funcs->hdp_flush(adev, ring);
}
static void soc15_invalidate_hdp(struct amdgpu_device *adev,
				 struct amdgpu_ring *ring)
{
	if (!ring || !ring->funcs->emit_wreg)
		WREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
	else
		amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
			HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
}
static bool soc15_need_full_reset(struct amdgpu_device *adev)
{
	/* change this when we implement soft reset */
	return true;
}
static void vega20_reset_hdp_ras_error_count(struct amdgpu_device *adev)
{
	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__HDP))
		return;
	/* read back hdp ras counter to reset it to 0 */
	RREG32_SOC15(HDP, 0, mmHDP_EDC_CNT);
}
static void soc15_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
				 uint64_t *count1)
{
	uint32_t perfctr = 0;
	uint64_t cnt0_of, cnt1_of;
	int tmp;

	/* This reports 0 on APUs, so return to avoid writing/reading registers
	 * that may or may not be different from their GPU counterparts */
	if (adev->flags & AMD_IS_APU)
		return;

	/* Set the 2 events that we wish to watch, defined above */
	/* Reg 40 is # received msgs */
	/* Reg 104 is # of posted requests sent */
	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT0_SEL, 40);
	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT1_SEL, 104);

	/* Write to enable desired perf counters */
	WREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK, perfctr);
	/* Zero out and enable the perf counters
	 * Write 0x5:
	 * Bit 0 = Start all counters(1)
	 * Bit 2 = Global counter reset enable(1)
	 */
	WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000005);

	msleep(1000);

	/* Load the shadow and disable the perf counters
	 * Write 0x2:
	 * Bit 0 = Stop counters(0)
	 * Bit 1 = Load the shadow counters(1)
	 */
	WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000002);

	/* Read register values to get any >32bit overflow */
	tmp = RREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK);
	cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER0_UPPER);
	cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER1_UPPER);

	/* Get the values and add the overflow */
	*count0 = RREG32_PCIE(smnPCIE_PERF_COUNT0_TXCLK) | (cnt0_of << 32);
	*count1 = RREG32_PCIE(smnPCIE_PERF_COUNT1_TXCLK) | (cnt1_of << 32);
}
static void vega20_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
				  uint64_t *count1)
{
	uint32_t perfctr = 0;
	uint64_t cnt0_of, cnt1_of;
	int tmp;

	/* This reports 0 on APUs, so return to avoid writing/reading registers
	 * that may or may not be different from their GPU counterparts */
	if (adev->flags & AMD_IS_APU)
		return;

	/* Set the 2 events that we wish to watch, defined above */
	/* Reg 40 is # received msgs */
	/* Reg 108 is # of posted requests sent on VG20 */
	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK3,
				EVENT0_SEL, 40);
	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK3,
				EVENT1_SEL, 108);

	/* Write to enable desired perf counters */
	WREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK3, perfctr);
	/* Zero out and enable the perf counters
	 * Write 0x5:
	 * Bit 0 = Start all counters(1)
	 * Bit 2 = Global counter reset enable(1)
	 */
	WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000005);

	msleep(1000);

	/* Load the shadow and disable the perf counters
	 * Write 0x2:
	 * Bit 0 = Stop counters(0)
	 * Bit 1 = Load the shadow counters(1)
	 */
	WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000002);

	/* Read register values to get any >32bit overflow */
	tmp = RREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK3);
	cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK3, COUNTER0_UPPER);
	cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK3, COUNTER1_UPPER);

	/* Get the values and add the overflow */
	*count0 = RREG32_PCIE(smnPCIE_PERF_COUNT0_TXCLK3) | (cnt0_of << 32);
	*count1 = RREG32_PCIE(smnPCIE_PERF_COUNT1_TXCLK3) | (cnt1_of << 32);
}
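/*
 * vega20_get_pcie_usage() only differs from the soc15 variant above in which
 * counter block it uses (PCIE_PERF_*_TXCLK3) and in the event number for
 * posted requests (108 instead of 104); the measurement sequence is the same.
 */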
static bool soc15_need_reset_on_init(struct amdgpu_device *adev)
{
	u32 sol_reg;

	/* Just return false for soc15 GPUs. Reset does not seem to
	 * be necessary.
	 */
	if (!amdgpu_passthrough(adev))
		return false;

	if (adev->flags & AMD_IS_APU)
		return false;

	/* Check sOS sign of life register to confirm sys driver and sOS
	 * have already been loaded.
	 */
	sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
	if (sol_reg)
		return true;

	return false;
}
static uint64_t soc15_get_pcie_replay_count(struct amdgpu_device *adev)
{
	uint64_t nak_r, nak_g;

	/* Get the number of NAKs received and generated */
	nak_r = RREG32_PCIE(smnPCIE_RX_NUM_NAK);
	nak_g = RREG32_PCIE(smnPCIE_RX_NUM_NAK_GENERATED);

	/* Add the total number of NAKs, i.e. the number of replays */
	return (nak_r + nak_g);
}
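/*
 * The replay count reported here is approximated as the sum of NAKs received
 * and NAKs generated, both read over SMN; it is what ends up exposed through
 * the pcie_replay_count sysfs attribute.
 */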
static const struct amdgpu_asic_funcs soc15_asic_funcs =
{
	.read_disabled_bios = &soc15_read_disabled_bios,
	.read_bios_from_rom = &soc15_read_bios_from_rom,
	.read_register = &soc15_read_register,
	.reset = &soc15_asic_reset,
	.reset_method = &soc15_asic_reset_method,
	.set_vga_state = &soc15_vga_set_state,
	.get_xclk = &soc15_get_xclk,
	.set_uvd_clocks = &soc15_set_uvd_clocks,
	.set_vce_clocks = &soc15_set_vce_clocks,
	.get_config_memsize = &soc15_get_config_memsize,
	.flush_hdp = &soc15_flush_hdp,
	.invalidate_hdp = &soc15_invalidate_hdp,
	.need_full_reset = &soc15_need_full_reset,
	.init_doorbell_index = &vega10_doorbell_index_init,
	.get_pcie_usage = &soc15_get_pcie_usage,
	.need_reset_on_init = &soc15_need_reset_on_init,
	.get_pcie_replay_count = &soc15_get_pcie_replay_count,
	.supports_baco = &soc15_supports_baco,
};
static const struct amdgpu_asic_funcs vega20_asic_funcs =
{
	.read_disabled_bios = &soc15_read_disabled_bios,
	.read_bios_from_rom = &soc15_read_bios_from_rom,
	.read_register = &soc15_read_register,
	.reset = &soc15_asic_reset,
	.reset_method = &soc15_asic_reset_method,
	.set_vga_state = &soc15_vga_set_state,
	.get_xclk = &soc15_get_xclk,
	.set_uvd_clocks = &soc15_set_uvd_clocks,
	.set_vce_clocks = &soc15_set_vce_clocks,
	.get_config_memsize = &soc15_get_config_memsize,
	.flush_hdp = &soc15_flush_hdp,
	.invalidate_hdp = &soc15_invalidate_hdp,
	.reset_hdp_ras_error_count = &vega20_reset_hdp_ras_error_count,
	.need_full_reset = &soc15_need_full_reset,
	.init_doorbell_index = &vega20_doorbell_index_init,
	.get_pcie_usage = &vega20_get_pcie_usage,
	.need_reset_on_init = &soc15_need_reset_on_init,
	.get_pcie_replay_count = &soc15_get_pcie_replay_count,
	.supports_baco = &soc15_supports_baco,
};
static int soc15_common_early_init(void *handle)
{
#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
	adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
	adev->smc_rreg = NULL;
	adev->smc_wreg = NULL;
	adev->pcie_rreg = &soc15_pcie_rreg;
	adev->pcie_wreg = &soc15_pcie_wreg;
	adev->pcie_rreg64 = &soc15_pcie_rreg64;
	adev->pcie_wreg64 = &soc15_pcie_wreg64;
	adev->uvd_ctx_rreg = &soc15_uvd_ctx_rreg;
	adev->uvd_ctx_wreg = &soc15_uvd_ctx_wreg;
	adev->didt_rreg = &soc15_didt_rreg;
	adev->didt_wreg = &soc15_didt_wreg;
	adev->gc_cac_rreg = &soc15_gc_cac_rreg;
	adev->gc_cac_wreg = &soc15_gc_cac_wreg;
	adev->se_cac_rreg = &soc15_se_cac_rreg;
	adev->se_cac_wreg = &soc15_se_cac_wreg;

	adev->external_rev_id = 0xFF;
	switch (adev->asic_type) {
	case CHIP_VEGA10:
		adev->asic_funcs = &soc15_asic_funcs;
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_DRM_MGCG |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_DF_MGCG |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS;
		adev->pg_flags = 0;
		adev->external_rev_id = 0x1;
		break;
= 0x1;
1124 adev
->asic_funcs
= &soc15_asic_funcs
;
1125 adev
->cg_flags
= AMD_CG_SUPPORT_GFX_MGCG
|
1126 AMD_CG_SUPPORT_GFX_MGLS
|
1127 AMD_CG_SUPPORT_GFX_CGCG
|
1128 AMD_CG_SUPPORT_GFX_CGLS
|
1129 AMD_CG_SUPPORT_GFX_3D_CGCG
|
1130 AMD_CG_SUPPORT_GFX_3D_CGLS
|
1131 AMD_CG_SUPPORT_GFX_CP_LS
|
1132 AMD_CG_SUPPORT_MC_LS
|
1133 AMD_CG_SUPPORT_MC_MGCG
|
1134 AMD_CG_SUPPORT_SDMA_MGCG
|
1135 AMD_CG_SUPPORT_SDMA_LS
|
1136 AMD_CG_SUPPORT_BIF_MGCG
|
1137 AMD_CG_SUPPORT_BIF_LS
|
1138 AMD_CG_SUPPORT_HDP_MGCG
|
1139 AMD_CG_SUPPORT_HDP_LS
|
1140 AMD_CG_SUPPORT_ROM_MGCG
|
1141 AMD_CG_SUPPORT_VCE_MGCG
|
1142 AMD_CG_SUPPORT_UVD_MGCG
;
1144 adev
->external_rev_id
= adev
->rev_id
+ 0x14;
	case CHIP_VEGA20:
		adev->asic_funcs = &vega20_asic_funcs;
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG |
			AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x28;
		break;
	case CHIP_RAVEN:
		adev->asic_funcs = &soc15_asic_funcs;
		if (adev->pdev->device == 0x15dd)
			adev->apu_flags |= AMD_APU_IS_RAVEN;
		if (adev->pdev->device == 0x15d8)
			adev->apu_flags |= AMD_APU_IS_PICASSO;
		if (adev->rev_id >= 0x8)
			adev->apu_flags |= AMD_APU_IS_RAVEN2;

		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
			adev->external_rev_id = adev->rev_id + 0x79;
		else if (adev->apu_flags & AMD_APU_IS_PICASSO)
			adev->external_rev_id = adev->rev_id + 0x41;
		else if (adev->rev_id == 1)
			adev->external_rev_id = adev->rev_id + 0x20;
		else
			adev->external_rev_id = adev->rev_id + 0x01;

		if (adev->apu_flags & AMD_APU_IS_RAVEN2) {
			adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
				AMD_CG_SUPPORT_GFX_MGLS |
				AMD_CG_SUPPORT_GFX_CP_LS |
				AMD_CG_SUPPORT_GFX_3D_CGCG |
				AMD_CG_SUPPORT_GFX_3D_CGLS |
				AMD_CG_SUPPORT_GFX_CGCG |
				AMD_CG_SUPPORT_GFX_CGLS |
				AMD_CG_SUPPORT_BIF_LS |
				AMD_CG_SUPPORT_HDP_LS |
				AMD_CG_SUPPORT_ROM_MGCG |
				AMD_CG_SUPPORT_MC_MGCG |
				AMD_CG_SUPPORT_MC_LS |
				AMD_CG_SUPPORT_SDMA_MGCG |
				AMD_CG_SUPPORT_SDMA_LS |
				AMD_CG_SUPPORT_VCN_MGCG;

			adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
		} else if (adev->apu_flags & AMD_APU_IS_PICASSO) {
			adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
				AMD_CG_SUPPORT_GFX_MGLS |
				AMD_CG_SUPPORT_GFX_CP_LS |
				AMD_CG_SUPPORT_GFX_3D_CGCG |
				AMD_CG_SUPPORT_GFX_3D_CGLS |
				AMD_CG_SUPPORT_GFX_CGCG |
				AMD_CG_SUPPORT_GFX_CGLS |
				AMD_CG_SUPPORT_BIF_LS |
				AMD_CG_SUPPORT_HDP_LS |
				AMD_CG_SUPPORT_ROM_MGCG |
				AMD_CG_SUPPORT_MC_MGCG |
				AMD_CG_SUPPORT_MC_LS |
				AMD_CG_SUPPORT_SDMA_MGCG |
				AMD_CG_SUPPORT_SDMA_LS;

			adev->pg_flags = AMD_PG_SUPPORT_SDMA |
				AMD_PG_SUPPORT_MMHUB |
				AMD_PG_SUPPORT_VCN |
				AMD_PG_SUPPORT_VCN_DPG;
		} else {
			adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
				AMD_CG_SUPPORT_GFX_MGLS |
				AMD_CG_SUPPORT_GFX_RLC_LS |
				AMD_CG_SUPPORT_GFX_CP_LS |
				AMD_CG_SUPPORT_GFX_3D_CGCG |
				AMD_CG_SUPPORT_GFX_3D_CGLS |
				AMD_CG_SUPPORT_GFX_CGCG |
				AMD_CG_SUPPORT_GFX_CGLS |
				AMD_CG_SUPPORT_BIF_MGCG |
				AMD_CG_SUPPORT_BIF_LS |
				AMD_CG_SUPPORT_HDP_MGCG |
				AMD_CG_SUPPORT_HDP_LS |
				AMD_CG_SUPPORT_DRM_MGCG |
				AMD_CG_SUPPORT_DRM_LS |
				AMD_CG_SUPPORT_ROM_MGCG |
				AMD_CG_SUPPORT_MC_MGCG |
				AMD_CG_SUPPORT_MC_LS |
				AMD_CG_SUPPORT_SDMA_MGCG |
				AMD_CG_SUPPORT_SDMA_LS |
				AMD_CG_SUPPORT_VCN_MGCG;

			adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
		}
		break;
	case CHIP_ARCTURUS:
		adev->asic_funcs = &vega20_asic_funcs;
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_VCN | AMD_PG_SUPPORT_VCN_DPG;
		adev->external_rev_id = adev->rev_id + 0x32;
		break;
	case CHIP_RENOIR:
		adev->asic_funcs = &soc15_asic_funcs;
		adev->apu_flags |= AMD_APU_IS_RENOIR;
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_DF_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_SDMA |
			AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_VCN_DPG;
		adev->external_rev_id = adev->rev_id + 0x91;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	if (amdgpu_sriov_vf(adev)) {
		amdgpu_virt_init_setting(adev);
		xgpu_ai_mailbox_set_irq_funcs(adev);
	}

	return 0;
}
static int soc15_common_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r = 0;

	if (amdgpu_sriov_vf(adev))
		xgpu_ai_mailbox_get_irq(adev);

	if (adev->asic_funcs &&
	    adev->asic_funcs->reset_hdp_ras_error_count)
		adev->asic_funcs->reset_hdp_ras_error_count(adev);

	if (adev->nbio.funcs->ras_late_init)
		r = adev->nbio.funcs->ras_late_init(adev);

	return r;
}
static int soc15_common_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_ai_mailbox_add_irq_id(adev);

	adev->df.funcs->sw_init(adev);

	return 0;
}
static int soc15_common_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_nbio_ras_fini(adev);
	adev->df.funcs->sw_fini(adev);
	return 0;
}
static void soc15_doorbell_range_init(struct amdgpu_device *adev)
{
	int i;
	struct amdgpu_ring *ring;

	/* sdma/ih doorbell range are programmed by hypervisor */
	if (!amdgpu_sriov_vf(adev)) {
		for (i = 0; i < adev->sdma.num_instances; i++) {
			ring = &adev->sdma.instance[i].ring;
			adev->nbio.funcs->sdma_doorbell_range(adev, i,
				ring->use_doorbell, ring->doorbell_index,
				adev->doorbell_index.sdma_doorbell_range);
		}

		adev->nbio.funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell,
						    adev->irq.ih.doorbell_index);
	}
}
static int soc15_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* enable pcie gen2/3 link */
	soc15_pcie_gen3_enable(adev);
	/* enable aspm */
	soc15_program_aspm(adev);
	/* setup nbio registers */
	adev->nbio.funcs->init_registers(adev);
	/* remap HDP registers to a hole in mmio space,
	 * for the purpose of exposing those registers
	 * to process space
	 */
	if (adev->nbio.funcs->remap_hdp_registers)
		adev->nbio.funcs->remap_hdp_registers(adev);

	/* enable the doorbell aperture */
	soc15_enable_doorbell_aperture(adev, true);
	/* HW doorbell routing policy: doorbell writing not
	 * in SDMA/IH/MM/ACV range will be routed to CP. So
	 * we need to init SDMA/IH/MM/ACV doorbell range prior
	 * to CP ip block init and ring test.
	 */
	soc15_doorbell_range_init(adev);

	return 0;
}
static int soc15_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* disable the doorbell aperture */
	soc15_enable_doorbell_aperture(adev, false);
	if (amdgpu_sriov_vf(adev))
		xgpu_ai_mailbox_put_irq(adev);

	if (adev->nbio.ras_if &&
	    amdgpu_ras_is_supported(adev, adev->nbio.ras_if->block)) {
		if (adev->nbio.funcs->init_ras_controller_interrupt)
			amdgpu_irq_put(adev, &adev->nbio.ras_controller_irq, 0);
		if (adev->nbio.funcs->init_ras_err_event_athub_interrupt)
			amdgpu_irq_put(adev, &adev->nbio.ras_err_event_athub_irq, 0);
	}

	return 0;
}
static int soc15_common_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return soc15_common_hw_fini(adev);
}
static int soc15_common_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return soc15_common_hw_init(adev);
}
static bool soc15_common_is_idle(void *handle)
{
	return true;
}

static int soc15_common_wait_for_idle(void *handle)
{
	return 0;
}

static int soc15_common_soft_reset(void *handle)
{
	return 0;
}
static void soc15_update_hdp_light_sleep(struct amdgpu_device *adev, bool enable)
{
	uint32_t def, data;

	if (adev->asic_type == CHIP_VEGA20 ||
	    adev->asic_type == CHIP_ARCTURUS) {
		def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL));

		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
			data |= HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK |
				HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK |
				HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK |
				HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK;
		else
			data &= ~(HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK |
				  HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK |
				  HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK |
				  HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK);

		if (def != data)
			WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL), data);
	} else {
		def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));

		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
			data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
		else
			data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;

		if (def != data)
			WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS), data);
	}
}
static void soc15_update_drm_clock_gating(struct amdgpu_device *adev, bool enable)
{
	uint32_t def, data;

	def = data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_MGCG))
		data &= ~(0x01000000 |
			  0x02000000 |
			  0x04000000 |
			  0x08000000 |
			  0x10000000 |
			  0x20000000 |
			  0x40000000 |
			  0x80000000);
	else
		data |= (0x01000000 |
			 0x02000000 |
			 0x04000000 |
			 0x08000000 |
			 0x10000000 |
			 0x20000000 |
			 0x40000000 |
			 0x80000000);

	if (def != data)
		WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0), data);
}
static void soc15_update_drm_light_sleep(struct amdgpu_device *adev, bool enable)
{
	uint32_t def, data;

	def = data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL));

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS))
		data |= 1;
	else
		data &= ~1;

	if (def != data)
		WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL), data);
}
static void soc15_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
							bool enable)
{
	uint32_t def, data;

	def = data = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0));

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG))
		data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
			  CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK);
	else
		data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
			CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK;

	if (def != data)
		WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0), data);
}
static int soc15_common_set_clockgating_state(void *handle,
					      enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		adev->nbio.funcs->update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		adev->nbio.funcs->update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_hdp_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_drm_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_drm_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_rom_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		adev->df.funcs->update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		break;
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		adev->nbio.funcs->update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		adev->nbio.funcs->update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_hdp_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_drm_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_drm_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_rom_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		break;
	case CHIP_ARCTURUS:
		soc15_update_hdp_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		break;
	default:
		break;
	}
	return 0;
}
static void soc15_common_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	adev->nbio.funcs->get_clockgating_state(adev, flags);

	/* AMD_CG_SUPPORT_HDP_LS */
	data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));
	if (data & HDP_MEM_POWER_LS__LS_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_LS;

	/* AMD_CG_SUPPORT_DRM_MGCG */
	data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));
	if (!(data & 0x01000000))
		*flags |= AMD_CG_SUPPORT_DRM_MGCG;

	/* AMD_CG_SUPPORT_DRM_LS */
	data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL));
	if (data & 0x1)
		*flags |= AMD_CG_SUPPORT_DRM_LS;

	/* AMD_CG_SUPPORT_ROM_MGCG */
	data = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0));
	if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK))
		*flags |= AMD_CG_SUPPORT_ROM_MGCG;

	adev->df.funcs->get_clockgating_state(adev, flags);
}
static int soc15_common_set_powergating_state(void *handle,
					      enum amd_powergating_state state)
{
	/* todo */
	return 0;
}
const struct amd_ip_funcs soc15_common_ip_funcs = {
	.name = "soc15_common",
	.early_init = soc15_common_early_init,
	.late_init = soc15_common_late_init,
	.sw_init = soc15_common_sw_init,
	.sw_fini = soc15_common_sw_fini,
	.hw_init = soc15_common_hw_init,
	.hw_fini = soc15_common_hw_fini,
	.suspend = soc15_common_suspend,
	.resume = soc15_common_resume,
	.is_idle = soc15_common_is_idle,
	.wait_for_idle = soc15_common_wait_for_idle,
	.soft_reset = soc15_common_soft_reset,
	.set_clockgating_state = soc15_common_set_clockgating_state,
	.set_powergating_state = soc15_common_set_powergating_state,
	.get_clockgating_state = soc15_common_get_clockgating_state,
};