]>
Commit | Line | Data |
---|---|---|
220ab9bd KW |
1 | /* |
2 | * Copyright 2016 Advanced Micro Devices, Inc. | |
3 | * | |
4 | * Permission is hereby granted, free of charge, to any person obtaining a | |
5 | * copy of this software and associated documentation files (the "Software"), | |
6 | * to deal in the Software without restriction, including without limitation | |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | |
8 | * and/or sell copies of the Software, and to permit persons to whom the | |
9 | * Software is furnished to do so, subject to the following conditions: | |
10 | * | |
11 | * The above copyright notice and this permission notice shall be included in | |
12 | * all copies or substantial portions of the Software. | |
13 | * | |
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | |
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | |
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | |
20 | * OTHER DEALINGS IN THE SOFTWARE. | |
21 | * | |
22 | */ | |
23 | #include <linux/firmware.h> | |
24 | #include <linux/slab.h> | |
25 | #include <linux/module.h> | |
248a1d6f | 26 | #include <drm/drmP.h> |
220ab9bd | 27 | #include "amdgpu.h" |
d05da0e2 | 28 | #include "amdgpu_atombios.h" |
220ab9bd KW |
29 | #include "amdgpu_ih.h" |
30 | #include "amdgpu_uvd.h" | |
31 | #include "amdgpu_vce.h" | |
32 | #include "amdgpu_ucode.h" | |
33 | #include "amdgpu_psp.h" | |
34 | #include "atom.h" | |
35 | #include "amd_pcie.h" | |
36 | ||
37 | #include "vega10/soc15ip.h" | |
38 | #include "vega10/UVD/uvd_7_0_offset.h" | |
39 | #include "vega10/GC/gc_9_0_offset.h" | |
40 | #include "vega10/GC/gc_9_0_sh_mask.h" | |
41 | #include "vega10/SDMA0/sdma0_4_0_offset.h" | |
42 | #include "vega10/SDMA1/sdma1_4_0_offset.h" | |
43 | #include "vega10/HDP/hdp_4_0_offset.h" | |
44 | #include "vega10/HDP/hdp_4_0_sh_mask.h" | |
45 | #include "vega10/MP/mp_9_0_offset.h" | |
46 | #include "vega10/MP/mp_9_0_sh_mask.h" | |
47 | #include "vega10/SMUIO/smuio_9_0_offset.h" | |
48 | #include "vega10/SMUIO/smuio_9_0_sh_mask.h" | |
49 | ||
50 | #include "soc15.h" | |
51 | #include "soc15_common.h" | |
52 | #include "gfx_v9_0.h" | |
53 | #include "gmc_v9_0.h" | |
54 | #include "gfxhub_v1_0.h" | |
55 | #include "mmhub_v1_0.h" | |
56 | #include "vega10_ih.h" | |
57 | #include "sdma_v4_0.h" | |
58 | #include "uvd_v7_0.h" | |
59 | #include "vce_v4_0.h" | |
f2d7e707 | 60 | #include "vcn_v1_0.h" |
220ab9bd | 61 | #include "amdgpu_powerplay.h" |
796b6568 | 62 | #include "dce_virtual.h" |
f1a34465 | 63 | #include "mxgpu_ai.h" |
220ab9bd | 64 | |
220ab9bd KW |
65 | #define mmFabricConfigAccessControl 0x0410 |
66 | #define mmFabricConfigAccessControl_BASE_IDX 0 | |
67 | #define mmFabricConfigAccessControl_DEFAULT 0x00000000 | |
68 | //FabricConfigAccessControl | |
69 | #define FabricConfigAccessControl__CfgRegInstAccEn__SHIFT 0x0 | |
70 | #define FabricConfigAccessControl__CfgRegInstAccRegLock__SHIFT 0x1 | |
71 | #define FabricConfigAccessControl__CfgRegInstID__SHIFT 0x10 | |
72 | #define FabricConfigAccessControl__CfgRegInstAccEn_MASK 0x00000001L | |
73 | #define FabricConfigAccessControl__CfgRegInstAccRegLock_MASK 0x00000002L | |
74 | #define FabricConfigAccessControl__CfgRegInstID_MASK 0x00FF0000L | |
75 | ||
76 | ||
77 | #define mmDF_PIE_AON0_DfGlobalClkGater 0x00fc | |
78 | #define mmDF_PIE_AON0_DfGlobalClkGater_BASE_IDX 0 | |
79 | //DF_PIE_AON0_DfGlobalClkGater | |
80 | #define DF_PIE_AON0_DfGlobalClkGater__MGCGMode__SHIFT 0x0 | |
81 | #define DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK 0x0000000FL | |
82 | ||
/*
 * Encodings written into DF_PIE_AON0_DfGlobalClkGater.MGCGMode by
 * soc15_update_df_medium_grain_clock_gating(): 0 disables Data Fabric
 * MGCG, the other values enable it (names indicate the cycle delay).
 */
enum {
	DF_MGCG_DISABLE = 0,
	DF_MGCG_ENABLE_00_CYCLE_DELAY =1,
	DF_MGCG_ENABLE_01_CYCLE_DELAY =2,
	DF_MGCG_ENABLE_15_CYCLE_DELAY =13,
	DF_MGCG_ENABLE_31_CYCLE_DELAY =14,
	DF_MGCG_ENABLE_63_CYCLE_DELAY =15
};
91 | ||
92 | #define mmMP0_MISC_CGTT_CTRL0 0x01b9 | |
93 | #define mmMP0_MISC_CGTT_CTRL0_BASE_IDX 0 | |
94 | #define mmMP0_MISC_LIGHT_SLEEP_CTRL 0x01ba | |
95 | #define mmMP0_MISC_LIGHT_SLEEP_CTRL_BASE_IDX 0 | |
96 | ||
97 | /* | |
98 | * Indirect registers accessor | |
99 | */ | |
/*
 * Read an indirect PCIE register through the NBIO index/data pair.
 * APUs use the nbio v7.0 register offsets, discrete parts v6.1.
 * Serialized by pcie_idx_lock since index/data is a shared two-step access.
 */
static u32 soc15_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;
	struct nbio_pcie_index_data *nbio_pcie_id;

	if (adev->flags & AMD_IS_APU)
		nbio_pcie_id = &nbio_v7_0_pcie_index_data;
	else
		nbio_pcie_id = &nbio_v6_1_pcie_index_data;

	address = nbio_pcie_id->index_offset;
	data = nbio_pcie_id->data_offset;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg);
	(void)RREG32(address);	/* readback of the index register before reading data */
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}
121 | ||
/*
 * Write an indirect PCIE register through the NBIO index/data pair
 * (v7.0 offsets on APUs, v6.1 on discrete). Both the index and the data
 * writes are followed by a readback; the whole sequence holds pcie_idx_lock.
 */
static void soc15_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;
	struct nbio_pcie_index_data *nbio_pcie_id;

	if (adev->flags & AMD_IS_APU)
		nbio_pcie_id = &nbio_v7_0_pcie_index_data;
	else
		nbio_pcie_id = &nbio_v6_1_pcie_index_data;

	address = nbio_pcie_id->index_offset;
	data = nbio_pcie_id->data_offset;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg);
	(void)RREG32(address);	/* readback after selecting the index */
	WREG32(data, v);
	(void)RREG32(data);	/* readback after the data write */
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}
142 | ||
/*
 * Read a UVD context register via the UVD_CTX_INDEX/UVD_CTX_DATA pair.
 * The index is masked to 9 bits (0x1ff); access is serialized by
 * uvd_ctx_idx_lock.
 */
static u32 soc15_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_INDEX);
	data = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_DATA);

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(address, ((reg) & 0x1ff));	/* only 9 index bits are valid */
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
	return r;
}
157 | ||
/*
 * Write a UVD context register via the UVD_CTX_INDEX/UVD_CTX_DATA pair,
 * under uvd_ctx_idx_lock. Index masked to 9 bits, matching the reader.
 */
static void soc15_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_INDEX);
	data = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_DATA);

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(address, ((reg) & 0x1ff));
	WREG32(data, (v));
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
}
170 | ||
/*
 * Read a DIDT register via GC's DIDT_IND_INDEX/DIDT_IND_DATA pair,
 * serialized by didt_idx_lock.
 */
static u32 soc15_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, (reg));
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
	return r;
}
185 | ||
/*
 * Write a DIDT register via GC's DIDT_IND_INDEX/DIDT_IND_DATA pair,
 * serialized by didt_idx_lock.
 */
static void soc15_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, (reg));
	WREG32(data, (v));
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}
198 | ||
560460f2 EQ |
/*
 * Read a GC CAC register via the GC_CAC_IND_INDEX/GC_CAC_IND_DATA pair,
 * serialized by gc_cac_idx_lock.
 */
static u32 soc15_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, (reg));
	r = RREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA);
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
	return r;
}
210 | ||
/*
 * Write a GC CAC register via the GC_CAC_IND_INDEX/GC_CAC_IND_DATA pair,
 * serialized by gc_cac_idx_lock.
 */
static void soc15_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, (reg));
	WREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
}
220 | ||
2f11fb02 EQ |
/*
 * Read an SE CAC register via the SE_CAC_IND_INDEX/SE_CAC_IND_DATA pair,
 * serialized by se_cac_idx_lock.
 */
static u32 soc15_se_cac_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->se_cac_idx_lock, flags);
	WREG32_SOC15(GC, 0, mmSE_CAC_IND_INDEX, (reg));
	r = RREG32_SOC15(GC, 0, mmSE_CAC_IND_DATA);
	spin_unlock_irqrestore(&adev->se_cac_idx_lock, flags);
	return r;
}
232 | ||
/*
 * Write an SE CAC register via the SE_CAC_IND_INDEX/SE_CAC_IND_DATA pair,
 * serialized by se_cac_idx_lock.
 */
static void soc15_se_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->se_cac_idx_lock, flags);
	WREG32_SOC15(GC, 0, mmSE_CAC_IND_INDEX, (reg));
	WREG32_SOC15(GC, 0, mmSE_CAC_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->se_cac_idx_lock, flags);
}
242 | ||
220ab9bd KW |
243 | static u32 soc15_get_config_memsize(struct amdgpu_device *adev) |
244 | { | |
aecbe64f CZ |
245 | if (adev->flags & AMD_IS_APU) |
246 | return nbio_v7_0_get_memsize(adev); | |
247 | else | |
248 | return nbio_v6_1_get_memsize(adev); | |
220ab9bd KW |
249 | } |
250 | ||
/* Golden register override tables; currently empty placeholders so
 * soc15_init_golden_registers() has a table to program per ASIC. */
static const u32 vega10_golden_init[] =
{
};

static const u32 raven_golden_init[] =
{
};
258 | ||
220ab9bd KW |
/*
 * Program the per-ASIC golden register sequences. Held under
 * grbm_idx_mutex because some of the registers may depend on the
 * currently selected GRBM_GFX_INDEX.
 */
static void soc15_init_golden_registers(struct amdgpu_device *adev)
{
	/* Some of the registers might be dependent on GRBM_GFX_INDEX */
	mutex_lock(&adev->grbm_idx_mutex);

	switch (adev->asic_type) {
	case CHIP_VEGA10:
		amdgpu_program_register_sequence(adev,
						 vega10_golden_init,
						 (const u32)ARRAY_SIZE(vega10_golden_init));
		break;
	case CHIP_RAVEN:
		amdgpu_program_register_sequence(adev,
						 raven_golden_init,
						 (const u32)ARRAY_SIZE(raven_golden_init));
		break;
	default:
		break;
	}
	mutex_unlock(&adev->grbm_idx_mutex);
}
280 | static u32 soc15_get_xclk(struct amdgpu_device *adev) | |
281 | { | |
282 | if (adev->asic_type == CHIP_VEGA10) | |
283 | return adev->clock.spll.reference_freq/4; | |
284 | else | |
285 | return adev->clock.spll.reference_freq; | |
286 | } | |
287 | ||
288 | ||
/*
 * Select the GRBM target for subsequent GFX register accesses by
 * programming pipe/me/vmid/queue into GRBM_GFX_CNTL. Callers are
 * expected to serialize selection (grbm_idx_mutex elsewhere in this file).
 */
void soc15_grbm_select(struct amdgpu_device *adev,
		     u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 grbm_gfx_cntl = 0;
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, MEID, me);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);

	WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_CNTL), grbm_gfx_cntl);
}
300 | ||
/* Enable/disable VGA access; not implemented for SOC15 yet. */
static void soc15_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}
305 | ||
/* Read the VBIOS while the device is disabled; unimplemented stub,
 * always reports failure so callers fall back to other methods. */
static bool soc15_read_disabled_bios(struct amdgpu_device *adev)
{
	/* todo */
	return false;
}
311 | ||
/*
 * Read @length_bytes of VBIOS from the SMUIO ROM mirror into @bios.
 * Returns false for NULL/zero-length buffers and on APUs (whose vbios
 * lives inside the system BIOS image). The length is rounded up to a
 * whole number of dwords, so the destination buffer must have room for
 * ALIGN(length_bytes, 4) bytes.
 *
 * NOTE(review): @bios is cast to u32 * — assumes the caller passes a
 * 4-byte-aligned buffer; confirm at the call sites.
 */
static bool soc15_read_bios_from_rom(struct amdgpu_device *adev,
				     u8 *bios, u32 length_bytes)
{
	u32 *dw_ptr;
	u32 i, length_dw;

	if (bios == NULL)
		return false;
	if (length_bytes == 0)
		return false;
	/* APU vbios image is part of sbios image */
	if (adev->flags & AMD_IS_APU)
		return false;

	dw_ptr = (u32 *)bios;
	length_dw = ALIGN(length_bytes, 4) / 4;

	/* set rom index to 0 */
	WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX), 0);
	/* read out the rom data */
	for (i = 0; i < length_dw; i++)
		dw_ptr[i] = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA));

	return true;
}
337 | ||
/* Whitelist of registers userspace may read through soc15_read_register().
 * Entries initialize only .reg_offset; the remaining fields (e.g.
 * grbm_indexed) are zero-initialized. */
static struct amdgpu_allowed_register_entry soc15_allowed_read_registers[] = {
	{ SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS)},
	{ SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS2)},
	{ SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS_SE0)},
	{ SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS_SE1)},
	{ SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS_SE2)},
	{ SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS_SE3)},
	{ SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_STATUS_REG)},
	{ SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_STATUS_REG)},
	{ SOC15_REG_OFFSET(GC, 0, mmCP_STAT)},
	{ SOC15_REG_OFFSET(GC, 0, mmCP_STALLED_STAT1)},
	{ SOC15_REG_OFFSET(GC, 0, mmCP_STALLED_STAT2)},
	{ SOC15_REG_OFFSET(GC, 0, mmCP_STALLED_STAT3)},
	{ SOC15_REG_OFFSET(GC, 0, mmCP_CPF_BUSY_STAT)},
	{ SOC15_REG_OFFSET(GC, 0, mmCP_CPF_STALLED_STAT1)},
	{ SOC15_REG_OFFSET(GC, 0, mmCP_CPF_STATUS)},
	{ SOC15_REG_OFFSET(GC, 0, mmCP_CPC_STALLED_STAT1)},
	{ SOC15_REG_OFFSET(GC, 0, mmCP_CPC_STATUS)},
	{ SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG)},
};
358 | ||
/*
 * Read a GRBM-indexed register for a specific SE/SH. 0xffffffff in both
 * se_num and sh_num means "broadcast/current selection" and skips the
 * select; otherwise the selection is restored to broadcast afterwards.
 * grbm_idx_mutex guards the select/read/deselect sequence.
 */
static uint32_t soc15_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
					    u32 sh_num, u32 reg_offset)
{
	uint32_t val;

	mutex_lock(&adev->grbm_idx_mutex);
	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

	val = RREG32(reg_offset);

	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);
	return val;
}
375 | ||
c013cea2 AD |
376 | static uint32_t soc15_get_register_value(struct amdgpu_device *adev, |
377 | bool indexed, u32 se_num, | |
378 | u32 sh_num, u32 reg_offset) | |
379 | { | |
380 | if (indexed) { | |
381 | return soc15_read_indexed_register(adev, se_num, sh_num, reg_offset); | |
382 | } else { | |
383 | switch (reg_offset) { | |
384 | case SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG): | |
385 | return adev->gfx.config.gb_addr_config; | |
386 | default: | |
387 | return RREG32(reg_offset); | |
388 | } | |
389 | } | |
390 | } | |
391 | ||
220ab9bd KW |
/*
 * Read a register on behalf of userspace. Only offsets present in
 * soc15_allowed_read_registers[] are served; anything else returns
 * -EINVAL with *value left at 0.
 */
static int soc15_read_register(struct amdgpu_device *adev, u32 se_num,
			       u32 sh_num, u32 reg_offset, u32 *value)
{
	uint32_t i;

	*value = 0;
	for (i = 0; i < ARRAY_SIZE(soc15_allowed_read_registers); i++) {
		if (reg_offset != soc15_allowed_read_registers[i].reg_offset)
			continue;

		*value = soc15_get_register_value(adev,
				soc15_allowed_read_registers[i].grbm_indexed,
				se_num, sh_num, reg_offset);
		return 0;
	}
	return -EINVAL;
}
409 | ||
/*
 * Reset the GPU through PCI config space: disable bus mastering,
 * trigger the config reset, then poll the NBIO memsize register until
 * it reads something other than 0xffffffff (i.e. the ASIC responds
 * again) or usec_timeout expires.
 */
static void soc15_gpu_pci_config_reset(struct amdgpu_device *adev)
{
	u32 i;

	dev_info(adev->dev, "GPU pci config reset\n");

	/* disable BM */
	pci_clear_master(adev->pdev);
	/* reset */
	amdgpu_pci_config_reset(adev);

	udelay(100);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		u32 memsize = (adev->flags & AMD_IS_APU) ?
			nbio_v7_0_get_memsize(adev) :
			nbio_v6_1_get_memsize(adev);
		if (memsize != 0xffffffff)
			break;
		udelay(1);
	}

}
434 | ||
/*
 * Full ASIC reset entry point. Marks the engines as hung in the atombios
 * scratch registers around the PCI config reset so the VBIOS knows a
 * reset is in progress. Always returns 0.
 */
static int soc15_asic_reset(struct amdgpu_device *adev)
{
	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	soc15_gpu_pci_config_reset(adev);

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);

	return 0;
}
445 | ||
446 | /*static int soc15_set_uvd_clock(struct amdgpu_device *adev, u32 clock, | |
447 | u32 cntl_reg, u32 status_reg) | |
448 | { | |
449 | return 0; | |
450 | }*/ | |
451 | ||
/* Set UVD vclk/dclk; the real implementation is still commented out,
 * so this currently succeeds without touching the hardware. */
static int soc15_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	/*int r;

	r = soc15_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
	if (r)
		return r;

	r = soc15_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
	*/
	return 0;
}
464 | ||
/* Set VCE evclk/ecclk; unimplemented stub that reports success. */
static int soc15_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	/* todo */

	return 0;
}
471 | ||
/*
 * Enable PCIe gen2/3 link speeds. Bails out early when: the device sits
 * on a root bus, the amdgpu_pcie_gen2 module parameter disables it, the
 * part is an APU, or neither gen2 nor gen3 is supported. The actual
 * link programming is still a todo.
 */
static void soc15_pcie_gen3_enable(struct amdgpu_device *adev)
{
	if (pci_is_root_bus(adev->pdev->bus))
		return;

	if (amdgpu_pcie_gen2 == 0)
		return;

	if (adev->flags & AMD_IS_APU)
		return;

	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
		return;

	/* todo */
}
489 | ||
/* Program ASPM (PCIe link power management); no-op when disabled via
 * the amdgpu_aspm module parameter, otherwise still a todo. */
static void soc15_program_aspm(struct amdgpu_device *adev)
{

	if (amdgpu_aspm == 0)
		return;

	/* todo */
}
498 | ||
/*
 * Toggle the doorbell aperture via the matching NBIO block. On discrete
 * parts (v6.1) the self-ring aperture is switched together with it; the
 * APU path (v7.0) exposes no self-ring control here.
 */
static void soc15_enable_doorbell_aperture(struct amdgpu_device *adev,
					   bool enable)
{
	if (adev->flags & AMD_IS_APU) {
		nbio_v7_0_enable_doorbell_aperture(adev, enable);
	} else {
		nbio_v6_1_enable_doorbell_aperture(adev, enable);
		nbio_v6_1_enable_doorbell_selfring_aperture(adev, enable);
	}
}
509 | ||
/* COMMON IP block descriptor (v2.0) wiring up the soc15 amd_ip_funcs;
 * registered first in soc15_set_ip_blocks() for both Vega10 and Raven. */
static const struct amdgpu_ip_block_version vega10_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 2,
	.minor = 0,
	.rev = 0,
	.funcs = &soc15_common_ip_funcs,
};
518 | ||
/*
 * Register the per-ASIC IP block list. The order of amdgpu_ip_block_add()
 * calls is significant — it determines init/fini ordering — so do not
 * reorder. Also detects SR-IOV and installs the AI virt ops when running
 * as a virtual function. Returns -EINVAL for unknown ASICs.
 */
int soc15_set_ip_blocks(struct amdgpu_device *adev)
{
	nbio_v6_1_detect_hw_virt(adev);

	if (amdgpu_sriov_vf(adev))
		adev->virt.ops = &xgpu_ai_virt_ops;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
		amdgpu_ip_block_add(adev, &vega10_common_ip_block);
		amdgpu_ip_block_add(adev, &gmc_v9_0_ip_block);
		amdgpu_ip_block_add(adev, &vega10_ih_ip_block);
		/* PSP only when direct (2) or auto (-1) fw load is requested */
		if (amdgpu_fw_load_type == 2 || amdgpu_fw_load_type == -1)
			amdgpu_ip_block_add(adev, &psp_v3_1_ip_block);
		if (!amdgpu_sriov_vf(adev))
			amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
		amdgpu_ip_block_add(adev, &gfx_v9_0_ip_block);
		amdgpu_ip_block_add(adev, &sdma_v4_0_ip_block);
		amdgpu_ip_block_add(adev, &uvd_v7_0_ip_block);
		amdgpu_ip_block_add(adev, &vce_v4_0_ip_block);
		break;
	case CHIP_RAVEN:
		amdgpu_ip_block_add(adev, &vega10_common_ip_block);
		amdgpu_ip_block_add(adev, &gmc_v9_0_ip_block);
		amdgpu_ip_block_add(adev, &vega10_ih_ip_block);
		amdgpu_ip_block_add(adev, &psp_v10_0_ip_block);
		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
		amdgpu_ip_block_add(adev, &gfx_v9_0_ip_block);
		amdgpu_ip_block_add(adev, &sdma_v4_0_ip_block);
		amdgpu_ip_block_add(adev, &vcn_v1_0_ip_block);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
560 | ||
561 | static uint32_t soc15_get_rev_id(struct amdgpu_device *adev) | |
562 | { | |
aecbe64f CZ |
563 | if (adev->flags & AMD_IS_APU) |
564 | return nbio_v7_0_get_rev_id(adev); | |
565 | else | |
566 | return nbio_v6_1_get_rev_id(adev); | |
220ab9bd KW |
567 | } |
568 | ||
569 | ||
/* Wait for the memory controller to go idle; placeholder until the MC
 * IP code implements it — currently reports idle immediately. */
int gmc_v9_0_mc_wait_for_idle(struct amdgpu_device *adev)
{
	/* to be implemented in MC IP*/
	return 0;
}
575 | ||
/* ASIC-level callback table installed into adev->asic_funcs by
 * soc15_common_early_init(). */
static const struct amdgpu_asic_funcs soc15_asic_funcs =
{
	.read_disabled_bios = &soc15_read_disabled_bios,
	.read_bios_from_rom = &soc15_read_bios_from_rom,
	.read_register = &soc15_read_register,
	.reset = &soc15_asic_reset,
	.set_vga_state = &soc15_vga_set_state,
	.get_xclk = &soc15_get_xclk,
	.set_uvd_clocks = &soc15_set_uvd_clocks,
	.set_vce_clocks = &soc15_set_vce_clocks,
	.get_config_memsize = &soc15_get_config_memsize,
};
588 | ||
/*
 * Early init for the COMMON IP block: installs the indirect register
 * accessors and asic_funcs, initializes the proper NBIO block, reads the
 * revision id, and sets the per-ASIC clock-/power-gating capability
 * flags. Returns -EINVAL for unknown ASICs.
 */
static int soc15_common_early_init(void *handle)
{
	/* NOTE(review): psp_enabled is computed below but never consumed in
	 * this function — confirm whether it is dead and can be removed. */
	bool psp_enabled = false;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* SOC15 has no SMC index/data pair; the rest route through the
	 * indirect accessors defined earlier in this file. */
	adev->smc_rreg = NULL;
	adev->smc_wreg = NULL;
	adev->pcie_rreg = &soc15_pcie_rreg;
	adev->pcie_wreg = &soc15_pcie_wreg;
	adev->uvd_ctx_rreg = &soc15_uvd_ctx_rreg;
	adev->uvd_ctx_wreg = &soc15_uvd_ctx_wreg;
	adev->didt_rreg = &soc15_didt_rreg;
	adev->didt_wreg = &soc15_didt_wreg;
	adev->gc_cac_rreg = &soc15_gc_cac_rreg;
	adev->gc_cac_wreg = &soc15_gc_cac_wreg;
	adev->se_cac_rreg = &soc15_se_cac_rreg;
	adev->se_cac_wreg = &soc15_se_cac_wreg;

	adev->asic_funcs = &soc15_asic_funcs;

	if (amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP) &&
	    (amdgpu_ip_block_mask & (1 << AMD_IP_BLOCK_TYPE_PSP)))
		psp_enabled = true;

	/*
	 * nbio need be used for both sdma and gfx9, but only
	 * initializes once
	 */
	switch(adev->asic_type) {
	case CHIP_VEGA10:
		nbio_v6_1_init(adev);
		break;
	case CHIP_RAVEN:
		nbio_v7_0_init(adev);
		break;
	default:
		return -EINVAL;
	}

	adev->rev_id = soc15_get_rev_id(adev);
	adev->external_rev_id = 0xFF;
	switch (adev->asic_type) {
	case CHIP_VEGA10:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_DRM_MGCG |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_DF_MGCG |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS;
		adev->pg_flags = 0;
		adev->external_rev_id = 0x1;
		break;
	case CHIP_RAVEN:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_DRM_MGCG |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS;
		adev->pg_flags = AMD_PG_SUPPORT_SDMA |
			AMD_PG_SUPPORT_MMHUB;
		adev->external_rev_id = 0x1;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	if (amdgpu_sriov_vf(adev)) {
		amdgpu_virt_init_setting(adev);
		xgpu_ai_mailbox_set_irq_funcs(adev);
	}

	adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);

	amdgpu_get_pcie_info(adev);

	return 0;
}
694 | ||
81758c55 ML |
/* Late init: under SR-IOV, request the AI mailbox interrupt. */
static int soc15_common_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_ai_mailbox_get_irq(adev);

	return 0;
}
704 | ||
220ab9bd KW |
/* SW init: under SR-IOV, register the AI mailbox irq source. */
static int soc15_common_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_ai_mailbox_add_irq_id(adev);

	return 0;
}
714 | ||
/* SW fini: nothing to tear down for the COMMON block. */
static int soc15_common_sw_fini(void *handle)
{
	return 0;
}
719 | ||
/*
 * HW init for the COMMON block: program golden registers, attempt
 * PCIe gen2/3 and ASPM setup, initialize NBIO registers on discrete
 * parts, and open the doorbell aperture.
 */
static int soc15_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* move the golden regs per IP block */
	soc15_init_golden_registers(adev);
	/* enable pcie gen2/3 link */
	soc15_pcie_gen3_enable(adev);
	/* enable aspm */
	soc15_program_aspm(adev);
	/* setup nbio registers */
	if (!(adev->flags & AMD_IS_APU))
		nbio_v6_1_init_registers(adev);
	/* enable the doorbell aperture */
	soc15_enable_doorbell_aperture(adev, true);

	return 0;
}
738 | ||
/* HW fini: close the doorbell aperture and, under SR-IOV, release the
 * AI mailbox interrupt. */
static int soc15_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* disable the doorbell aperture */
	soc15_enable_doorbell_aperture(adev, false);
	if (amdgpu_sriov_vf(adev))
		xgpu_ai_mailbox_put_irq(adev);

	return 0;
}
750 | ||
/* Suspend is simply hw_fini for the COMMON block; the handle is
 * forwarded unchanged. */
static int soc15_common_suspend(void *handle)
{
	return soc15_common_hw_fini(handle);
}
757 | ||
/* Resume is simply hw_init for the COMMON block; the handle is
 * forwarded unchanged. */
static int soc15_common_resume(void *handle)
{
	return soc15_common_hw_init(handle);
}
764 | ||
/* COMMON block has no busy state to report; always idle. */
static bool soc15_common_is_idle(void *handle)
{
	return true;
}
769 | ||
/* Nothing to wait for on the COMMON block; succeed immediately. */
static int soc15_common_wait_for_idle(void *handle)
{
	return 0;
}
774 | ||
/* Soft reset: no-op for the COMMON block. */
static int soc15_common_soft_reset(void *handle)
{
	return 0;
}
779 | ||
/*
 * Toggle HDP memory light sleep (read-modify-write of
 * HDP_MEM_POWER_LS.LS_ENABLE), gated by AMD_CG_SUPPORT_HDP_LS.
 * Writes back only when the value actually changed.
 */
static void soc15_update_hdp_light_sleep(struct amdgpu_device *adev, bool enable)
{
	uint32_t def, data;

	def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
		data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
	else
		data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;

	if (def != data)
		WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS), data);
}
794 | ||
795 | static void soc15_update_drm_clock_gating(struct amdgpu_device *adev, bool enable) | |
796 | { | |
797 | uint32_t def, data; | |
798 | ||
799 | def = data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0)); | |
800 | ||
801 | if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_MGCG)) | |
802 | data &= ~(0x01000000 | | |
803 | 0x02000000 | | |
804 | 0x04000000 | | |
805 | 0x08000000 | | |
806 | 0x10000000 | | |
807 | 0x20000000 | | |
808 | 0x40000000 | | |
809 | 0x80000000); | |
810 | else | |
811 | data |= (0x01000000 | | |
812 | 0x02000000 | | |
813 | 0x04000000 | | |
814 | 0x08000000 | | |
815 | 0x10000000 | | |
816 | 0x20000000 | | |
817 | 0x40000000 | | |
818 | 0x80000000); | |
819 | ||
820 | if (def != data) | |
821 | WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0), data); | |
822 | } | |
823 | ||
/*
 * Toggle DRM light sleep via bit 0 of MP0_MISC_LIGHT_SLEEP_CTRL, gated
 * by AMD_CG_SUPPORT_DRM_LS. Writes back only on change.
 */
static void soc15_update_drm_light_sleep(struct amdgpu_device *adev, bool enable)
{
	uint32_t def, data;

	def = data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL));

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS))
		data |= 1;
	else
		data &= ~1;

	if (def != data)
		WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL), data);
}
838 | ||
/*
 * Toggle ROM medium-grain clock gating by clearing (gate) or setting
 * (ungate) the SOFT_OVERRIDE bits of CGTT_ROM_CLK_CTRL0, gated by
 * AMD_CG_SUPPORT_ROM_MGCG. Writes back only on change.
 */
static void soc15_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
						       bool enable)
{
	uint32_t def, data;

	def = data = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0));

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG))
		data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
			CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK);
	else
		data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
			CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK;

	if (def != data)
		WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0), data);
}
856 | ||
857 | static void soc15_update_df_medium_grain_clock_gating(struct amdgpu_device *adev, | |
858 | bool enable) | |
859 | { | |
860 | uint32_t data; | |
861 | ||
862 | /* Put DF on broadcast mode */ | |
863 | data = RREG32(SOC15_REG_OFFSET(DF, 0, mmFabricConfigAccessControl)); | |
864 | data &= ~FabricConfigAccessControl__CfgRegInstAccEn_MASK; | |
865 | WREG32(SOC15_REG_OFFSET(DF, 0, mmFabricConfigAccessControl), data); | |
866 | ||
867 | if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DF_MGCG)) { | |
868 | data = RREG32(SOC15_REG_OFFSET(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater)); | |
869 | data &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK; | |
870 | data |= DF_MGCG_ENABLE_15_CYCLE_DELAY; | |
871 | WREG32(SOC15_REG_OFFSET(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater), data); | |
872 | } else { | |
873 | data = RREG32(SOC15_REG_OFFSET(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater)); | |
874 | data &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK; | |
875 | data |= DF_MGCG_DISABLE; | |
876 | WREG32(SOC15_REG_OFFSET(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater), data); | |
877 | } | |
878 | ||
879 | WREG32(SOC15_REG_OFFSET(DF, 0, mmFabricConfigAccessControl), | |
880 | mmFabricConfigAccessControl_DEFAULT); | |
881 | } | |
882 | ||
883 | static int soc15_common_set_clockgating_state(void *handle, | |
884 | enum amd_clockgating_state state) | |
885 | { | |
886 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | |
887 | ||
6e9dc861 ML |
888 | if (amdgpu_sriov_vf(adev)) |
889 | return 0; | |
890 | ||
220ab9bd KW |
891 | switch (adev->asic_type) { |
892 | case CHIP_VEGA10: | |
893 | nbio_v6_1_update_medium_grain_clock_gating(adev, | |
894 | state == AMD_CG_STATE_GATE ? true : false); | |
895 | nbio_v6_1_update_medium_grain_light_sleep(adev, | |
896 | state == AMD_CG_STATE_GATE ? true : false); | |
897 | soc15_update_hdp_light_sleep(adev, | |
898 | state == AMD_CG_STATE_GATE ? true : false); | |
899 | soc15_update_drm_clock_gating(adev, | |
900 | state == AMD_CG_STATE_GATE ? true : false); | |
901 | soc15_update_drm_light_sleep(adev, | |
902 | state == AMD_CG_STATE_GATE ? true : false); | |
903 | soc15_update_rom_medium_grain_clock_gating(adev, | |
904 | state == AMD_CG_STATE_GATE ? true : false); | |
905 | soc15_update_df_medium_grain_clock_gating(adev, | |
906 | state == AMD_CG_STATE_GATE ? true : false); | |
907 | break; | |
9e5a9eb4 | 908 | case CHIP_RAVEN: |
7fda6eca | 909 | nbio_v7_0_update_medium_grain_clock_gating(adev, |
9e5a9eb4 HR |
910 | state == AMD_CG_STATE_GATE ? true : false); |
911 | nbio_v6_1_update_medium_grain_light_sleep(adev, | |
912 | state == AMD_CG_STATE_GATE ? true : false); | |
913 | soc15_update_hdp_light_sleep(adev, | |
914 | state == AMD_CG_STATE_GATE ? true : false); | |
915 | soc15_update_drm_clock_gating(adev, | |
916 | state == AMD_CG_STATE_GATE ? true : false); | |
917 | soc15_update_drm_light_sleep(adev, | |
918 | state == AMD_CG_STATE_GATE ? true : false); | |
919 | soc15_update_rom_medium_grain_clock_gating(adev, | |
920 | state == AMD_CG_STATE_GATE ? true : false); | |
921 | break; | |
220ab9bd KW |
922 | default: |
923 | break; | |
924 | } | |
925 | return 0; | |
926 | } | |
927 | ||
f9abe35c HR |
928 | static void soc15_common_get_clockgating_state(void *handle, u32 *flags) |
929 | { | |
930 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | |
931 | int data; | |
932 | ||
933 | if (amdgpu_sriov_vf(adev)) | |
934 | *flags = 0; | |
935 | ||
936 | nbio_v6_1_get_clockgating_state(adev, flags); | |
937 | ||
938 | /* AMD_CG_SUPPORT_HDP_LS */ | |
939 | data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS)); | |
940 | if (data & HDP_MEM_POWER_LS__LS_ENABLE_MASK) | |
941 | *flags |= AMD_CG_SUPPORT_HDP_LS; | |
942 | ||
943 | /* AMD_CG_SUPPORT_DRM_MGCG */ | |
944 | data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0)); | |
945 | if (!(data & 0x01000000)) | |
946 | *flags |= AMD_CG_SUPPORT_DRM_MGCG; | |
947 | ||
948 | /* AMD_CG_SUPPORT_DRM_LS */ | |
949 | data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL)); | |
950 | if (data & 0x1) | |
951 | *flags |= AMD_CG_SUPPORT_DRM_LS; | |
952 | ||
953 | /* AMD_CG_SUPPORT_ROM_MGCG */ | |
954 | data = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0)); | |
955 | if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK)) | |
956 | *flags |= AMD_CG_SUPPORT_ROM_MGCG; | |
957 | ||
958 | /* AMD_CG_SUPPORT_DF_MGCG */ | |
959 | data = RREG32(SOC15_REG_OFFSET(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater)); | |
960 | if (data & DF_MGCG_ENABLE_15_CYCLE_DELAY) | |
961 | *flags |= AMD_CG_SUPPORT_DF_MGCG; | |
962 | } | |
963 | ||
220ab9bd KW |
964 | static int soc15_common_set_powergating_state(void *handle, |
965 | enum amd_powergating_state state) | |
966 | { | |
967 | /* todo */ | |
968 | return 0; | |
969 | } | |
970 | ||
971 | const struct amd_ip_funcs soc15_common_ip_funcs = { | |
972 | .name = "soc15_common", | |
973 | .early_init = soc15_common_early_init, | |
81758c55 | 974 | .late_init = soc15_common_late_init, |
220ab9bd KW |
975 | .sw_init = soc15_common_sw_init, |
976 | .sw_fini = soc15_common_sw_fini, | |
977 | .hw_init = soc15_common_hw_init, | |
978 | .hw_fini = soc15_common_hw_fini, | |
979 | .suspend = soc15_common_suspend, | |
980 | .resume = soc15_common_resume, | |
981 | .is_idle = soc15_common_is_idle, | |
982 | .wait_for_idle = soc15_common_wait_for_idle, | |
983 | .soft_reset = soc15_common_soft_reset, | |
984 | .set_clockgating_state = soc15_common_set_clockgating_state, | |
985 | .set_powergating_state = soc15_common_set_powergating_state, | |
f9abe35c | 986 | .get_clockgating_state= soc15_common_get_clockgating_state, |
220ab9bd | 987 | }; |