/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
24 #include <linux/delay.h>
26 #include <linux/module.h>
27 #include <linux/slab.h>
30 #include "amd_powerplay.h"
31 #include "vega10_smumgr.h"
32 #include "hardwaremanager.h"
33 #include "ppatomfwctrl.h"
34 #include "atomfirmware.h"
35 #include "cgs_common.h"
36 #include "vega10_powertune.h"
38 #include "smu9_driver_if.h"
39 #include "vega10_inc.h"
41 #include "pppcielanes.h"
42 #include "vega10_hwmgr.h"
43 #include "vega10_processpptables.h"
44 #include "vega10_pptable.h"
45 #include "vega10_thermal.h"
48 #include "amd_pcie_helpers.h"
49 #include "cgs_linux.h"
50 #include "ppinterrupt.h"
51 #include "pp_overdriver.h"
/* Scale factors used when converting voltage VID codes to/from millivolts. */
#define VOLTAGE_SCALE			4
#define VOLTAGE_VID_OFFSET_SCALE1	625
#define VOLTAGE_VID_OFFSET_SCALE2	100

#define HBM_MEMORY_CHANNEL_WIDTH	128

/*
 * Lookup from the DF IntLvNumChan encoding to the number of interleaved
 * DRAM channels; zero entries are invalid/unused encodings.
 */
static const uint32_t channel_number[] = {1, 2, 0, 4, 0, 8, 0, 16, 2};

/*
 * Memory-clock thresholds and the latency values reported for them.
 * NOTE(review): units not evident from this file — presumably 10 kHz
 * clocks and nanosecond latencies; confirm against the latency callback.
 */
#define MEM_FREQ_LOW_LATENCY	25000
#define MEM_FREQ_HIGH_LATENCY	80000
#define MEM_LATENCY_HIGH	245
#define MEM_LATENCY_LOW		35
#define MEM_LATENCY_ERR		0xFFFF

/*
 * Data-fabric DramBaseAddress0 register offset and bitfields, defined
 * locally because they are not part of the generated register headers.
 */
#define mmDF_CS_AON0_DramBaseAddress0			0x0044
#define mmDF_CS_AON0_DramBaseAddress0_BASE_IDX		0

//DF_CS_AON0_DramBaseAddress0
#define DF_CS_AON0_DramBaseAddress0__AddrRngVal__SHIFT		0x0
#define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn__SHIFT	0x1
#define DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT	0x4
#define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel__SHIFT	0x8
#define DF_CS_AON0_DramBaseAddress0__DramBaseAddr__SHIFT	0xc
#define DF_CS_AON0_DramBaseAddress0__AddrRngVal_MASK		0x00000001L
#define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn_MASK	0x00000002L
#define DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK		0x000000F0L
#define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel_MASK		0x00000700L
#define DF_CS_AON0_DramBaseAddress0__DramBaseAddr_MASK		0xFFFFF000L
/* Forward declaration: implementation appears later in this file. */
static int vega10_force_clock_level(struct pp_hwmgr *hwmgr,
		enum pp_clock_type type, uint32_t mask);
84 static const ULONG PhwVega10_Magic
= (ULONG
)(PHM_VIslands_Magic
);
86 struct vega10_power_state
*cast_phw_vega10_power_state(
87 struct pp_hw_power_state
*hw_ps
)
89 PP_ASSERT_WITH_CODE((PhwVega10_Magic
== hw_ps
->magic
),
90 "Invalid Powerstate Type!",
93 return (struct vega10_power_state
*)hw_ps
;
96 const struct vega10_power_state
*cast_const_phw_vega10_power_state(
97 const struct pp_hw_power_state
*hw_ps
)
99 PP_ASSERT_WITH_CODE((PhwVega10_Magic
== hw_ps
->magic
),
100 "Invalid Powerstate Type!",
103 return (const struct vega10_power_state
*)hw_ps
;
106 static void vega10_set_default_registry_data(struct pp_hwmgr
*hwmgr
)
108 struct vega10_hwmgr
*data
=
109 (struct vega10_hwmgr
*)(hwmgr
->backend
);
111 data
->registry_data
.sclk_dpm_key_disabled
=
112 hwmgr
->feature_mask
& PP_SCLK_DPM_MASK
? false : true;
113 data
->registry_data
.socclk_dpm_key_disabled
=
114 hwmgr
->feature_mask
& PP_SOCCLK_DPM_MASK
? false : true;
115 data
->registry_data
.mclk_dpm_key_disabled
=
116 hwmgr
->feature_mask
& PP_MCLK_DPM_MASK
? false : true;
117 data
->registry_data
.pcie_dpm_key_disabled
=
118 hwmgr
->feature_mask
& PP_PCIE_DPM_MASK
? false : true;
120 data
->registry_data
.dcefclk_dpm_key_disabled
=
121 hwmgr
->feature_mask
& PP_DCEFCLK_DPM_MASK
? false : true;
123 if (hwmgr
->feature_mask
& PP_POWER_CONTAINMENT_MASK
) {
124 data
->registry_data
.power_containment_support
= 1;
125 data
->registry_data
.enable_pkg_pwr_tracking_feature
= 1;
126 data
->registry_data
.enable_tdc_limit_feature
= 1;
129 data
->registry_data
.clock_stretcher_support
=
130 hwmgr
->feature_mask
& PP_CLOCK_STRETCH_MASK
? true : false;
132 data
->registry_data
.ulv_support
=
133 hwmgr
->feature_mask
& PP_ULV_MASK
? true : false;
135 data
->registry_data
.sclk_deep_sleep_support
=
136 hwmgr
->feature_mask
& PP_SCLK_DEEP_SLEEP_MASK
? true : false;
138 data
->registry_data
.disable_water_mark
= 0;
140 data
->registry_data
.fan_control_support
= 1;
141 data
->registry_data
.thermal_support
= 1;
142 data
->registry_data
.fw_ctf_enabled
= 1;
144 data
->registry_data
.avfs_support
= 1;
145 data
->registry_data
.led_dpm_enabled
= 1;
147 data
->registry_data
.vr0hot_enabled
= 1;
148 data
->registry_data
.vr1hot_enabled
= 1;
149 data
->registry_data
.regulator_hot_gpio_support
= 1;
151 data
->registry_data
.didt_support
= 1;
152 if (data
->registry_data
.didt_support
) {
153 data
->registry_data
.didt_mode
= 6;
154 data
->registry_data
.sq_ramping_support
= 1;
155 data
->registry_data
.db_ramping_support
= 0;
156 data
->registry_data
.td_ramping_support
= 0;
157 data
->registry_data
.tcp_ramping_support
= 0;
158 data
->registry_data
.dbr_ramping_support
= 0;
159 data
->registry_data
.edc_didt_support
= 1;
160 data
->registry_data
.gc_didt_support
= 0;
161 data
->registry_data
.psm_didt_support
= 0;
164 data
->display_voltage_mode
= PPVEGA10_VEGA10DISPLAYVOLTAGEMODE_DFLT
;
165 data
->dcef_clk_quad_eqn_a
= PPREGKEY_VEGA10QUADRATICEQUATION_DFLT
;
166 data
->dcef_clk_quad_eqn_b
= PPREGKEY_VEGA10QUADRATICEQUATION_DFLT
;
167 data
->dcef_clk_quad_eqn_c
= PPREGKEY_VEGA10QUADRATICEQUATION_DFLT
;
168 data
->disp_clk_quad_eqn_a
= PPREGKEY_VEGA10QUADRATICEQUATION_DFLT
;
169 data
->disp_clk_quad_eqn_b
= PPREGKEY_VEGA10QUADRATICEQUATION_DFLT
;
170 data
->disp_clk_quad_eqn_c
= PPREGKEY_VEGA10QUADRATICEQUATION_DFLT
;
171 data
->pixel_clk_quad_eqn_a
= PPREGKEY_VEGA10QUADRATICEQUATION_DFLT
;
172 data
->pixel_clk_quad_eqn_b
= PPREGKEY_VEGA10QUADRATICEQUATION_DFLT
;
173 data
->pixel_clk_quad_eqn_c
= PPREGKEY_VEGA10QUADRATICEQUATION_DFLT
;
174 data
->phy_clk_quad_eqn_a
= PPREGKEY_VEGA10QUADRATICEQUATION_DFLT
;
175 data
->phy_clk_quad_eqn_b
= PPREGKEY_VEGA10QUADRATICEQUATION_DFLT
;
176 data
->phy_clk_quad_eqn_c
= PPREGKEY_VEGA10QUADRATICEQUATION_DFLT
;
178 data
->gfxclk_average_alpha
= PPVEGA10_VEGA10GFXCLKAVERAGEALPHA_DFLT
;
179 data
->socclk_average_alpha
= PPVEGA10_VEGA10SOCCLKAVERAGEALPHA_DFLT
;
180 data
->uclk_average_alpha
= PPVEGA10_VEGA10UCLKCLKAVERAGEALPHA_DFLT
;
181 data
->gfx_activity_average_alpha
= PPVEGA10_VEGA10GFXACTIVITYAVERAGEALPHA_DFLT
;
184 static int vega10_set_features_platform_caps(struct pp_hwmgr
*hwmgr
)
186 struct vega10_hwmgr
*data
=
187 (struct vega10_hwmgr
*)(hwmgr
->backend
);
188 struct phm_ppt_v2_information
*table_info
=
189 (struct phm_ppt_v2_information
*)hwmgr
->pptable
;
190 struct cgs_system_info sys_info
= {0};
193 phm_cap_set(hwmgr
->platform_descriptor
.platformCaps
,
194 PHM_PlatformCaps_SclkDeepSleep
);
196 phm_cap_set(hwmgr
->platform_descriptor
.platformCaps
,
197 PHM_PlatformCaps_DynamicPatchPowerState
);
199 if (data
->vddci_control
== VEGA10_VOLTAGE_CONTROL_NONE
)
200 phm_cap_unset(hwmgr
->platform_descriptor
.platformCaps
,
201 PHM_PlatformCaps_ControlVDDCI
);
203 phm_cap_set(hwmgr
->platform_descriptor
.platformCaps
,
204 PHM_PlatformCaps_EnableSMU7ThermalManagement
);
206 sys_info
.size
= sizeof(struct cgs_system_info
);
207 sys_info
.info_id
= CGS_SYSTEM_INFO_PG_FLAGS
;
208 result
= cgs_query_system_info(hwmgr
->device
, &sys_info
);
210 if (!result
&& (sys_info
.value
& AMD_PG_SUPPORT_UVD
))
211 phm_cap_set(hwmgr
->platform_descriptor
.platformCaps
,
212 PHM_PlatformCaps_UVDPowerGating
);
214 if (!result
&& (sys_info
.value
& AMD_PG_SUPPORT_VCE
))
215 phm_cap_set(hwmgr
->platform_descriptor
.platformCaps
,
216 PHM_PlatformCaps_VCEPowerGating
);
218 phm_cap_set(hwmgr
->platform_descriptor
.platformCaps
,
219 PHM_PlatformCaps_UnTabledHardwareInterface
);
221 phm_cap_set(hwmgr
->platform_descriptor
.platformCaps
,
222 PHM_PlatformCaps_FanSpeedInTableIsRPM
);
224 phm_cap_set(hwmgr
->platform_descriptor
.platformCaps
,
225 PHM_PlatformCaps_ODFuzzyFanControlSupport
);
227 phm_cap_set(hwmgr
->platform_descriptor
.platformCaps
,
228 PHM_PlatformCaps_DynamicPowerManagement
);
230 phm_cap_set(hwmgr
->platform_descriptor
.platformCaps
,
231 PHM_PlatformCaps_SMC
);
233 /* power tune caps */
234 /* assume disabled */
235 phm_cap_unset(hwmgr
->platform_descriptor
.platformCaps
,
236 PHM_PlatformCaps_PowerContainment
);
237 phm_cap_unset(hwmgr
->platform_descriptor
.platformCaps
,
238 PHM_PlatformCaps_DiDtSupport
);
239 phm_cap_unset(hwmgr
->platform_descriptor
.platformCaps
,
240 PHM_PlatformCaps_SQRamping
);
241 phm_cap_unset(hwmgr
->platform_descriptor
.platformCaps
,
242 PHM_PlatformCaps_DBRamping
);
243 phm_cap_unset(hwmgr
->platform_descriptor
.platformCaps
,
244 PHM_PlatformCaps_TDRamping
);
245 phm_cap_unset(hwmgr
->platform_descriptor
.platformCaps
,
246 PHM_PlatformCaps_TCPRamping
);
247 phm_cap_unset(hwmgr
->platform_descriptor
.platformCaps
,
248 PHM_PlatformCaps_DBRRamping
);
249 phm_cap_unset(hwmgr
->platform_descriptor
.platformCaps
,
250 PHM_PlatformCaps_DiDtEDCEnable
);
251 phm_cap_unset(hwmgr
->platform_descriptor
.platformCaps
,
252 PHM_PlatformCaps_GCEDC
);
253 phm_cap_unset(hwmgr
->platform_descriptor
.platformCaps
,
254 PHM_PlatformCaps_PSM
);
256 if (data
->registry_data
.didt_support
) {
257 phm_cap_set(hwmgr
->platform_descriptor
.platformCaps
, PHM_PlatformCaps_DiDtSupport
);
258 if (data
->registry_data
.sq_ramping_support
)
259 phm_cap_set(hwmgr
->platform_descriptor
.platformCaps
, PHM_PlatformCaps_SQRamping
);
260 if (data
->registry_data
.db_ramping_support
)
261 phm_cap_set(hwmgr
->platform_descriptor
.platformCaps
, PHM_PlatformCaps_DBRamping
);
262 if (data
->registry_data
.td_ramping_support
)
263 phm_cap_set(hwmgr
->platform_descriptor
.platformCaps
, PHM_PlatformCaps_TDRamping
);
264 if (data
->registry_data
.tcp_ramping_support
)
265 phm_cap_set(hwmgr
->platform_descriptor
.platformCaps
, PHM_PlatformCaps_TCPRamping
);
266 if (data
->registry_data
.dbr_ramping_support
)
267 phm_cap_set(hwmgr
->platform_descriptor
.platformCaps
, PHM_PlatformCaps_DBRRamping
);
268 if (data
->registry_data
.edc_didt_support
)
269 phm_cap_set(hwmgr
->platform_descriptor
.platformCaps
, PHM_PlatformCaps_DiDtEDCEnable
);
270 if (data
->registry_data
.gc_didt_support
)
271 phm_cap_set(hwmgr
->platform_descriptor
.platformCaps
, PHM_PlatformCaps_GCEDC
);
272 if (data
->registry_data
.psm_didt_support
)
273 phm_cap_set(hwmgr
->platform_descriptor
.platformCaps
, PHM_PlatformCaps_PSM
);
276 if (data
->registry_data
.power_containment_support
)
277 phm_cap_set(hwmgr
->platform_descriptor
.platformCaps
,
278 PHM_PlatformCaps_PowerContainment
);
279 phm_cap_set(hwmgr
->platform_descriptor
.platformCaps
,
280 PHM_PlatformCaps_CAC
);
282 if (table_info
->tdp_table
->usClockStretchAmount
&&
283 data
->registry_data
.clock_stretcher_support
)
284 phm_cap_set(hwmgr
->platform_descriptor
.platformCaps
,
285 PHM_PlatformCaps_ClockStretcher
);
287 phm_cap_set(hwmgr
->platform_descriptor
.platformCaps
,
288 PHM_PlatformCaps_RegulatorHot
);
289 phm_cap_set(hwmgr
->platform_descriptor
.platformCaps
,
290 PHM_PlatformCaps_AutomaticDCTransition
);
292 phm_cap_set(hwmgr
->platform_descriptor
.platformCaps
,
293 PHM_PlatformCaps_UVDDPM
);
294 phm_cap_set(hwmgr
->platform_descriptor
.platformCaps
,
295 PHM_PlatformCaps_VCEDPM
);
300 static void vega10_init_dpm_defaults(struct pp_hwmgr
*hwmgr
)
302 struct vega10_hwmgr
*data
= (struct vega10_hwmgr
*)(hwmgr
->backend
);
305 vega10_initialize_power_tune_defaults(hwmgr
);
307 for (i
= 0; i
< GNLD_FEATURES_MAX
; i
++) {
308 data
->smu_features
[i
].smu_feature_id
= 0xffff;
309 data
->smu_features
[i
].smu_feature_bitmap
= 1 << i
;
310 data
->smu_features
[i
].enabled
= false;
311 data
->smu_features
[i
].supported
= false;
314 data
->smu_features
[GNLD_DPM_PREFETCHER
].smu_feature_id
=
315 FEATURE_DPM_PREFETCHER_BIT
;
316 data
->smu_features
[GNLD_DPM_GFXCLK
].smu_feature_id
=
317 FEATURE_DPM_GFXCLK_BIT
;
318 data
->smu_features
[GNLD_DPM_UCLK
].smu_feature_id
=
319 FEATURE_DPM_UCLK_BIT
;
320 data
->smu_features
[GNLD_DPM_SOCCLK
].smu_feature_id
=
321 FEATURE_DPM_SOCCLK_BIT
;
322 data
->smu_features
[GNLD_DPM_UVD
].smu_feature_id
=
324 data
->smu_features
[GNLD_DPM_VCE
].smu_feature_id
=
326 data
->smu_features
[GNLD_DPM_MP0CLK
].smu_feature_id
=
327 FEATURE_DPM_MP0CLK_BIT
;
328 data
->smu_features
[GNLD_DPM_LINK
].smu_feature_id
=
329 FEATURE_DPM_LINK_BIT
;
330 data
->smu_features
[GNLD_DPM_DCEFCLK
].smu_feature_id
=
331 FEATURE_DPM_DCEFCLK_BIT
;
332 data
->smu_features
[GNLD_ULV
].smu_feature_id
=
334 data
->smu_features
[GNLD_AVFS
].smu_feature_id
=
336 data
->smu_features
[GNLD_DS_GFXCLK
].smu_feature_id
=
337 FEATURE_DS_GFXCLK_BIT
;
338 data
->smu_features
[GNLD_DS_SOCCLK
].smu_feature_id
=
339 FEATURE_DS_SOCCLK_BIT
;
340 data
->smu_features
[GNLD_DS_LCLK
].smu_feature_id
=
342 data
->smu_features
[GNLD_PPT
].smu_feature_id
=
344 data
->smu_features
[GNLD_TDC
].smu_feature_id
=
346 data
->smu_features
[GNLD_THERMAL
].smu_feature_id
=
348 data
->smu_features
[GNLD_GFX_PER_CU_CG
].smu_feature_id
=
349 FEATURE_GFX_PER_CU_CG_BIT
;
350 data
->smu_features
[GNLD_RM
].smu_feature_id
=
352 data
->smu_features
[GNLD_DS_DCEFCLK
].smu_feature_id
=
353 FEATURE_DS_DCEFCLK_BIT
;
354 data
->smu_features
[GNLD_ACDC
].smu_feature_id
=
356 data
->smu_features
[GNLD_VR0HOT
].smu_feature_id
=
358 data
->smu_features
[GNLD_VR1HOT
].smu_feature_id
=
360 data
->smu_features
[GNLD_FW_CTF
].smu_feature_id
=
362 data
->smu_features
[GNLD_LED_DISPLAY
].smu_feature_id
=
363 FEATURE_LED_DISPLAY_BIT
;
364 data
->smu_features
[GNLD_FAN_CONTROL
].smu_feature_id
=
365 FEATURE_FAN_CONTROL_BIT
;
366 data
->smu_features
[GNLD_ACG
].smu_feature_id
= FEATURE_ACG_BIT
;
367 data
->smu_features
[GNLD_DIDT
].smu_feature_id
= FEATURE_GFX_EDC_BIT
;
369 if (!data
->registry_data
.prefetcher_dpm_key_disabled
)
370 data
->smu_features
[GNLD_DPM_PREFETCHER
].supported
= true;
372 if (!data
->registry_data
.sclk_dpm_key_disabled
)
373 data
->smu_features
[GNLD_DPM_GFXCLK
].supported
= true;
375 if (!data
->registry_data
.mclk_dpm_key_disabled
)
376 data
->smu_features
[GNLD_DPM_UCLK
].supported
= true;
378 if (!data
->registry_data
.socclk_dpm_key_disabled
)
379 data
->smu_features
[GNLD_DPM_SOCCLK
].supported
= true;
381 if (PP_CAP(PHM_PlatformCaps_UVDDPM
))
382 data
->smu_features
[GNLD_DPM_UVD
].supported
= true;
384 if (PP_CAP(PHM_PlatformCaps_VCEDPM
))
385 data
->smu_features
[GNLD_DPM_VCE
].supported
= true;
387 if (!data
->registry_data
.pcie_dpm_key_disabled
)
388 data
->smu_features
[GNLD_DPM_LINK
].supported
= true;
390 if (!data
->registry_data
.dcefclk_dpm_key_disabled
)
391 data
->smu_features
[GNLD_DPM_DCEFCLK
].supported
= true;
393 if (PP_CAP(PHM_PlatformCaps_SclkDeepSleep
) &&
394 data
->registry_data
.sclk_deep_sleep_support
) {
395 data
->smu_features
[GNLD_DS_GFXCLK
].supported
= true;
396 data
->smu_features
[GNLD_DS_SOCCLK
].supported
= true;
397 data
->smu_features
[GNLD_DS_LCLK
].supported
= true;
398 data
->smu_features
[GNLD_DS_DCEFCLK
].supported
= true;
401 if (data
->registry_data
.enable_pkg_pwr_tracking_feature
)
402 data
->smu_features
[GNLD_PPT
].supported
= true;
404 if (data
->registry_data
.enable_tdc_limit_feature
)
405 data
->smu_features
[GNLD_TDC
].supported
= true;
407 if (data
->registry_data
.thermal_support
)
408 data
->smu_features
[GNLD_THERMAL
].supported
= true;
410 if (data
->registry_data
.fan_control_support
)
411 data
->smu_features
[GNLD_FAN_CONTROL
].supported
= true;
413 if (data
->registry_data
.fw_ctf_enabled
)
414 data
->smu_features
[GNLD_FW_CTF
].supported
= true;
416 if (data
->registry_data
.avfs_support
)
417 data
->smu_features
[GNLD_AVFS
].supported
= true;
419 if (data
->registry_data
.led_dpm_enabled
)
420 data
->smu_features
[GNLD_LED_DISPLAY
].supported
= true;
422 if (data
->registry_data
.vr1hot_enabled
)
423 data
->smu_features
[GNLD_VR1HOT
].supported
= true;
425 if (data
->registry_data
.vr0hot_enabled
)
426 data
->smu_features
[GNLD_VR0HOT
].supported
= true;
428 smum_send_msg_to_smc(hwmgr
, PPSMC_MSG_GetSmuVersion
);
429 vega10_read_arg_from_smc(hwmgr
, &(data
->smu_version
));
430 /* ACG firmware has major version 5 */
431 if ((data
->smu_version
& 0xff000000) == 0x5000000)
432 data
->smu_features
[GNLD_ACG
].supported
= true;
434 if (data
->registry_data
.didt_support
)
435 data
->smu_features
[GNLD_DIDT
].supported
= true;
439 #ifdef PPLIB_VEGA10_EVV_SUPPORT
440 static int vega10_get_socclk_for_voltage_evv(struct pp_hwmgr
*hwmgr
,
441 phm_ppt_v1_voltage_lookup_table
*lookup_table
,
442 uint16_t virtual_voltage_id
, int32_t *socclk
)
446 struct phm_ppt_v2_information
*table_info
=
447 (struct phm_ppt_v2_information
*)(hwmgr
->pptable
);
449 PP_ASSERT_WITH_CODE(lookup_table
->count
!= 0,
450 "Lookup table is empty",
453 /* search for leakage voltage ID 0xff01 ~ 0xff08 and sclk */
454 for (entry_id
= 0; entry_id
< table_info
->vdd_dep_on_sclk
->count
; entry_id
++) {
455 voltage_id
= table_info
->vdd_dep_on_socclk
->entries
[entry_id
].vddInd
;
456 if (lookup_table
->entries
[voltage_id
].us_vdd
== virtual_voltage_id
)
460 PP_ASSERT_WITH_CODE(entry_id
< table_info
->vdd_dep_on_socclk
->count
,
461 "Can't find requested voltage id in vdd_dep_on_socclk table!",
464 *socclk
= table_info
->vdd_dep_on_socclk
->entries
[entry_id
].clk
;
469 #define ATOM_VIRTUAL_VOLTAGE_ID0 0xff01
471 * Get Leakage VDDC based on leakage ID.
473 * @param hwmgr the address of the powerplay hardware manager.
476 static int vega10_get_evv_voltages(struct pp_hwmgr
*hwmgr
)
478 struct vega10_hwmgr
*data
= (struct vega10_hwmgr
*)(hwmgr
->backend
);
483 struct phm_ppt_v2_information
*table_info
=
484 (struct phm_ppt_v2_information
*)hwmgr
->pptable
;
485 struct phm_ppt_v1_clock_voltage_dependency_table
*socclk_table
=
486 table_info
->vdd_dep_on_socclk
;
489 for (i
= 0; i
< VEGA10_MAX_LEAKAGE_COUNT
; i
++) {
490 vv_id
= ATOM_VIRTUAL_VOLTAGE_ID0
+ i
;
492 if (!vega10_get_socclk_for_voltage_evv(hwmgr
,
493 table_info
->vddc_lookup_table
, vv_id
, &sclk
)) {
494 if (PP_CAP(PHM_PlatformCaps_ClockStretcher
)) {
495 for (j
= 1; j
< socclk_table
->count
; j
++) {
496 if (socclk_table
->entries
[j
].clk
== sclk
&&
497 socclk_table
->entries
[j
].cks_enable
== 0) {
504 PP_ASSERT_WITH_CODE(!atomctrl_get_voltage_evv_on_sclk_ai(hwmgr
,
505 VOLTAGE_TYPE_VDDC
, sclk
, vv_id
, &vddc
),
506 "Error retrieving EVV voltage value!",
510 /* need to make sure vddc is less than 2v or else, it could burn the ASIC. */
511 PP_ASSERT_WITH_CODE((vddc
< 2000 && vddc
!= 0),
512 "Invalid VDDC value", result
= -EINVAL
;);
514 /* the voltage should not be zero nor equal to leakage ID */
515 if (vddc
!= 0 && vddc
!= vv_id
) {
516 data
->vddc_leakage
.actual_voltage
[data
->vddc_leakage
.count
] = (uint16_t)(vddc
/100);
517 data
->vddc_leakage
.leakage_id
[data
->vddc_leakage
.count
] = vv_id
;
518 data
->vddc_leakage
.count
++;
527 * Change virtual leakage voltage to actual value.
529 * @param hwmgr the address of the powerplay hardware manager.
530 * @param pointer to changing voltage
531 * @param pointer to leakage table
533 static void vega10_patch_with_vdd_leakage(struct pp_hwmgr
*hwmgr
,
534 uint16_t *voltage
, struct vega10_leakage_voltage
*leakage_table
)
538 /* search for leakage voltage ID 0xff01 ~ 0xff08 */
539 for (index
= 0; index
< leakage_table
->count
; index
++) {
540 /* if this voltage matches a leakage voltage ID */
541 /* patch with actual leakage voltage */
542 if (leakage_table
->leakage_id
[index
] == *voltage
) {
543 *voltage
= leakage_table
->actual_voltage
[index
];
548 if (*voltage
> ATOM_VIRTUAL_VOLTAGE_ID0
)
549 pr_info("Voltage value looks like a Leakage ID \
550 but it's not patched\n");
554 * Patch voltage lookup table by EVV leakages.
556 * @param hwmgr the address of the powerplay hardware manager.
557 * @param pointer to voltage lookup table
558 * @param pointer to leakage table
561 static int vega10_patch_lookup_table_with_leakage(struct pp_hwmgr
*hwmgr
,
562 phm_ppt_v1_voltage_lookup_table
*lookup_table
,
563 struct vega10_leakage_voltage
*leakage_table
)
567 for (i
= 0; i
< lookup_table
->count
; i
++)
568 vega10_patch_with_vdd_leakage(hwmgr
,
569 &lookup_table
->entries
[i
].us_vdd
, leakage_table
);
574 static int vega10_patch_clock_voltage_limits_with_vddc_leakage(
575 struct pp_hwmgr
*hwmgr
, struct vega10_leakage_voltage
*leakage_table
,
578 vega10_patch_with_vdd_leakage(hwmgr
, (uint16_t *)vddc
, leakage_table
);
584 static int vega10_patch_voltage_dependency_tables_with_lookup_table(
585 struct pp_hwmgr
*hwmgr
)
587 uint8_t entry_id
, voltage_id
;
589 struct phm_ppt_v2_information
*table_info
=
590 (struct phm_ppt_v2_information
*)(hwmgr
->pptable
);
591 struct phm_ppt_v1_mm_clock_voltage_dependency_table
*mm_table
=
592 table_info
->mm_dep_table
;
593 struct phm_ppt_v1_clock_voltage_dependency_table
*mclk_table
=
594 table_info
->vdd_dep_on_mclk
;
596 for (i
= 0; i
< 6; i
++) {
597 struct phm_ppt_v1_clock_voltage_dependency_table
*vdt
;
599 case 0: vdt
= table_info
->vdd_dep_on_socclk
; break;
600 case 1: vdt
= table_info
->vdd_dep_on_sclk
; break;
601 case 2: vdt
= table_info
->vdd_dep_on_dcefclk
; break;
602 case 3: vdt
= table_info
->vdd_dep_on_pixclk
; break;
603 case 4: vdt
= table_info
->vdd_dep_on_dispclk
; break;
604 case 5: vdt
= table_info
->vdd_dep_on_phyclk
; break;
607 for (entry_id
= 0; entry_id
< vdt
->count
; entry_id
++) {
608 voltage_id
= vdt
->entries
[entry_id
].vddInd
;
609 vdt
->entries
[entry_id
].vddc
=
610 table_info
->vddc_lookup_table
->entries
[voltage_id
].us_vdd
;
614 for (entry_id
= 0; entry_id
< mm_table
->count
; ++entry_id
) {
615 voltage_id
= mm_table
->entries
[entry_id
].vddcInd
;
616 mm_table
->entries
[entry_id
].vddc
=
617 table_info
->vddc_lookup_table
->entries
[voltage_id
].us_vdd
;
620 for (entry_id
= 0; entry_id
< mclk_table
->count
; ++entry_id
) {
621 voltage_id
= mclk_table
->entries
[entry_id
].vddInd
;
622 mclk_table
->entries
[entry_id
].vddc
=
623 table_info
->vddc_lookup_table
->entries
[voltage_id
].us_vdd
;
624 voltage_id
= mclk_table
->entries
[entry_id
].vddciInd
;
625 mclk_table
->entries
[entry_id
].vddci
=
626 table_info
->vddci_lookup_table
->entries
[voltage_id
].us_vdd
;
627 voltage_id
= mclk_table
->entries
[entry_id
].mvddInd
;
628 mclk_table
->entries
[entry_id
].mvdd
=
629 table_info
->vddmem_lookup_table
->entries
[voltage_id
].us_vdd
;
637 static int vega10_sort_lookup_table(struct pp_hwmgr
*hwmgr
,
638 struct phm_ppt_v1_voltage_lookup_table
*lookup_table
)
640 uint32_t table_size
, i
, j
;
641 struct phm_ppt_v1_voltage_lookup_record tmp_voltage_lookup_record
;
643 PP_ASSERT_WITH_CODE(lookup_table
&& lookup_table
->count
,
644 "Lookup table is empty", return -EINVAL
);
646 table_size
= lookup_table
->count
;
648 /* Sorting voltages */
649 for (i
= 0; i
< table_size
- 1; i
++) {
650 for (j
= i
+ 1; j
> 0; j
--) {
651 if (lookup_table
->entries
[j
].us_vdd
<
652 lookup_table
->entries
[j
- 1].us_vdd
) {
653 tmp_voltage_lookup_record
= lookup_table
->entries
[j
- 1];
654 lookup_table
->entries
[j
- 1] = lookup_table
->entries
[j
];
655 lookup_table
->entries
[j
] = tmp_voltage_lookup_record
;
663 static int vega10_complete_dependency_tables(struct pp_hwmgr
*hwmgr
)
667 struct phm_ppt_v2_information
*table_info
=
668 (struct phm_ppt_v2_information
*)(hwmgr
->pptable
);
669 #ifdef PPLIB_VEGA10_EVV_SUPPORT
670 struct vega10_hwmgr
*data
= (struct vega10_hwmgr
*)(hwmgr
->backend
);
672 tmp_result
= vega10_patch_lookup_table_with_leakage(hwmgr
,
673 table_info
->vddc_lookup_table
, &(data
->vddc_leakage
));
677 tmp_result
= vega10_patch_clock_voltage_limits_with_vddc_leakage(hwmgr
,
678 &(data
->vddc_leakage
), &table_info
->max_clock_voltage_on_dc
.vddc
);
683 tmp_result
= vega10_patch_voltage_dependency_tables_with_lookup_table(hwmgr
);
687 tmp_result
= vega10_sort_lookup_table(hwmgr
, table_info
->vddc_lookup_table
);
694 static int vega10_set_private_data_based_on_pptable(struct pp_hwmgr
*hwmgr
)
696 struct phm_ppt_v2_information
*table_info
=
697 (struct phm_ppt_v2_information
*)(hwmgr
->pptable
);
698 struct phm_ppt_v1_clock_voltage_dependency_table
*allowed_sclk_vdd_table
=
699 table_info
->vdd_dep_on_socclk
;
700 struct phm_ppt_v1_clock_voltage_dependency_table
*allowed_mclk_vdd_table
=
701 table_info
->vdd_dep_on_mclk
;
703 PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table
,
704 "VDD dependency on SCLK table is missing. \
705 This table is mandatory", return -EINVAL
);
706 PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table
->count
>= 1,
707 "VDD dependency on SCLK table is empty. \
708 This table is mandatory", return -EINVAL
);
710 PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table
,
711 "VDD dependency on MCLK table is missing. \
712 This table is mandatory", return -EINVAL
);
713 PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table
->count
>= 1,
714 "VDD dependency on MCLK table is empty. \
715 This table is mandatory", return -EINVAL
);
717 table_info
->max_clock_voltage_on_ac
.sclk
=
718 allowed_sclk_vdd_table
->entries
[allowed_sclk_vdd_table
->count
- 1].clk
;
719 table_info
->max_clock_voltage_on_ac
.mclk
=
720 allowed_mclk_vdd_table
->entries
[allowed_mclk_vdd_table
->count
- 1].clk
;
721 table_info
->max_clock_voltage_on_ac
.vddc
=
722 allowed_sclk_vdd_table
->entries
[allowed_sclk_vdd_table
->count
- 1].vddc
;
723 table_info
->max_clock_voltage_on_ac
.vddci
=
724 allowed_mclk_vdd_table
->entries
[allowed_mclk_vdd_table
->count
- 1].vddci
;
726 hwmgr
->dyn_state
.max_clock_voltage_on_ac
.sclk
=
727 table_info
->max_clock_voltage_on_ac
.sclk
;
728 hwmgr
->dyn_state
.max_clock_voltage_on_ac
.mclk
=
729 table_info
->max_clock_voltage_on_ac
.mclk
;
730 hwmgr
->dyn_state
.max_clock_voltage_on_ac
.vddc
=
731 table_info
->max_clock_voltage_on_ac
.vddc
;
732 hwmgr
->dyn_state
.max_clock_voltage_on_ac
.vddci
=
733 table_info
->max_clock_voltage_on_ac
.vddci
;
738 static int vega10_hwmgr_backend_fini(struct pp_hwmgr
*hwmgr
)
740 kfree(hwmgr
->dyn_state
.vddc_dep_on_dal_pwrl
);
741 hwmgr
->dyn_state
.vddc_dep_on_dal_pwrl
= NULL
;
743 kfree(hwmgr
->backend
);
744 hwmgr
->backend
= NULL
;
749 static int vega10_hwmgr_backend_init(struct pp_hwmgr
*hwmgr
)
752 struct vega10_hwmgr
*data
;
753 uint32_t config_telemetry
= 0;
754 struct pp_atomfwctrl_voltage_table vol_table
;
755 struct cgs_system_info sys_info
= {0};
757 data
= kzalloc(sizeof(struct vega10_hwmgr
), GFP_KERNEL
);
761 hwmgr
->backend
= data
;
763 vega10_set_default_registry_data(hwmgr
);
765 data
->disable_dpm_mask
= 0xff;
766 data
->workload_mask
= 0xff;
768 /* need to set voltage control types before EVV patching */
769 data
->vddc_control
= VEGA10_VOLTAGE_CONTROL_NONE
;
770 data
->mvdd_control
= VEGA10_VOLTAGE_CONTROL_NONE
;
771 data
->vddci_control
= VEGA10_VOLTAGE_CONTROL_NONE
;
774 if (pp_atomfwctrl_is_voltage_controlled_by_gpio_v4(hwmgr
,
775 VOLTAGE_TYPE_VDDC
, VOLTAGE_OBJ_SVID2
)) {
776 if (!pp_atomfwctrl_get_voltage_table_v4(hwmgr
,
777 VOLTAGE_TYPE_VDDC
, VOLTAGE_OBJ_SVID2
,
779 config_telemetry
= ((vol_table
.telemetry_slope
<< 8) & 0xff00) |
780 (vol_table
.telemetry_offset
& 0xff);
781 data
->vddc_control
= VEGA10_VOLTAGE_CONTROL_BY_SVID2
;
784 kfree(hwmgr
->backend
);
785 hwmgr
->backend
= NULL
;
786 PP_ASSERT_WITH_CODE(false,
787 "VDDCR_SOC is not SVID2!",
792 if (pp_atomfwctrl_is_voltage_controlled_by_gpio_v4(hwmgr
,
793 VOLTAGE_TYPE_MVDDC
, VOLTAGE_OBJ_SVID2
)) {
794 if (!pp_atomfwctrl_get_voltage_table_v4(hwmgr
,
795 VOLTAGE_TYPE_MVDDC
, VOLTAGE_OBJ_SVID2
,
798 ((vol_table
.telemetry_slope
<< 24) & 0xff000000) |
799 ((vol_table
.telemetry_offset
<< 16) & 0xff0000);
800 data
->mvdd_control
= VEGA10_VOLTAGE_CONTROL_BY_SVID2
;
805 if (PP_CAP(PHM_PlatformCaps_ControlVDDCI
)) {
806 if (pp_atomfwctrl_is_voltage_controlled_by_gpio_v4(hwmgr
,
807 VOLTAGE_TYPE_VDDCI
, VOLTAGE_OBJ_GPIO_LUT
))
808 data
->vddci_control
= VEGA10_VOLTAGE_CONTROL_BY_GPIO
;
811 data
->config_telemetry
= config_telemetry
;
813 vega10_set_features_platform_caps(hwmgr
);
815 vega10_init_dpm_defaults(hwmgr
);
817 #ifdef PPLIB_VEGA10_EVV_SUPPORT
818 /* Get leakage voltage based on leakage ID. */
819 PP_ASSERT_WITH_CODE(!vega10_get_evv_voltages(hwmgr
),
820 "Get EVV Voltage Failed. Abort Driver loading!",
824 /* Patch our voltage dependency table with actual leakage voltage
825 * We need to perform leakage translation before it's used by other functions
827 vega10_complete_dependency_tables(hwmgr
);
829 /* Parse pptable data read from VBIOS */
830 vega10_set_private_data_based_on_pptable(hwmgr
);
832 data
->is_tlu_enabled
= false;
834 hwmgr
->platform_descriptor
.hardwareActivityPerformanceLevels
=
835 VEGA10_MAX_HARDWARE_POWERLEVELS
;
836 hwmgr
->platform_descriptor
.hardwarePerformanceLevels
= 2;
837 hwmgr
->platform_descriptor
.minimumClocksReductionPercentage
= 50;
839 hwmgr
->platform_descriptor
.vbiosInterruptId
= 0x20000400; /* IRQ_SOURCE1_SW_INT */
840 /* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. */
841 hwmgr
->platform_descriptor
.clockStep
.engineClock
= 500;
842 hwmgr
->platform_descriptor
.clockStep
.memoryClock
= 500;
844 sys_info
.size
= sizeof(struct cgs_system_info
);
845 sys_info
.info_id
= CGS_SYSTEM_INFO_GFX_CU_INFO
;
846 result
= cgs_query_system_info(hwmgr
->device
, &sys_info
);
847 data
->total_active_cus
= sys_info
.value
;
848 /* Setup default Overdrive Fan control settings */
849 data
->odn_fan_table
.target_fan_speed
=
850 hwmgr
->thermal_controller
.advanceFanControlParameters
.usMaxFanRPM
;
851 data
->odn_fan_table
.target_temperature
=
852 hwmgr
->thermal_controller
.
853 advanceFanControlParameters
.ucTargetTemperature
;
854 data
->odn_fan_table
.min_performance_clock
=
855 hwmgr
->thermal_controller
.advanceFanControlParameters
.
856 ulMinFanSCLKAcousticLimit
;
857 data
->odn_fan_table
.min_fan_limit
=
858 hwmgr
->thermal_controller
.
859 advanceFanControlParameters
.usFanPWMMinLimit
*
860 hwmgr
->thermal_controller
.fanInfo
.ulMaxRPM
/ 100;
865 static int vega10_init_sclk_threshold(struct pp_hwmgr
*hwmgr
)
867 struct vega10_hwmgr
*data
=
868 (struct vega10_hwmgr
*)(hwmgr
->backend
);
870 data
->low_sclk_interrupt_threshold
= 0;
875 static int vega10_setup_dpm_led_config(struct pp_hwmgr
*hwmgr
)
877 struct vega10_hwmgr
*data
=
878 (struct vega10_hwmgr
*)(hwmgr
->backend
);
879 PPTable_t
*pp_table
= &(data
->smc_state_table
.pp_table
);
881 struct pp_atomfwctrl_voltage_table table
;
887 ret
= pp_atomfwctrl_get_voltage_table_v4(hwmgr
, VOLTAGE_TYPE_LEDDPM
,
888 VOLTAGE_OBJ_GPIO_LUT
, &table
);
891 tmp
= table
.mask_low
;
892 for (i
= 0, j
= 0; i
< 32; i
++) {
894 mask
|= (uint32_t)(i
<< (8 * j
));
902 pp_table
->LedPin0
= (uint8_t)(mask
& 0xff);
903 pp_table
->LedPin1
= (uint8_t)((mask
>> 8) & 0xff);
904 pp_table
->LedPin2
= (uint8_t)((mask
>> 16) & 0xff);
908 static int vega10_setup_asic_task(struct pp_hwmgr
*hwmgr
)
910 PP_ASSERT_WITH_CODE(!vega10_init_sclk_threshold(hwmgr
),
911 "Failed to init sclk threshold!",
914 PP_ASSERT_WITH_CODE(!vega10_setup_dpm_led_config(hwmgr
),
915 "Failed to set up led dpm config!",
921 static bool vega10_is_dpm_running(struct pp_hwmgr
*hwmgr
)
923 uint32_t features_enabled
;
925 if (!vega10_get_smc_features(hwmgr
, &features_enabled
)) {
926 if (features_enabled
& SMC_DPM_FEATURES
)
933 * Remove repeated voltage values and create table with unique values.
935 * @param hwmgr the address of the powerplay hardware manager.
936 * @param vol_table the pointer to changing voltage table
937 * @return 0 on success
940 static int vega10_trim_voltage_table(struct pp_hwmgr
*hwmgr
,
941 struct pp_atomfwctrl_voltage_table
*vol_table
)
946 struct pp_atomfwctrl_voltage_table
*table
;
948 PP_ASSERT_WITH_CODE(vol_table
,
949 "Voltage Table empty.", return -EINVAL
);
950 table
= kzalloc(sizeof(struct pp_atomfwctrl_voltage_table
),
956 table
->mask_low
= vol_table
->mask_low
;
957 table
->phase_delay
= vol_table
->phase_delay
;
959 for (i
= 0; i
< vol_table
->count
; i
++) {
960 vvalue
= vol_table
->entries
[i
].value
;
963 for (j
= 0; j
< table
->count
; j
++) {
964 if (vvalue
== table
->entries
[j
].value
) {
971 table
->entries
[table
->count
].value
= vvalue
;
972 table
->entries
[table
->count
].smio_low
=
973 vol_table
->entries
[i
].smio_low
;
978 memcpy(vol_table
, table
, sizeof(struct pp_atomfwctrl_voltage_table
));
984 static int vega10_get_mvdd_voltage_table(struct pp_hwmgr
*hwmgr
,
985 phm_ppt_v1_clock_voltage_dependency_table
*dep_table
,
986 struct pp_atomfwctrl_voltage_table
*vol_table
)
990 PP_ASSERT_WITH_CODE(dep_table
->count
,
991 "Voltage Dependency Table empty.",
994 vol_table
->mask_low
= 0;
995 vol_table
->phase_delay
= 0;
996 vol_table
->count
= dep_table
->count
;
998 for (i
= 0; i
< vol_table
->count
; i
++) {
999 vol_table
->entries
[i
].value
= dep_table
->entries
[i
].mvdd
;
1000 vol_table
->entries
[i
].smio_low
= 0;
1003 PP_ASSERT_WITH_CODE(!vega10_trim_voltage_table(hwmgr
,
1005 "Failed to trim MVDD Table!",
1011 static int vega10_get_vddci_voltage_table(struct pp_hwmgr
*hwmgr
,
1012 phm_ppt_v1_clock_voltage_dependency_table
*dep_table
,
1013 struct pp_atomfwctrl_voltage_table
*vol_table
)
1017 PP_ASSERT_WITH_CODE(dep_table
->count
,
1018 "Voltage Dependency Table empty.",
1021 vol_table
->mask_low
= 0;
1022 vol_table
->phase_delay
= 0;
1023 vol_table
->count
= dep_table
->count
;
1025 for (i
= 0; i
< dep_table
->count
; i
++) {
1026 vol_table
->entries
[i
].value
= dep_table
->entries
[i
].vddci
;
1027 vol_table
->entries
[i
].smio_low
= 0;
1030 PP_ASSERT_WITH_CODE(!vega10_trim_voltage_table(hwmgr
, vol_table
),
1031 "Failed to trim VDDCI table.",
1037 static int vega10_get_vdd_voltage_table(struct pp_hwmgr
*hwmgr
,
1038 phm_ppt_v1_clock_voltage_dependency_table
*dep_table
,
1039 struct pp_atomfwctrl_voltage_table
*vol_table
)
1043 PP_ASSERT_WITH_CODE(dep_table
->count
,
1044 "Voltage Dependency Table empty.",
1047 vol_table
->mask_low
= 0;
1048 vol_table
->phase_delay
= 0;
1049 vol_table
->count
= dep_table
->count
;
1051 for (i
= 0; i
< vol_table
->count
; i
++) {
1052 vol_table
->entries
[i
].value
= dep_table
->entries
[i
].vddc
;
1053 vol_table
->entries
[i
].smio_low
= 0;
1059 /* ---- Voltage Tables ----
1060 * If the voltage table would be bigger than
1061 * what will fit into the state table on
1062 * the SMC keep only the higher entries.
1064 static void vega10_trim_voltage_table_to_fit_state_table(
1065 struct pp_hwmgr
*hwmgr
,
1066 uint32_t max_vol_steps
,
1067 struct pp_atomfwctrl_voltage_table
*vol_table
)
1069 unsigned int i
, diff
;
1071 if (vol_table
->count
<= max_vol_steps
)
1074 diff
= vol_table
->count
- max_vol_steps
;
1076 for (i
= 0; i
< max_vol_steps
; i
++)
1077 vol_table
->entries
[i
] = vol_table
->entries
[i
+ diff
];
1079 vol_table
->count
= max_vol_steps
;
1083 * Create Voltage Tables.
1085 * @param hwmgr the address of the powerplay hardware manager.
1088 static int vega10_construct_voltage_tables(struct pp_hwmgr
*hwmgr
)
1090 struct vega10_hwmgr
*data
= (struct vega10_hwmgr
*)(hwmgr
->backend
);
1091 struct phm_ppt_v2_information
*table_info
=
1092 (struct phm_ppt_v2_information
*)hwmgr
->pptable
;
1095 if (data
->mvdd_control
== VEGA10_VOLTAGE_CONTROL_BY_SVID2
||
1096 data
->mvdd_control
== VEGA10_VOLTAGE_CONTROL_NONE
) {
1097 result
= vega10_get_mvdd_voltage_table(hwmgr
,
1098 table_info
->vdd_dep_on_mclk
,
1099 &(data
->mvdd_voltage_table
));
1100 PP_ASSERT_WITH_CODE(!result
,
1101 "Failed to retrieve MVDDC table!",
1105 if (data
->vddci_control
== VEGA10_VOLTAGE_CONTROL_NONE
) {
1106 result
= vega10_get_vddci_voltage_table(hwmgr
,
1107 table_info
->vdd_dep_on_mclk
,
1108 &(data
->vddci_voltage_table
));
1109 PP_ASSERT_WITH_CODE(!result
,
1110 "Failed to retrieve VDDCI_MEM table!",
1114 if (data
->vddc_control
== VEGA10_VOLTAGE_CONTROL_BY_SVID2
||
1115 data
->vddc_control
== VEGA10_VOLTAGE_CONTROL_NONE
) {
1116 result
= vega10_get_vdd_voltage_table(hwmgr
,
1117 table_info
->vdd_dep_on_sclk
,
1118 &(data
->vddc_voltage_table
));
1119 PP_ASSERT_WITH_CODE(!result
,
1120 "Failed to retrieve VDDCR_SOC table!",
1124 PP_ASSERT_WITH_CODE(data
->vddc_voltage_table
.count
<= 16,
1125 "Too many voltage values for VDDC. Trimming to fit state table.",
1126 vega10_trim_voltage_table_to_fit_state_table(hwmgr
,
1127 16, &(data
->vddc_voltage_table
)));
1129 PP_ASSERT_WITH_CODE(data
->vddci_voltage_table
.count
<= 16,
1130 "Too many voltage values for VDDCI. Trimming to fit state table.",
1131 vega10_trim_voltage_table_to_fit_state_table(hwmgr
,
1132 16, &(data
->vddci_voltage_table
)));
1134 PP_ASSERT_WITH_CODE(data
->mvdd_voltage_table
.count
<= 16,
1135 "Too many voltage values for MVDD. Trimming to fit state table.",
1136 vega10_trim_voltage_table_to_fit_state_table(hwmgr
,
1137 16, &(data
->mvdd_voltage_table
)));
1144 * @fn vega10_init_dpm_state
1145 * @brief Function to initialize all Soft Min/Max and Hard Min/Max to 0xff.
1147 * @param dpm_state - the address of the DPM Table to initialize.
1150 static void vega10_init_dpm_state(struct vega10_dpm_state
*dpm_state
)
1152 dpm_state
->soft_min_level
= 0xff;
1153 dpm_state
->soft_max_level
= 0xff;
1154 dpm_state
->hard_min_level
= 0xff;
1155 dpm_state
->hard_max_level
= 0xff;
1158 static void vega10_setup_default_single_dpm_table(struct pp_hwmgr
*hwmgr
,
1159 struct vega10_single_dpm_table
*dpm_table
,
1160 struct phm_ppt_v1_clock_voltage_dependency_table
*dep_table
)
1164 dpm_table
->count
= 0;
1166 for (i
= 0; i
< dep_table
->count
; i
++) {
1167 if (i
== 0 || dpm_table
->dpm_levels
[dpm_table
->count
- 1].value
<=
1168 dep_table
->entries
[i
].clk
) {
1169 dpm_table
->dpm_levels
[dpm_table
->count
].value
=
1170 dep_table
->entries
[i
].clk
;
1171 dpm_table
->dpm_levels
[dpm_table
->count
].enabled
= true;
1176 static int vega10_setup_default_pcie_table(struct pp_hwmgr
*hwmgr
)
1178 struct vega10_hwmgr
*data
=
1179 (struct vega10_hwmgr
*)(hwmgr
->backend
);
1180 struct vega10_pcie_table
*pcie_table
= &(data
->dpm_table
.pcie_table
);
1181 struct phm_ppt_v2_information
*table_info
=
1182 (struct phm_ppt_v2_information
*)(hwmgr
->pptable
);
1183 struct phm_ppt_v1_pcie_table
*bios_pcie_table
=
1184 table_info
->pcie_table
;
1187 PP_ASSERT_WITH_CODE(bios_pcie_table
->count
,
1188 "Incorrect number of PCIE States from VBIOS!",
1191 for (i
= 0; i
< NUM_LINK_LEVELS
; i
++) {
1192 if (data
->registry_data
.pcieSpeedOverride
)
1193 pcie_table
->pcie_gen
[i
] =
1194 data
->registry_data
.pcieSpeedOverride
;
1196 pcie_table
->pcie_gen
[i
] =
1197 bios_pcie_table
->entries
[i
].gen_speed
;
1199 if (data
->registry_data
.pcieLaneOverride
)
1200 pcie_table
->pcie_lane
[i
] = (uint8_t)encode_pcie_lane_width(
1201 data
->registry_data
.pcieLaneOverride
);
1203 pcie_table
->pcie_lane
[i
] = (uint8_t)encode_pcie_lane_width(
1204 bios_pcie_table
->entries
[i
].lane_width
);
1205 if (data
->registry_data
.pcieClockOverride
)
1206 pcie_table
->lclk
[i
] =
1207 data
->registry_data
.pcieClockOverride
;
1209 pcie_table
->lclk
[i
] =
1210 bios_pcie_table
->entries
[i
].pcie_sclk
;
1213 pcie_table
->count
= NUM_LINK_LEVELS
;
1219 * This function is to initialize all DPM state tables
1220 * for SMU based on the dependency table.
1221 * Dynamic state patching function will then trim these
1222 * state tables to the allowed range based
1223 * on the power policy or external client requests,
1224 * such as UVD request, etc.
1226 static int vega10_setup_default_dpm_tables(struct pp_hwmgr
*hwmgr
)
1228 struct vega10_hwmgr
*data
=
1229 (struct vega10_hwmgr
*)(hwmgr
->backend
);
1230 struct phm_ppt_v2_information
*table_info
=
1231 (struct phm_ppt_v2_information
*)(hwmgr
->pptable
);
1232 struct vega10_single_dpm_table
*dpm_table
;
1235 struct phm_ppt_v1_clock_voltage_dependency_table
*dep_soc_table
=
1236 table_info
->vdd_dep_on_socclk
;
1237 struct phm_ppt_v1_clock_voltage_dependency_table
*dep_gfx_table
=
1238 table_info
->vdd_dep_on_sclk
;
1239 struct phm_ppt_v1_clock_voltage_dependency_table
*dep_mclk_table
=
1240 table_info
->vdd_dep_on_mclk
;
1241 struct phm_ppt_v1_mm_clock_voltage_dependency_table
*dep_mm_table
=
1242 table_info
->mm_dep_table
;
1243 struct phm_ppt_v1_clock_voltage_dependency_table
*dep_dcef_table
=
1244 table_info
->vdd_dep_on_dcefclk
;
1245 struct phm_ppt_v1_clock_voltage_dependency_table
*dep_pix_table
=
1246 table_info
->vdd_dep_on_pixclk
;
1247 struct phm_ppt_v1_clock_voltage_dependency_table
*dep_disp_table
=
1248 table_info
->vdd_dep_on_dispclk
;
1249 struct phm_ppt_v1_clock_voltage_dependency_table
*dep_phy_table
=
1250 table_info
->vdd_dep_on_phyclk
;
1252 PP_ASSERT_WITH_CODE(dep_soc_table
,
1253 "SOCCLK dependency table is missing. This table is mandatory",
1255 PP_ASSERT_WITH_CODE(dep_soc_table
->count
>= 1,
1256 "SOCCLK dependency table is empty. This table is mandatory",
1259 PP_ASSERT_WITH_CODE(dep_gfx_table
,
1260 "GFXCLK dependency table is missing. This table is mandatory",
1262 PP_ASSERT_WITH_CODE(dep_gfx_table
->count
>= 1,
1263 "GFXCLK dependency table is empty. This table is mandatory",
1266 PP_ASSERT_WITH_CODE(dep_mclk_table
,
1267 "MCLK dependency table is missing. This table is mandatory",
1269 PP_ASSERT_WITH_CODE(dep_mclk_table
->count
>= 1,
1270 "MCLK dependency table has to have is missing. This table is mandatory",
1273 /* Initialize Sclk DPM table based on allow Sclk values */
1274 dpm_table
= &(data
->dpm_table
.soc_table
);
1275 vega10_setup_default_single_dpm_table(hwmgr
,
1279 vega10_init_dpm_state(&(dpm_table
->dpm_state
));
1281 dpm_table
= &(data
->dpm_table
.gfx_table
);
1282 vega10_setup_default_single_dpm_table(hwmgr
,
1285 vega10_init_dpm_state(&(dpm_table
->dpm_state
));
1287 /* Initialize Mclk DPM table based on allow Mclk values */
1288 data
->dpm_table
.mem_table
.count
= 0;
1289 dpm_table
= &(data
->dpm_table
.mem_table
);
1290 vega10_setup_default_single_dpm_table(hwmgr
,
1293 vega10_init_dpm_state(&(dpm_table
->dpm_state
));
1295 data
->dpm_table
.eclk_table
.count
= 0;
1296 dpm_table
= &(data
->dpm_table
.eclk_table
);
1297 for (i
= 0; i
< dep_mm_table
->count
; i
++) {
1298 if (i
== 0 || dpm_table
->dpm_levels
1299 [dpm_table
->count
- 1].value
<=
1300 dep_mm_table
->entries
[i
].eclk
) {
1301 dpm_table
->dpm_levels
[dpm_table
->count
].value
=
1302 dep_mm_table
->entries
[i
].eclk
;
1303 dpm_table
->dpm_levels
[dpm_table
->count
].enabled
=
1304 (i
== 0) ? true : false;
1308 vega10_init_dpm_state(&(dpm_table
->dpm_state
));
1310 data
->dpm_table
.vclk_table
.count
= 0;
1311 data
->dpm_table
.dclk_table
.count
= 0;
1312 dpm_table
= &(data
->dpm_table
.vclk_table
);
1313 for (i
= 0; i
< dep_mm_table
->count
; i
++) {
1314 if (i
== 0 || dpm_table
->dpm_levels
1315 [dpm_table
->count
- 1].value
<=
1316 dep_mm_table
->entries
[i
].vclk
) {
1317 dpm_table
->dpm_levels
[dpm_table
->count
].value
=
1318 dep_mm_table
->entries
[i
].vclk
;
1319 dpm_table
->dpm_levels
[dpm_table
->count
].enabled
=
1320 (i
== 0) ? true : false;
1324 vega10_init_dpm_state(&(dpm_table
->dpm_state
));
1326 dpm_table
= &(data
->dpm_table
.dclk_table
);
1327 for (i
= 0; i
< dep_mm_table
->count
; i
++) {
1328 if (i
== 0 || dpm_table
->dpm_levels
1329 [dpm_table
->count
- 1].value
<=
1330 dep_mm_table
->entries
[i
].dclk
) {
1331 dpm_table
->dpm_levels
[dpm_table
->count
].value
=
1332 dep_mm_table
->entries
[i
].dclk
;
1333 dpm_table
->dpm_levels
[dpm_table
->count
].enabled
=
1334 (i
== 0) ? true : false;
1338 vega10_init_dpm_state(&(dpm_table
->dpm_state
));
1340 /* Assume there is no headless Vega10 for now */
1341 dpm_table
= &(data
->dpm_table
.dcef_table
);
1342 vega10_setup_default_single_dpm_table(hwmgr
,
1346 vega10_init_dpm_state(&(dpm_table
->dpm_state
));
1348 dpm_table
= &(data
->dpm_table
.pixel_table
);
1349 vega10_setup_default_single_dpm_table(hwmgr
,
1353 vega10_init_dpm_state(&(dpm_table
->dpm_state
));
1355 dpm_table
= &(data
->dpm_table
.display_table
);
1356 vega10_setup_default_single_dpm_table(hwmgr
,
1360 vega10_init_dpm_state(&(dpm_table
->dpm_state
));
1362 dpm_table
= &(data
->dpm_table
.phy_table
);
1363 vega10_setup_default_single_dpm_table(hwmgr
,
1367 vega10_init_dpm_state(&(dpm_table
->dpm_state
));
1369 vega10_setup_default_pcie_table(hwmgr
);
1371 /* save a copy of the default DPM table */
1372 memcpy(&(data
->golden_dpm_table
), &(data
->dpm_table
),
1373 sizeof(struct vega10_dpm_table
));
1375 if (PP_CAP(PHM_PlatformCaps_ODNinACSupport
) ||
1376 PP_CAP(PHM_PlatformCaps_ODNinDCSupport
)) {
1377 data
->odn_dpm_table
.odn_core_clock_dpm_levels
.
1378 number_of_performance_levels
= data
->dpm_table
.gfx_table
.count
;
1379 for (i
= 0; i
< data
->dpm_table
.gfx_table
.count
; i
++) {
1380 data
->odn_dpm_table
.odn_core_clock_dpm_levels
.
1381 performance_level_entries
[i
].clock
=
1382 data
->dpm_table
.gfx_table
.dpm_levels
[i
].value
;
1383 data
->odn_dpm_table
.odn_core_clock_dpm_levels
.
1384 performance_level_entries
[i
].enabled
= true;
1387 data
->odn_dpm_table
.vdd_dependency_on_sclk
.count
=
1388 dep_gfx_table
->count
;
1389 for (i
= 0; i
< dep_gfx_table
->count
; i
++) {
1390 data
->odn_dpm_table
.vdd_dependency_on_sclk
.entries
[i
].clk
=
1391 dep_gfx_table
->entries
[i
].clk
;
1392 data
->odn_dpm_table
.vdd_dependency_on_sclk
.entries
[i
].vddInd
=
1393 dep_gfx_table
->entries
[i
].vddInd
;
1394 data
->odn_dpm_table
.vdd_dependency_on_sclk
.entries
[i
].cks_enable
=
1395 dep_gfx_table
->entries
[i
].cks_enable
;
1396 data
->odn_dpm_table
.vdd_dependency_on_sclk
.entries
[i
].cks_voffset
=
1397 dep_gfx_table
->entries
[i
].cks_voffset
;
1400 data
->odn_dpm_table
.odn_memory_clock_dpm_levels
.
1401 number_of_performance_levels
= data
->dpm_table
.mem_table
.count
;
1402 for (i
= 0; i
< data
->dpm_table
.mem_table
.count
; i
++) {
1403 data
->odn_dpm_table
.odn_memory_clock_dpm_levels
.
1404 performance_level_entries
[i
].clock
=
1405 data
->dpm_table
.mem_table
.dpm_levels
[i
].value
;
1406 data
->odn_dpm_table
.odn_memory_clock_dpm_levels
.
1407 performance_level_entries
[i
].enabled
= true;
1410 data
->odn_dpm_table
.vdd_dependency_on_mclk
.count
= dep_mclk_table
->count
;
1411 for (i
= 0; i
< dep_mclk_table
->count
; i
++) {
1412 data
->odn_dpm_table
.vdd_dependency_on_mclk
.entries
[i
].clk
=
1413 dep_mclk_table
->entries
[i
].clk
;
1414 data
->odn_dpm_table
.vdd_dependency_on_mclk
.entries
[i
].vddInd
=
1415 dep_mclk_table
->entries
[i
].vddInd
;
1416 data
->odn_dpm_table
.vdd_dependency_on_mclk
.entries
[i
].vddci
=
1417 dep_mclk_table
->entries
[i
].vddci
;
1425 * @fn vega10_populate_ulv_state
1426 * @brief Function to provide parameters for Ultra Low Voltage state to SMC.
1428 * @param hwmgr - the address of the hardware manager.
1431 static int vega10_populate_ulv_state(struct pp_hwmgr
*hwmgr
)
1433 struct vega10_hwmgr
*data
=
1434 (struct vega10_hwmgr
*)(hwmgr
->backend
);
1435 struct phm_ppt_v2_information
*table_info
=
1436 (struct phm_ppt_v2_information
*)(hwmgr
->pptable
);
1438 data
->smc_state_table
.pp_table
.UlvOffsetVid
=
1439 (uint8_t)table_info
->us_ulv_voltage_offset
;
1441 data
->smc_state_table
.pp_table
.UlvSmnclkDid
=
1442 (uint8_t)(table_info
->us_ulv_smnclk_did
);
1443 data
->smc_state_table
.pp_table
.UlvMp1clkDid
=
1444 (uint8_t)(table_info
->us_ulv_mp1clk_did
);
1445 data
->smc_state_table
.pp_table
.UlvGfxclkBypass
=
1446 (uint8_t)(table_info
->us_ulv_gfxclk_bypass
);
1447 data
->smc_state_table
.pp_table
.UlvPhaseSheddingPsi0
=
1448 (uint8_t)(data
->vddc_voltage_table
.psi0_enable
);
1449 data
->smc_state_table
.pp_table
.UlvPhaseSheddingPsi1
=
1450 (uint8_t)(data
->vddc_voltage_table
.psi1_enable
);
1455 static int vega10_populate_single_lclk_level(struct pp_hwmgr
*hwmgr
,
1456 uint32_t lclock
, uint8_t *curr_lclk_did
)
1458 struct pp_atomfwctrl_clock_dividers_soc15 dividers
;
1460 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(
1462 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK
,
1464 "Failed to get LCLK clock settings from VBIOS!",
1467 *curr_lclk_did
= dividers
.ulDid
;
1472 static int vega10_populate_smc_link_levels(struct pp_hwmgr
*hwmgr
)
1475 struct vega10_hwmgr
*data
=
1476 (struct vega10_hwmgr
*)(hwmgr
->backend
);
1477 PPTable_t
*pp_table
= &(data
->smc_state_table
.pp_table
);
1478 struct vega10_pcie_table
*pcie_table
=
1479 &(data
->dpm_table
.pcie_table
);
1482 for (i
= 0; i
< pcie_table
->count
; i
++) {
1483 pp_table
->PcieGenSpeed
[i
] = pcie_table
->pcie_gen
[i
];
1484 pp_table
->PcieLaneCount
[i
] = pcie_table
->pcie_lane
[i
];
1486 result
= vega10_populate_single_lclk_level(hwmgr
,
1487 pcie_table
->lclk
[i
], &(pp_table
->LclkDid
[i
]));
1489 pr_info("Populate LClock Level %d Failed!\n", i
);
1495 while (i
< NUM_LINK_LEVELS
) {
1496 pp_table
->PcieGenSpeed
[i
] = pcie_table
->pcie_gen
[j
];
1497 pp_table
->PcieLaneCount
[i
] = pcie_table
->pcie_lane
[j
];
1499 result
= vega10_populate_single_lclk_level(hwmgr
,
1500 pcie_table
->lclk
[j
], &(pp_table
->LclkDid
[i
]));
1502 pr_info("Populate LClock Level %d Failed!\n", i
);
1512 * Populates single SMC GFXSCLK structure using the provided engine clock
1514 * @param hwmgr the address of the hardware manager
1515 * @param gfx_clock the GFX clock to use to populate the structure.
1516 * @param current_gfxclk_level location in PPTable for the SMC GFXCLK structure.
1519 static int vega10_populate_single_gfx_level(struct pp_hwmgr
*hwmgr
,
1520 uint32_t gfx_clock
, PllSetting_t
*current_gfxclk_level
,
1523 struct phm_ppt_v2_information
*table_info
=
1524 (struct phm_ppt_v2_information
*)(hwmgr
->pptable
);
1525 struct phm_ppt_v1_clock_voltage_dependency_table
*dep_on_sclk
=
1526 table_info
->vdd_dep_on_sclk
;
1527 struct vega10_hwmgr
*data
=
1528 (struct vega10_hwmgr
*)(hwmgr
->backend
);
1529 struct pp_atomfwctrl_clock_dividers_soc15 dividers
;
1530 uint32_t gfx_max_clock
=
1531 hwmgr
->platform_descriptor
.overdriveLimit
.engineClock
;
1534 if (data
->apply_overdrive_next_settings_mask
&
1535 DPMTABLE_OD_UPDATE_VDDC
)
1536 dep_on_sclk
= (struct phm_ppt_v1_clock_voltage_dependency_table
*)
1537 &(data
->odn_dpm_table
.vdd_dependency_on_sclk
);
1539 PP_ASSERT_WITH_CODE(dep_on_sclk
,
1540 "Invalid SOC_VDD-GFX_CLK Dependency Table!",
1543 if (data
->need_update_dpm_table
& DPMTABLE_OD_UPDATE_SCLK
)
1544 gfx_clock
= gfx_clock
> gfx_max_clock
? gfx_max_clock
: gfx_clock
;
1546 for (i
= 0; i
< dep_on_sclk
->count
; i
++) {
1547 if (dep_on_sclk
->entries
[i
].clk
== gfx_clock
)
1550 PP_ASSERT_WITH_CODE(dep_on_sclk
->count
> i
,
1551 "Cannot find gfx_clk in SOC_VDD-GFX_CLK!",
1555 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr
,
1556 COMPUTE_GPUCLK_INPUT_FLAG_GFXCLK
,
1557 gfx_clock
, ÷rs
),
1558 "Failed to get GFX Clock settings from VBIOS!",
1561 /* Feedback Multiplier: bit 0:8 int, bit 15:12 post_div, bit 31:16 frac */
1562 current_gfxclk_level
->FbMult
=
1563 cpu_to_le32(dividers
.ulPll_fb_mult
);
1564 /* Spread FB Multiplier bit: bit 0:8 int, bit 31:16 frac */
1565 current_gfxclk_level
->SsOn
= dividers
.ucPll_ss_enable
;
1566 current_gfxclk_level
->SsFbMult
=
1567 cpu_to_le32(dividers
.ulPll_ss_fbsmult
);
1568 current_gfxclk_level
->SsSlewFrac
=
1569 cpu_to_le16(dividers
.usPll_ss_slew_frac
);
1570 current_gfxclk_level
->Did
= (uint8_t)(dividers
.ulDid
);
1572 *acg_freq
= gfx_clock
/ 100; /* 100 Khz to Mhz conversion */
1578 * @brief Populates single SMC SOCCLK structure using the provided clock.
1580 * @param hwmgr - the address of the hardware manager.
1581 * @param soc_clock - the SOC clock to use to populate the structure.
1582 * @param current_socclk_level - location in PPTable for the SMC SOCCLK structure.
1583 * @return 0 on success..
1585 static int vega10_populate_single_soc_level(struct pp_hwmgr
*hwmgr
,
1586 uint32_t soc_clock
, uint8_t *current_soc_did
,
1587 uint8_t *current_vol_index
)
1589 struct phm_ppt_v2_information
*table_info
=
1590 (struct phm_ppt_v2_information
*)(hwmgr
->pptable
);
1591 struct phm_ppt_v1_clock_voltage_dependency_table
*dep_on_soc
=
1592 table_info
->vdd_dep_on_socclk
;
1593 struct pp_atomfwctrl_clock_dividers_soc15 dividers
;
1596 PP_ASSERT_WITH_CODE(dep_on_soc
,
1597 "Invalid SOC_VDD-SOC_CLK Dependency Table!",
1599 for (i
= 0; i
< dep_on_soc
->count
; i
++) {
1600 if (dep_on_soc
->entries
[i
].clk
== soc_clock
)
1603 PP_ASSERT_WITH_CODE(dep_on_soc
->count
> i
,
1604 "Cannot find SOC_CLK in SOC_VDD-SOC_CLK Dependency Table",
1606 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr
,
1607 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK
,
1608 soc_clock
, ÷rs
),
1609 "Failed to get SOC Clock settings from VBIOS!",
1612 *current_soc_did
= (uint8_t)dividers
.ulDid
;
1613 *current_vol_index
= (uint8_t)(dep_on_soc
->entries
[i
].vddInd
);
1618 uint16_t vega10_locate_vddc_given_clock(struct pp_hwmgr
*hwmgr
,
1620 struct phm_ppt_v1_clock_voltage_dependency_table
*dep_table
)
1624 for (i
= 0; i
< dep_table
->count
; i
++) {
1625 if (dep_table
->entries
[i
].clk
== clk
)
1626 return dep_table
->entries
[i
].vddc
;
1629 pr_info("[LocateVddcGivenClock] Cannot locate SOC Vddc for this clock!");
1634 * Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states
1636 * @param hwmgr the address of the hardware manager
1638 static int vega10_populate_all_graphic_levels(struct pp_hwmgr
*hwmgr
)
1640 struct vega10_hwmgr
*data
=
1641 (struct vega10_hwmgr
*)(hwmgr
->backend
);
1642 struct phm_ppt_v2_information
*table_info
=
1643 (struct phm_ppt_v2_information
*)(hwmgr
->pptable
);
1644 struct phm_ppt_v1_clock_voltage_dependency_table
*dep_table
=
1645 table_info
->vdd_dep_on_socclk
;
1646 PPTable_t
*pp_table
= &(data
->smc_state_table
.pp_table
);
1647 struct vega10_single_dpm_table
*dpm_table
= &(data
->dpm_table
.gfx_table
);
1651 for (i
= 0; i
< dpm_table
->count
; i
++) {
1652 result
= vega10_populate_single_gfx_level(hwmgr
,
1653 dpm_table
->dpm_levels
[i
].value
,
1654 &(pp_table
->GfxclkLevel
[i
]),
1655 &(pp_table
->AcgFreqTable
[i
]));
1661 while (i
< NUM_GFXCLK_DPM_LEVELS
) {
1662 result
= vega10_populate_single_gfx_level(hwmgr
,
1663 dpm_table
->dpm_levels
[j
].value
,
1664 &(pp_table
->GfxclkLevel
[i
]),
1665 &(pp_table
->AcgFreqTable
[i
]));
1671 pp_table
->GfxclkSlewRate
=
1672 cpu_to_le16(table_info
->us_gfxclk_slew_rate
);
1674 dpm_table
= &(data
->dpm_table
.soc_table
);
1675 for (i
= 0; i
< dpm_table
->count
; i
++) {
1676 pp_table
->SocVid
[i
] =
1677 (uint8_t)convert_to_vid(
1678 vega10_locate_vddc_given_clock(hwmgr
,
1679 dpm_table
->dpm_levels
[i
].value
,
1681 result
= vega10_populate_single_soc_level(hwmgr
,
1682 dpm_table
->dpm_levels
[i
].value
,
1683 &(pp_table
->SocclkDid
[i
]),
1684 &(pp_table
->SocDpmVoltageIndex
[i
]));
1690 while (i
< NUM_SOCCLK_DPM_LEVELS
) {
1691 pp_table
->SocVid
[i
] = pp_table
->SocVid
[j
];
1692 result
= vega10_populate_single_soc_level(hwmgr
,
1693 dpm_table
->dpm_levels
[j
].value
,
1694 &(pp_table
->SocclkDid
[i
]),
1695 &(pp_table
->SocDpmVoltageIndex
[i
]));
1705 * @brief Populates single SMC GFXCLK structure using the provided clock.
1707 * @param hwmgr - the address of the hardware manager.
1708 * @param mem_clock - the memory clock to use to populate the structure.
1709 * @return 0 on success..
1711 static int vega10_populate_single_memory_level(struct pp_hwmgr
*hwmgr
,
1712 uint32_t mem_clock
, uint8_t *current_mem_vid
,
1713 PllSetting_t
*current_memclk_level
, uint8_t *current_mem_soc_vind
)
1715 struct vega10_hwmgr
*data
=
1716 (struct vega10_hwmgr
*)(hwmgr
->backend
);
1717 struct phm_ppt_v2_information
*table_info
=
1718 (struct phm_ppt_v2_information
*)(hwmgr
->pptable
);
1719 struct phm_ppt_v1_clock_voltage_dependency_table
*dep_on_mclk
=
1720 table_info
->vdd_dep_on_mclk
;
1721 struct pp_atomfwctrl_clock_dividers_soc15 dividers
;
1722 uint32_t mem_max_clock
=
1723 hwmgr
->platform_descriptor
.overdriveLimit
.memoryClock
;
1726 if (data
->apply_overdrive_next_settings_mask
&
1727 DPMTABLE_OD_UPDATE_VDDC
)
1728 dep_on_mclk
= (struct phm_ppt_v1_clock_voltage_dependency_table
*)
1729 &data
->odn_dpm_table
.vdd_dependency_on_mclk
;
1731 PP_ASSERT_WITH_CODE(dep_on_mclk
,
1732 "Invalid SOC_VDD-UCLK Dependency Table!",
1735 if (data
->need_update_dpm_table
& DPMTABLE_OD_UPDATE_MCLK
)
1736 mem_clock
= mem_clock
> mem_max_clock
? mem_max_clock
: mem_clock
;
1738 for (i
= 0; i
< dep_on_mclk
->count
; i
++) {
1739 if (dep_on_mclk
->entries
[i
].clk
== mem_clock
)
1742 PP_ASSERT_WITH_CODE(dep_on_mclk
->count
> i
,
1743 "Cannot find UCLK in SOC_VDD-UCLK Dependency Table!",
1747 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(
1748 hwmgr
, COMPUTE_GPUCLK_INPUT_FLAG_UCLK
, mem_clock
, ÷rs
),
1749 "Failed to get UCLK settings from VBIOS!",
1753 (uint8_t)(convert_to_vid(dep_on_mclk
->entries
[i
].mvdd
));
1754 *current_mem_soc_vind
=
1755 (uint8_t)(dep_on_mclk
->entries
[i
].vddInd
);
1756 current_memclk_level
->FbMult
= cpu_to_le32(dividers
.ulPll_fb_mult
);
1757 current_memclk_level
->Did
= (uint8_t)(dividers
.ulDid
);
1759 PP_ASSERT_WITH_CODE(current_memclk_level
->Did
>= 1,
1760 "Invalid Divider ID!",
1767 * @brief Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states.
1769 * @param pHwMgr - the address of the hardware manager.
1770 * @return PP_Result_OK on success.
1772 static int vega10_populate_all_memory_levels(struct pp_hwmgr
*hwmgr
)
1774 struct vega10_hwmgr
*data
=
1775 (struct vega10_hwmgr
*)(hwmgr
->backend
);
1776 PPTable_t
*pp_table
= &(data
->smc_state_table
.pp_table
);
1777 struct vega10_single_dpm_table
*dpm_table
=
1778 &(data
->dpm_table
.mem_table
);
1780 uint32_t i
, j
, reg
, mem_channels
;
1782 for (i
= 0; i
< dpm_table
->count
; i
++) {
1783 result
= vega10_populate_single_memory_level(hwmgr
,
1784 dpm_table
->dpm_levels
[i
].value
,
1785 &(pp_table
->MemVid
[i
]),
1786 &(pp_table
->UclkLevel
[i
]),
1787 &(pp_table
->MemSocVoltageIndex
[i
]));
1793 while (i
< NUM_UCLK_DPM_LEVELS
) {
1794 result
= vega10_populate_single_memory_level(hwmgr
,
1795 dpm_table
->dpm_levels
[j
].value
,
1796 &(pp_table
->MemVid
[i
]),
1797 &(pp_table
->UclkLevel
[i
]),
1798 &(pp_table
->MemSocVoltageIndex
[i
]));
1804 reg
= soc15_get_register_offset(DF_HWID
, 0,
1805 mmDF_CS_AON0_DramBaseAddress0_BASE_IDX
,
1806 mmDF_CS_AON0_DramBaseAddress0
);
1807 mem_channels
= (cgs_read_register(hwmgr
->device
, reg
) &
1808 DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK
) >>
1809 DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT
;
1810 PP_ASSERT_WITH_CODE(mem_channels
< ARRAY_SIZE(channel_number
),
1811 "Mem Channel Index Exceeded maximum!",
1814 pp_table
->NumMemoryChannels
= cpu_to_le16(mem_channels
);
1815 pp_table
->MemoryChannelWidth
=
1816 cpu_to_le16(HBM_MEMORY_CHANNEL_WIDTH
*
1817 channel_number
[mem_channels
]);
1819 pp_table
->LowestUclkReservedForUlv
=
1820 (uint8_t)(data
->lowest_uclk_reserved_for_ulv
);
1825 static int vega10_populate_single_display_type(struct pp_hwmgr
*hwmgr
,
1826 DSPCLK_e disp_clock
)
1828 struct vega10_hwmgr
*data
=
1829 (struct vega10_hwmgr
*)(hwmgr
->backend
);
1830 PPTable_t
*pp_table
= &(data
->smc_state_table
.pp_table
);
1831 struct phm_ppt_v2_information
*table_info
=
1832 (struct phm_ppt_v2_information
*)
1834 struct phm_ppt_v1_clock_voltage_dependency_table
*dep_table
;
1836 uint16_t clk
= 0, vddc
= 0;
1839 switch (disp_clock
) {
1840 case DSPCLK_DCEFCLK
:
1841 dep_table
= table_info
->vdd_dep_on_dcefclk
;
1843 case DSPCLK_DISPCLK
:
1844 dep_table
= table_info
->vdd_dep_on_dispclk
;
1847 dep_table
= table_info
->vdd_dep_on_pixclk
;
1850 dep_table
= table_info
->vdd_dep_on_phyclk
;
1856 PP_ASSERT_WITH_CODE(dep_table
->count
<= NUM_DSPCLK_LEVELS
,
1857 "Number Of Entries Exceeded maximum!",
1860 for (i
= 0; i
< dep_table
->count
; i
++) {
1861 clk
= (uint16_t)(dep_table
->entries
[i
].clk
/ 100);
1862 vddc
= table_info
->vddc_lookup_table
->
1863 entries
[dep_table
->entries
[i
].vddInd
].us_vdd
;
1864 vid
= (uint8_t)convert_to_vid(vddc
);
1865 pp_table
->DisplayClockTable
[disp_clock
][i
].Freq
=
1867 pp_table
->DisplayClockTable
[disp_clock
][i
].Vid
=
1871 while (i
< NUM_DSPCLK_LEVELS
) {
1872 pp_table
->DisplayClockTable
[disp_clock
][i
].Freq
=
1874 pp_table
->DisplayClockTable
[disp_clock
][i
].Vid
=
1882 static int vega10_populate_all_display_clock_levels(struct pp_hwmgr
*hwmgr
)
1886 for (i
= 0; i
< DSPCLK_COUNT
; i
++) {
1887 PP_ASSERT_WITH_CODE(!vega10_populate_single_display_type(hwmgr
, i
),
1888 "Failed to populate Clock in DisplayClockTable!",
1895 static int vega10_populate_single_eclock_level(struct pp_hwmgr
*hwmgr
,
1896 uint32_t eclock
, uint8_t *current_eclk_did
,
1897 uint8_t *current_soc_vol
)
1899 struct phm_ppt_v2_information
*table_info
=
1900 (struct phm_ppt_v2_information
*)(hwmgr
->pptable
);
1901 struct phm_ppt_v1_mm_clock_voltage_dependency_table
*dep_table
=
1902 table_info
->mm_dep_table
;
1903 struct pp_atomfwctrl_clock_dividers_soc15 dividers
;
1906 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr
,
1907 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK
,
1909 "Failed to get ECLK clock settings from VBIOS!",
1912 *current_eclk_did
= (uint8_t)dividers
.ulDid
;
1914 for (i
= 0; i
< dep_table
->count
; i
++) {
1915 if (dep_table
->entries
[i
].eclk
== eclock
)
1916 *current_soc_vol
= dep_table
->entries
[i
].vddcInd
;
1922 static int vega10_populate_smc_vce_levels(struct pp_hwmgr
*hwmgr
)
1924 struct vega10_hwmgr
*data
=
1925 (struct vega10_hwmgr
*)(hwmgr
->backend
);
1926 PPTable_t
*pp_table
= &(data
->smc_state_table
.pp_table
);
1927 struct vega10_single_dpm_table
*dpm_table
= &(data
->dpm_table
.eclk_table
);
1928 int result
= -EINVAL
;
1931 for (i
= 0; i
< dpm_table
->count
; i
++) {
1932 result
= vega10_populate_single_eclock_level(hwmgr
,
1933 dpm_table
->dpm_levels
[i
].value
,
1934 &(pp_table
->EclkDid
[i
]),
1935 &(pp_table
->VceDpmVoltageIndex
[i
]));
1941 while (i
< NUM_VCE_DPM_LEVELS
) {
1942 result
= vega10_populate_single_eclock_level(hwmgr
,
1943 dpm_table
->dpm_levels
[j
].value
,
1944 &(pp_table
->EclkDid
[i
]),
1945 &(pp_table
->VceDpmVoltageIndex
[i
]));
1954 static int vega10_populate_single_vclock_level(struct pp_hwmgr
*hwmgr
,
1955 uint32_t vclock
, uint8_t *current_vclk_did
)
1957 struct pp_atomfwctrl_clock_dividers_soc15 dividers
;
1959 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr
,
1960 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK
,
1962 "Failed to get VCLK clock settings from VBIOS!",
1965 *current_vclk_did
= (uint8_t)dividers
.ulDid
;
1970 static int vega10_populate_single_dclock_level(struct pp_hwmgr
*hwmgr
,
1971 uint32_t dclock
, uint8_t *current_dclk_did
)
1973 struct pp_atomfwctrl_clock_dividers_soc15 dividers
;
1975 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr
,
1976 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK
,
1978 "Failed to get DCLK clock settings from VBIOS!",
1981 *current_dclk_did
= (uint8_t)dividers
.ulDid
;
1986 static int vega10_populate_smc_uvd_levels(struct pp_hwmgr
*hwmgr
)
1988 struct vega10_hwmgr
*data
=
1989 (struct vega10_hwmgr
*)(hwmgr
->backend
);
1990 PPTable_t
*pp_table
= &(data
->smc_state_table
.pp_table
);
1991 struct vega10_single_dpm_table
*vclk_dpm_table
=
1992 &(data
->dpm_table
.vclk_table
);
1993 struct vega10_single_dpm_table
*dclk_dpm_table
=
1994 &(data
->dpm_table
.dclk_table
);
1995 struct phm_ppt_v2_information
*table_info
=
1996 (struct phm_ppt_v2_information
*)(hwmgr
->pptable
);
1997 struct phm_ppt_v1_mm_clock_voltage_dependency_table
*dep_table
=
1998 table_info
->mm_dep_table
;
1999 int result
= -EINVAL
;
2002 for (i
= 0; i
< vclk_dpm_table
->count
; i
++) {
2003 result
= vega10_populate_single_vclock_level(hwmgr
,
2004 vclk_dpm_table
->dpm_levels
[i
].value
,
2005 &(pp_table
->VclkDid
[i
]));
2011 while (i
< NUM_UVD_DPM_LEVELS
) {
2012 result
= vega10_populate_single_vclock_level(hwmgr
,
2013 vclk_dpm_table
->dpm_levels
[j
].value
,
2014 &(pp_table
->VclkDid
[i
]));
2020 for (i
= 0; i
< dclk_dpm_table
->count
; i
++) {
2021 result
= vega10_populate_single_dclock_level(hwmgr
,
2022 dclk_dpm_table
->dpm_levels
[i
].value
,
2023 &(pp_table
->DclkDid
[i
]));
2029 while (i
< NUM_UVD_DPM_LEVELS
) {
2030 result
= vega10_populate_single_dclock_level(hwmgr
,
2031 dclk_dpm_table
->dpm_levels
[j
].value
,
2032 &(pp_table
->DclkDid
[i
]));
2038 for (i
= 0; i
< dep_table
->count
; i
++) {
2039 if (dep_table
->entries
[i
].vclk
==
2040 vclk_dpm_table
->dpm_levels
[i
].value
&&
2041 dep_table
->entries
[i
].dclk
==
2042 dclk_dpm_table
->dpm_levels
[i
].value
)
2043 pp_table
->UvdDpmVoltageIndex
[i
] =
2044 dep_table
->entries
[i
].vddcInd
;
2050 while (i
< NUM_UVD_DPM_LEVELS
) {
2051 pp_table
->UvdDpmVoltageIndex
[i
] = dep_table
->entries
[j
].vddcInd
;
2058 static int vega10_populate_clock_stretcher_table(struct pp_hwmgr
*hwmgr
)
2060 struct vega10_hwmgr
*data
=
2061 (struct vega10_hwmgr
*)(hwmgr
->backend
);
2062 PPTable_t
*pp_table
= &(data
->smc_state_table
.pp_table
);
2063 struct phm_ppt_v2_information
*table_info
=
2064 (struct phm_ppt_v2_information
*)(hwmgr
->pptable
);
2065 struct phm_ppt_v1_clock_voltage_dependency_table
*dep_table
=
2066 table_info
->vdd_dep_on_sclk
;
2069 for (i
= 0; i
< dep_table
->count
; i
++) {
2070 pp_table
->CksEnable
[i
] = dep_table
->entries
[i
].cks_enable
;
2071 pp_table
->CksVidOffset
[i
] = (uint8_t)(dep_table
->entries
[i
].cks_voffset
2072 * VOLTAGE_VID_OFFSET_SCALE2
/ VOLTAGE_VID_OFFSET_SCALE1
);
2078 static int vega10_populate_avfs_parameters(struct pp_hwmgr
*hwmgr
)
2080 struct vega10_hwmgr
*data
=
2081 (struct vega10_hwmgr
*)(hwmgr
->backend
);
2082 PPTable_t
*pp_table
= &(data
->smc_state_table
.pp_table
);
2083 struct phm_ppt_v2_information
*table_info
=
2084 (struct phm_ppt_v2_information
*)(hwmgr
->pptable
);
2085 struct phm_ppt_v1_clock_voltage_dependency_table
*dep_table
=
2086 table_info
->vdd_dep_on_sclk
;
2087 struct pp_atomfwctrl_avfs_parameters avfs_params
= {0};
2091 pp_table
->MinVoltageVid
= (uint8_t)0xff;
2092 pp_table
->MaxVoltageVid
= (uint8_t)0;
2094 if (data
->smu_features
[GNLD_AVFS
].supported
) {
2095 result
= pp_atomfwctrl_get_avfs_information(hwmgr
, &avfs_params
);
2097 pp_table
->MinVoltageVid
= (uint8_t)
2098 convert_to_vid((uint16_t)(avfs_params
.ulMinVddc
));
2099 pp_table
->MaxVoltageVid
= (uint8_t)
2100 convert_to_vid((uint16_t)(avfs_params
.ulMaxVddc
));
2102 pp_table
->AConstant
[0] = cpu_to_le32(avfs_params
.ulMeanNsigmaAcontant0
);
2103 pp_table
->AConstant
[1] = cpu_to_le32(avfs_params
.ulMeanNsigmaAcontant1
);
2104 pp_table
->AConstant
[2] = cpu_to_le32(avfs_params
.ulMeanNsigmaAcontant2
);
2105 pp_table
->DC_tol_sigma
= cpu_to_le16(avfs_params
.usMeanNsigmaDcTolSigma
);
2106 pp_table
->Platform_mean
= cpu_to_le16(avfs_params
.usMeanNsigmaPlatformMean
);
2107 pp_table
->Platform_sigma
= cpu_to_le16(avfs_params
.usMeanNsigmaDcTolSigma
);
2108 pp_table
->PSM_Age_CompFactor
= cpu_to_le16(avfs_params
.usPsmAgeComfactor
);
2110 pp_table
->BtcGbVdroopTableCksOff
.a0
=
2111 cpu_to_le32(avfs_params
.ulGbVdroopTableCksoffA0
);
2112 pp_table
->BtcGbVdroopTableCksOff
.a0_shift
= 20;
2113 pp_table
->BtcGbVdroopTableCksOff
.a1
=
2114 cpu_to_le32(avfs_params
.ulGbVdroopTableCksoffA1
);
2115 pp_table
->BtcGbVdroopTableCksOff
.a1_shift
= 20;
2116 pp_table
->BtcGbVdroopTableCksOff
.a2
=
2117 cpu_to_le32(avfs_params
.ulGbVdroopTableCksoffA2
);
2118 pp_table
->BtcGbVdroopTableCksOff
.a2_shift
= 20;
2120 pp_table
->OverrideBtcGbCksOn
= avfs_params
.ucEnableGbVdroopTableCkson
;
2121 pp_table
->BtcGbVdroopTableCksOn
.a0
=
2122 cpu_to_le32(avfs_params
.ulGbVdroopTableCksonA0
);
2123 pp_table
->BtcGbVdroopTableCksOn
.a0_shift
= 20;
2124 pp_table
->BtcGbVdroopTableCksOn
.a1
=
2125 cpu_to_le32(avfs_params
.ulGbVdroopTableCksonA1
);
2126 pp_table
->BtcGbVdroopTableCksOn
.a1_shift
= 20;
2127 pp_table
->BtcGbVdroopTableCksOn
.a2
=
2128 cpu_to_le32(avfs_params
.ulGbVdroopTableCksonA2
);
2129 pp_table
->BtcGbVdroopTableCksOn
.a2_shift
= 20;
2131 pp_table
->AvfsGbCksOn
.m1
=
2132 cpu_to_le32(avfs_params
.ulGbFuseTableCksonM1
);
2133 pp_table
->AvfsGbCksOn
.m2
=
2134 cpu_to_le32(avfs_params
.ulGbFuseTableCksonM2
);
2135 pp_table
->AvfsGbCksOn
.b
=
2136 cpu_to_le32(avfs_params
.ulGbFuseTableCksonB
);
2137 pp_table
->AvfsGbCksOn
.m1_shift
= 24;
2138 pp_table
->AvfsGbCksOn
.m2_shift
= 12;
2139 pp_table
->AvfsGbCksOn
.b_shift
= 0;
2141 pp_table
->OverrideAvfsGbCksOn
=
2142 avfs_params
.ucEnableGbFuseTableCkson
;
2143 pp_table
->AvfsGbCksOff
.m1
=
2144 cpu_to_le32(avfs_params
.ulGbFuseTableCksoffM1
);
2145 pp_table
->AvfsGbCksOff
.m2
=
2146 cpu_to_le32(avfs_params
.ulGbFuseTableCksoffM2
);
2147 pp_table
->AvfsGbCksOff
.b
=
2148 cpu_to_le32(avfs_params
.ulGbFuseTableCksoffB
);
2149 pp_table
->AvfsGbCksOff
.m1_shift
= 24;
2150 pp_table
->AvfsGbCksOff
.m2_shift
= 12;
2151 pp_table
->AvfsGbCksOff
.b_shift
= 0;
2153 for (i
= 0; i
< dep_table
->count
; i
++)
2154 pp_table
->StaticVoltageOffsetVid
[i
] =
2155 convert_to_vid((uint8_t)(dep_table
->entries
[i
].sclk_offset
));
2157 if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT
!=
2158 data
->disp_clk_quad_eqn_a
) &&
2159 (PPREGKEY_VEGA10QUADRATICEQUATION_DFLT
!=
2160 data
->disp_clk_quad_eqn_b
)) {
2161 pp_table
->DisplayClock2Gfxclk
[DSPCLK_DISPCLK
].m1
=
2162 (int32_t)data
->disp_clk_quad_eqn_a
;
2163 pp_table
->DisplayClock2Gfxclk
[DSPCLK_DISPCLK
].m2
=
2164 (int32_t)data
->disp_clk_quad_eqn_b
;
2165 pp_table
->DisplayClock2Gfxclk
[DSPCLK_DISPCLK
].b
=
2166 (int32_t)data
->disp_clk_quad_eqn_c
;
2168 pp_table
->DisplayClock2Gfxclk
[DSPCLK_DISPCLK
].m1
=
2169 (int32_t)avfs_params
.ulDispclk2GfxclkM1
;
2170 pp_table
->DisplayClock2Gfxclk
[DSPCLK_DISPCLK
].m2
=
2171 (int32_t)avfs_params
.ulDispclk2GfxclkM2
;
2172 pp_table
->DisplayClock2Gfxclk
[DSPCLK_DISPCLK
].b
=
2173 (int32_t)avfs_params
.ulDispclk2GfxclkB
;
2176 pp_table
->DisplayClock2Gfxclk
[DSPCLK_DISPCLK
].m1_shift
= 24;
2177 pp_table
->DisplayClock2Gfxclk
[DSPCLK_DISPCLK
].m2_shift
= 12;
2178 pp_table
->DisplayClock2Gfxclk
[DSPCLK_DISPCLK
].b_shift
= 12;
2180 if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT
!=
2181 data
->dcef_clk_quad_eqn_a
) &&
2182 (PPREGKEY_VEGA10QUADRATICEQUATION_DFLT
!=
2183 data
->dcef_clk_quad_eqn_b
)) {
2184 pp_table
->DisplayClock2Gfxclk
[DSPCLK_DCEFCLK
].m1
=
2185 (int32_t)data
->dcef_clk_quad_eqn_a
;
2186 pp_table
->DisplayClock2Gfxclk
[DSPCLK_DCEFCLK
].m2
=
2187 (int32_t)data
->dcef_clk_quad_eqn_b
;
2188 pp_table
->DisplayClock2Gfxclk
[DSPCLK_DCEFCLK
].b
=
2189 (int32_t)data
->dcef_clk_quad_eqn_c
;
2191 pp_table
->DisplayClock2Gfxclk
[DSPCLK_DCEFCLK
].m1
=
2192 (int32_t)avfs_params
.ulDcefclk2GfxclkM1
;
2193 pp_table
->DisplayClock2Gfxclk
[DSPCLK_DCEFCLK
].m2
=
2194 (int32_t)avfs_params
.ulDcefclk2GfxclkM2
;
2195 pp_table
->DisplayClock2Gfxclk
[DSPCLK_DCEFCLK
].b
=
2196 (int32_t)avfs_params
.ulDcefclk2GfxclkB
;
2199 pp_table
->DisplayClock2Gfxclk
[DSPCLK_DCEFCLK
].m1_shift
= 24;
2200 pp_table
->DisplayClock2Gfxclk
[DSPCLK_DCEFCLK
].m2_shift
= 12;
2201 pp_table
->DisplayClock2Gfxclk
[DSPCLK_DCEFCLK
].b_shift
= 12;
2203 if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT
!=
2204 data
->pixel_clk_quad_eqn_a
) &&
2205 (PPREGKEY_VEGA10QUADRATICEQUATION_DFLT
!=
2206 data
->pixel_clk_quad_eqn_b
)) {
2207 pp_table
->DisplayClock2Gfxclk
[DSPCLK_PIXCLK
].m1
=
2208 (int32_t)data
->pixel_clk_quad_eqn_a
;
2209 pp_table
->DisplayClock2Gfxclk
[DSPCLK_PIXCLK
].m2
=
2210 (int32_t)data
->pixel_clk_quad_eqn_b
;
2211 pp_table
->DisplayClock2Gfxclk
[DSPCLK_PIXCLK
].b
=
2212 (int32_t)data
->pixel_clk_quad_eqn_c
;
2214 pp_table
->DisplayClock2Gfxclk
[DSPCLK_PIXCLK
].m1
=
2215 (int32_t)avfs_params
.ulPixelclk2GfxclkM1
;
2216 pp_table
->DisplayClock2Gfxclk
[DSPCLK_PIXCLK
].m2
=
2217 (int32_t)avfs_params
.ulPixelclk2GfxclkM2
;
2218 pp_table
->DisplayClock2Gfxclk
[DSPCLK_PIXCLK
].b
=
2219 (int32_t)avfs_params
.ulPixelclk2GfxclkB
;
2222 pp_table
->DisplayClock2Gfxclk
[DSPCLK_PIXCLK
].m1_shift
= 24;
2223 pp_table
->DisplayClock2Gfxclk
[DSPCLK_PIXCLK
].m2_shift
= 12;
2224 pp_table
->DisplayClock2Gfxclk
[DSPCLK_PIXCLK
].b_shift
= 12;
2225 if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT
!=
2226 data
->phy_clk_quad_eqn_a
) &&
2227 (PPREGKEY_VEGA10QUADRATICEQUATION_DFLT
!=
2228 data
->phy_clk_quad_eqn_b
)) {
2229 pp_table
->DisplayClock2Gfxclk
[DSPCLK_PHYCLK
].m1
=
2230 (int32_t)data
->phy_clk_quad_eqn_a
;
2231 pp_table
->DisplayClock2Gfxclk
[DSPCLK_PHYCLK
].m2
=
2232 (int32_t)data
->phy_clk_quad_eqn_b
;
2233 pp_table
->DisplayClock2Gfxclk
[DSPCLK_PHYCLK
].b
=
2234 (int32_t)data
->phy_clk_quad_eqn_c
;
2236 pp_table
->DisplayClock2Gfxclk
[DSPCLK_PHYCLK
].m1
=
2237 (int32_t)avfs_params
.ulPhyclk2GfxclkM1
;
2238 pp_table
->DisplayClock2Gfxclk
[DSPCLK_PHYCLK
].m2
=
2239 (int32_t)avfs_params
.ulPhyclk2GfxclkM2
;
2240 pp_table
->DisplayClock2Gfxclk
[DSPCLK_PHYCLK
].b
=
2241 (int32_t)avfs_params
.ulPhyclk2GfxclkB
;
2244 pp_table
->DisplayClock2Gfxclk
[DSPCLK_PHYCLK
].m1_shift
= 24;
2245 pp_table
->DisplayClock2Gfxclk
[DSPCLK_PHYCLK
].m2_shift
= 12;
2246 pp_table
->DisplayClock2Gfxclk
[DSPCLK_PHYCLK
].b_shift
= 12;
2248 pp_table
->AcgBtcGbVdroopTable
.a0
= avfs_params
.ulAcgGbVdroopTableA0
;
2249 pp_table
->AcgBtcGbVdroopTable
.a0_shift
= 20;
2250 pp_table
->AcgBtcGbVdroopTable
.a1
= avfs_params
.ulAcgGbVdroopTableA1
;
2251 pp_table
->AcgBtcGbVdroopTable
.a1_shift
= 20;
2252 pp_table
->AcgBtcGbVdroopTable
.a2
= avfs_params
.ulAcgGbVdroopTableA2
;
2253 pp_table
->AcgBtcGbVdroopTable
.a2_shift
= 20;
2255 pp_table
->AcgAvfsGb
.m1
= avfs_params
.ulAcgGbFuseTableM1
;
2256 pp_table
->AcgAvfsGb
.m2
= avfs_params
.ulAcgGbFuseTableM2
;
2257 pp_table
->AcgAvfsGb
.b
= avfs_params
.ulAcgGbFuseTableB
;
2258 pp_table
->AcgAvfsGb
.m1_shift
= 0;
2259 pp_table
->AcgAvfsGb
.m2_shift
= 0;
2260 pp_table
->AcgAvfsGb
.b_shift
= 0;
2263 data
->smu_features
[GNLD_AVFS
].supported
= false;
2270 static int vega10_acg_enable(struct pp_hwmgr
*hwmgr
)
2272 struct vega10_hwmgr
*data
=
2273 (struct vega10_hwmgr
*)(hwmgr
->backend
);
2274 uint32_t agc_btc_response
;
2276 if (data
->smu_features
[GNLD_ACG
].supported
) {
2277 if (0 == vega10_enable_smc_features(hwmgr
, true,
2278 data
->smu_features
[GNLD_DPM_PREFETCHER
].smu_feature_bitmap
))
2279 data
->smu_features
[GNLD_DPM_PREFETCHER
].enabled
= true;
2281 smum_send_msg_to_smc(hwmgr
, PPSMC_MSG_InitializeAcg
);
2283 smum_send_msg_to_smc(hwmgr
, PPSMC_MSG_RunAcgBtc
);
2284 vega10_read_arg_from_smc(hwmgr
, &agc_btc_response
);
2286 if (1 == agc_btc_response
) {
2287 if (1 == data
->acg_loop_state
)
2288 smum_send_msg_to_smc(hwmgr
, PPSMC_MSG_RunAcgInClosedLoop
);
2289 else if (2 == data
->acg_loop_state
)
2290 smum_send_msg_to_smc(hwmgr
, PPSMC_MSG_RunAcgInOpenLoop
);
2291 if (0 == vega10_enable_smc_features(hwmgr
, true,
2292 data
->smu_features
[GNLD_ACG
].smu_feature_bitmap
))
2293 data
->smu_features
[GNLD_ACG
].enabled
= true;
2295 pr_info("[ACG_Enable] ACG BTC Returned Failed Status!\n");
2296 data
->smu_features
[GNLD_ACG
].enabled
= false;
2303 static int vega10_acg_disable(struct pp_hwmgr
*hwmgr
)
2305 struct vega10_hwmgr
*data
=
2306 (struct vega10_hwmgr
*)(hwmgr
->backend
);
2308 if (data
->smu_features
[GNLD_ACG
].supported
&&
2309 data
->smu_features
[GNLD_ACG
].enabled
)
2310 if (!vega10_enable_smc_features(hwmgr
, false,
2311 data
->smu_features
[GNLD_ACG
].smu_feature_bitmap
))
2312 data
->smu_features
[GNLD_ACG
].enabled
= false;
2317 static int vega10_populate_gpio_parameters(struct pp_hwmgr
*hwmgr
)
2319 struct vega10_hwmgr
*data
=
2320 (struct vega10_hwmgr
*)(hwmgr
->backend
);
2321 PPTable_t
*pp_table
= &(data
->smc_state_table
.pp_table
);
2322 struct pp_atomfwctrl_gpio_parameters gpio_params
= {0};
2325 result
= pp_atomfwctrl_get_gpio_information(hwmgr
, &gpio_params
);
2327 if (PP_CAP(PHM_PlatformCaps_RegulatorHot
) &&
2328 data
->registry_data
.regulator_hot_gpio_support
) {
2329 pp_table
->VR0HotGpio
= gpio_params
.ucVR0HotGpio
;
2330 pp_table
->VR0HotPolarity
= gpio_params
.ucVR0HotPolarity
;
2331 pp_table
->VR1HotGpio
= gpio_params
.ucVR1HotGpio
;
2332 pp_table
->VR1HotPolarity
= gpio_params
.ucVR1HotPolarity
;
2334 pp_table
->VR0HotGpio
= 0;
2335 pp_table
->VR0HotPolarity
= 0;
2336 pp_table
->VR1HotGpio
= 0;
2337 pp_table
->VR1HotPolarity
= 0;
2340 if (PP_CAP(PHM_PlatformCaps_AutomaticDCTransition
) &&
2341 data
->registry_data
.ac_dc_switch_gpio_support
) {
2342 pp_table
->AcDcGpio
= gpio_params
.ucAcDcGpio
;
2343 pp_table
->AcDcPolarity
= gpio_params
.ucAcDcPolarity
;
2345 pp_table
->AcDcGpio
= 0;
2346 pp_table
->AcDcPolarity
= 0;
2353 static int vega10_avfs_enable(struct pp_hwmgr
*hwmgr
, bool enable
)
2355 struct vega10_hwmgr
*data
=
2356 (struct vega10_hwmgr
*)(hwmgr
->backend
);
2358 if (data
->smu_features
[GNLD_AVFS
].supported
) {
2360 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr
,
2362 data
->smu_features
[GNLD_AVFS
].smu_feature_bitmap
),
2363 "[avfs_control] Attempt to Enable AVFS feature Failed!",
2365 data
->smu_features
[GNLD_AVFS
].enabled
= true;
2367 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr
,
2369 data
->smu_features
[GNLD_AVFS
].smu_feature_bitmap
),
2370 "[avfs_control] Attempt to Disable AVFS feature Failed!",
2372 data
->smu_features
[GNLD_AVFS
].enabled
= false;
2379 static int vega10_populate_and_upload_avfs_fuse_override(struct pp_hwmgr
*hwmgr
)
2383 uint64_t serial_number
= 0;
2384 uint32_t top32
, bottom32
;
2385 struct phm_fuses_default fuse
;
2387 struct vega10_hwmgr
*data
= (struct vega10_hwmgr
*)(hwmgr
->backend
);
2388 AvfsFuseOverride_t
*avfs_fuse_table
= &(data
->smc_state_table
.avfs_fuse_override_table
);
2390 smum_send_msg_to_smc(hwmgr
, PPSMC_MSG_ReadSerialNumTop32
);
2391 vega10_read_arg_from_smc(hwmgr
, &top32
);
2393 smum_send_msg_to_smc(hwmgr
, PPSMC_MSG_ReadSerialNumBottom32
);
2394 vega10_read_arg_from_smc(hwmgr
, &bottom32
);
2396 serial_number
= ((uint64_t)bottom32
<< 32) | top32
;
2398 if (pp_override_get_default_fuse_value(serial_number
, &fuse
) == 0) {
2399 avfs_fuse_table
->VFT0_b
= fuse
.VFT0_b
;
2400 avfs_fuse_table
->VFT0_m1
= fuse
.VFT0_m1
;
2401 avfs_fuse_table
->VFT0_m2
= fuse
.VFT0_m2
;
2402 avfs_fuse_table
->VFT1_b
= fuse
.VFT1_b
;
2403 avfs_fuse_table
->VFT1_m1
= fuse
.VFT1_m1
;
2404 avfs_fuse_table
->VFT1_m2
= fuse
.VFT1_m2
;
2405 avfs_fuse_table
->VFT2_b
= fuse
.VFT2_b
;
2406 avfs_fuse_table
->VFT2_m1
= fuse
.VFT2_m1
;
2407 avfs_fuse_table
->VFT2_m2
= fuse
.VFT2_m2
;
2408 result
= vega10_copy_table_to_smc(hwmgr
,
2409 (uint8_t *)avfs_fuse_table
, AVFSFUSETABLE
);
2410 PP_ASSERT_WITH_CODE(!result
,
2411 "Failed to upload FuseOVerride!",
2418 static int vega10_save_default_power_profile(struct pp_hwmgr
*hwmgr
)
2420 struct vega10_hwmgr
*data
= (struct vega10_hwmgr
*)(hwmgr
->backend
);
2421 struct vega10_single_dpm_table
*dpm_table
= &(data
->dpm_table
.gfx_table
);
2424 hwmgr
->default_gfx_power_profile
.type
= AMD_PP_GFX_PROFILE
;
2425 hwmgr
->default_compute_power_profile
.type
= AMD_PP_COMPUTE_PROFILE
;
2427 /* Optimize compute power profile: Use only highest
2428 * 2 power levels (if more than 2 are available)
2430 if (dpm_table
->count
> 2)
2431 min_level
= dpm_table
->count
- 2;
2432 else if (dpm_table
->count
== 2)
2437 hwmgr
->default_compute_power_profile
.min_sclk
=
2438 dpm_table
->dpm_levels
[min_level
].value
;
2440 hwmgr
->gfx_power_profile
= hwmgr
->default_gfx_power_profile
;
2441 hwmgr
->compute_power_profile
= hwmgr
->default_compute_power_profile
;
2447 * Initializes the SMC table and uploads it
2449 * @param hwmgr the address of the powerplay hardware manager.
2450 * @param pInput the pointer to input data (PowerState)
2453 static int vega10_init_smc_table(struct pp_hwmgr
*hwmgr
)
2456 struct vega10_hwmgr
*data
=
2457 (struct vega10_hwmgr
*)(hwmgr
->backend
);
2458 struct phm_ppt_v2_information
*table_info
=
2459 (struct phm_ppt_v2_information
*)(hwmgr
->pptable
);
2460 PPTable_t
*pp_table
= &(data
->smc_state_table
.pp_table
);
2461 struct pp_atomfwctrl_voltage_table voltage_table
;
2462 struct pp_atomfwctrl_bios_boot_up_values boot_up_values
;
2464 result
= vega10_setup_default_dpm_tables(hwmgr
);
2465 PP_ASSERT_WITH_CODE(!result
,
2466 "Failed to setup default DPM tables!",
2469 pp_atomfwctrl_get_voltage_table_v4(hwmgr
, VOLTAGE_TYPE_VDDC
,
2470 VOLTAGE_OBJ_SVID2
, &voltage_table
);
2471 pp_table
->MaxVidStep
= voltage_table
.max_vid_step
;
2473 pp_table
->GfxDpmVoltageMode
=
2474 (uint8_t)(table_info
->uc_gfx_dpm_voltage_mode
);
2475 pp_table
->SocDpmVoltageMode
=
2476 (uint8_t)(table_info
->uc_soc_dpm_voltage_mode
);
2477 pp_table
->UclkDpmVoltageMode
=
2478 (uint8_t)(table_info
->uc_uclk_dpm_voltage_mode
);
2479 pp_table
->UvdDpmVoltageMode
=
2480 (uint8_t)(table_info
->uc_uvd_dpm_voltage_mode
);
2481 pp_table
->VceDpmVoltageMode
=
2482 (uint8_t)(table_info
->uc_vce_dpm_voltage_mode
);
2483 pp_table
->Mp0DpmVoltageMode
=
2484 (uint8_t)(table_info
->uc_mp0_dpm_voltage_mode
);
2486 pp_table
->DisplayDpmVoltageMode
=
2487 (uint8_t)(table_info
->uc_dcef_dpm_voltage_mode
);
2489 data
->vddc_voltage_table
.psi0_enable
= voltage_table
.psi0_enable
;
2490 data
->vddc_voltage_table
.psi1_enable
= voltage_table
.psi1_enable
;
2492 if (data
->registry_data
.ulv_support
&&
2493 table_info
->us_ulv_voltage_offset
) {
2494 result
= vega10_populate_ulv_state(hwmgr
);
2495 PP_ASSERT_WITH_CODE(!result
,
2496 "Failed to initialize ULV state!",
2500 result
= vega10_populate_smc_link_levels(hwmgr
);
2501 PP_ASSERT_WITH_CODE(!result
,
2502 "Failed to initialize Link Level!",
2505 result
= vega10_populate_all_graphic_levels(hwmgr
);
2506 PP_ASSERT_WITH_CODE(!result
,
2507 "Failed to initialize Graphics Level!",
2510 result
= vega10_populate_all_memory_levels(hwmgr
);
2511 PP_ASSERT_WITH_CODE(!result
,
2512 "Failed to initialize Memory Level!",
2515 result
= vega10_populate_all_display_clock_levels(hwmgr
);
2516 PP_ASSERT_WITH_CODE(!result
,
2517 "Failed to initialize Display Level!",
2520 result
= vega10_populate_smc_vce_levels(hwmgr
);
2521 PP_ASSERT_WITH_CODE(!result
,
2522 "Failed to initialize VCE Level!",
2525 result
= vega10_populate_smc_uvd_levels(hwmgr
);
2526 PP_ASSERT_WITH_CODE(!result
,
2527 "Failed to initialize UVD Level!",
2530 if (data
->registry_data
.clock_stretcher_support
) {
2531 result
= vega10_populate_clock_stretcher_table(hwmgr
);
2532 PP_ASSERT_WITH_CODE(!result
,
2533 "Failed to populate Clock Stretcher Table!",
2537 result
= pp_atomfwctrl_get_vbios_bootup_values(hwmgr
, &boot_up_values
);
2539 data
->vbios_boot_state
.vddc
= boot_up_values
.usVddc
;
2540 data
->vbios_boot_state
.vddci
= boot_up_values
.usVddci
;
2541 data
->vbios_boot_state
.mvddc
= boot_up_values
.usMvddc
;
2542 data
->vbios_boot_state
.gfx_clock
= boot_up_values
.ulGfxClk
;
2543 data
->vbios_boot_state
.mem_clock
= boot_up_values
.ulUClk
;
2544 data
->vbios_boot_state
.soc_clock
= boot_up_values
.ulSocClk
;
2545 data
->vbios_boot_state
.dcef_clock
= boot_up_values
.ulDCEFClk
;
2546 if (0 != boot_up_values
.usVddc
) {
2547 smum_send_msg_to_smc_with_parameter(hwmgr
,
2548 PPSMC_MSG_SetFloorSocVoltage
,
2549 (boot_up_values
.usVddc
* 4));
2550 data
->vbios_boot_state
.bsoc_vddc_lock
= true;
2552 data
->vbios_boot_state
.bsoc_vddc_lock
= false;
2554 smum_send_msg_to_smc_with_parameter(hwmgr
,
2555 PPSMC_MSG_SetMinDeepSleepDcefclk
,
2556 (uint32_t)(data
->vbios_boot_state
.dcef_clock
/ 100));
2559 result
= vega10_populate_avfs_parameters(hwmgr
);
2560 PP_ASSERT_WITH_CODE(!result
,
2561 "Failed to initialize AVFS Parameters!",
2564 result
= vega10_populate_gpio_parameters(hwmgr
);
2565 PP_ASSERT_WITH_CODE(!result
,
2566 "Failed to initialize GPIO Parameters!",
2569 pp_table
->GfxclkAverageAlpha
= (uint8_t)
2570 (data
->gfxclk_average_alpha
);
2571 pp_table
->SocclkAverageAlpha
= (uint8_t)
2572 (data
->socclk_average_alpha
);
2573 pp_table
->UclkAverageAlpha
= (uint8_t)
2574 (data
->uclk_average_alpha
);
2575 pp_table
->GfxActivityAverageAlpha
= (uint8_t)
2576 (data
->gfx_activity_average_alpha
);
2578 vega10_populate_and_upload_avfs_fuse_override(hwmgr
);
2580 result
= vega10_copy_table_to_smc(hwmgr
,
2581 (uint8_t *)pp_table
, PPTABLE
);
2582 PP_ASSERT_WITH_CODE(!result
,
2583 "Failed to upload PPtable!", return result
);
2585 result
= vega10_avfs_enable(hwmgr
, true);
2586 PP_ASSERT_WITH_CODE(!result
, "Attempt to enable AVFS feature Failed!",
2588 vega10_acg_enable(hwmgr
);
2589 vega10_save_default_power_profile(hwmgr
);
2594 static int vega10_enable_thermal_protection(struct pp_hwmgr
*hwmgr
)
2596 struct vega10_hwmgr
*data
= (struct vega10_hwmgr
*)(hwmgr
->backend
);
2598 if (data
->smu_features
[GNLD_THERMAL
].supported
) {
2599 if (data
->smu_features
[GNLD_THERMAL
].enabled
)
2600 pr_info("THERMAL Feature Already enabled!");
2602 PP_ASSERT_WITH_CODE(
2603 !vega10_enable_smc_features(hwmgr
,
2605 data
->smu_features
[GNLD_THERMAL
].smu_feature_bitmap
),
2606 "Enable THERMAL Feature Failed!",
2608 data
->smu_features
[GNLD_THERMAL
].enabled
= true;
2614 static int vega10_disable_thermal_protection(struct pp_hwmgr
*hwmgr
)
2616 struct vega10_hwmgr
*data
= (struct vega10_hwmgr
*)(hwmgr
->backend
);
2618 if (data
->smu_features
[GNLD_THERMAL
].supported
) {
2619 if (!data
->smu_features
[GNLD_THERMAL
].enabled
)
2620 pr_info("THERMAL Feature Already disabled!");
2622 PP_ASSERT_WITH_CODE(
2623 !vega10_enable_smc_features(hwmgr
,
2625 data
->smu_features
[GNLD_THERMAL
].smu_feature_bitmap
),
2626 "disable THERMAL Feature Failed!",
2628 data
->smu_features
[GNLD_THERMAL
].enabled
= false;
2634 static int vega10_enable_vrhot_feature(struct pp_hwmgr
*hwmgr
)
2636 struct vega10_hwmgr
*data
=
2637 (struct vega10_hwmgr
*)(hwmgr
->backend
);
2639 if (PP_CAP(PHM_PlatformCaps_RegulatorHot
)) {
2640 if (data
->smu_features
[GNLD_VR0HOT
].supported
) {
2641 PP_ASSERT_WITH_CODE(
2642 !vega10_enable_smc_features(hwmgr
,
2644 data
->smu_features
[GNLD_VR0HOT
].smu_feature_bitmap
),
2645 "Attempt to Enable VR0 Hot feature Failed!",
2647 data
->smu_features
[GNLD_VR0HOT
].enabled
= true;
2649 if (data
->smu_features
[GNLD_VR1HOT
].supported
) {
2650 PP_ASSERT_WITH_CODE(
2651 !vega10_enable_smc_features(hwmgr
,
2653 data
->smu_features
[GNLD_VR1HOT
].smu_feature_bitmap
),
2654 "Attempt to Enable VR0 Hot feature Failed!",
2656 data
->smu_features
[GNLD_VR1HOT
].enabled
= true;
2663 static int vega10_enable_ulv(struct pp_hwmgr
*hwmgr
)
2665 struct vega10_hwmgr
*data
=
2666 (struct vega10_hwmgr
*)(hwmgr
->backend
);
2668 if (data
->registry_data
.ulv_support
) {
2669 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr
,
2670 true, data
->smu_features
[GNLD_ULV
].smu_feature_bitmap
),
2671 "Enable ULV Feature Failed!",
2673 data
->smu_features
[GNLD_ULV
].enabled
= true;
2679 static int vega10_disable_ulv(struct pp_hwmgr
*hwmgr
)
2681 struct vega10_hwmgr
*data
=
2682 (struct vega10_hwmgr
*)(hwmgr
->backend
);
2684 if (data
->registry_data
.ulv_support
) {
2685 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr
,
2686 false, data
->smu_features
[GNLD_ULV
].smu_feature_bitmap
),
2687 "disable ULV Feature Failed!",
2689 data
->smu_features
[GNLD_ULV
].enabled
= false;
2695 static int vega10_enable_deep_sleep_master_switch(struct pp_hwmgr
*hwmgr
)
2697 struct vega10_hwmgr
*data
=
2698 (struct vega10_hwmgr
*)(hwmgr
->backend
);
2700 if (data
->smu_features
[GNLD_DS_GFXCLK
].supported
) {
2701 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr
,
2702 true, data
->smu_features
[GNLD_DS_GFXCLK
].smu_feature_bitmap
),
2703 "Attempt to Enable DS_GFXCLK Feature Failed!",
2705 data
->smu_features
[GNLD_DS_GFXCLK
].enabled
= true;
2708 if (data
->smu_features
[GNLD_DS_SOCCLK
].supported
) {
2709 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr
,
2710 true, data
->smu_features
[GNLD_DS_SOCCLK
].smu_feature_bitmap
),
2711 "Attempt to Enable DS_SOCCLK Feature Failed!",
2713 data
->smu_features
[GNLD_DS_SOCCLK
].enabled
= true;
2716 if (data
->smu_features
[GNLD_DS_LCLK
].supported
) {
2717 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr
,
2718 true, data
->smu_features
[GNLD_DS_LCLK
].smu_feature_bitmap
),
2719 "Attempt to Enable DS_LCLK Feature Failed!",
2721 data
->smu_features
[GNLD_DS_LCLK
].enabled
= true;
2724 if (data
->smu_features
[GNLD_DS_DCEFCLK
].supported
) {
2725 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr
,
2726 true, data
->smu_features
[GNLD_DS_DCEFCLK
].smu_feature_bitmap
),
2727 "Attempt to Enable DS_DCEFCLK Feature Failed!",
2729 data
->smu_features
[GNLD_DS_DCEFCLK
].enabled
= true;
2735 static int vega10_disable_deep_sleep_master_switch(struct pp_hwmgr
*hwmgr
)
2737 struct vega10_hwmgr
*data
=
2738 (struct vega10_hwmgr
*)(hwmgr
->backend
);
2740 if (data
->smu_features
[GNLD_DS_GFXCLK
].supported
) {
2741 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr
,
2742 false, data
->smu_features
[GNLD_DS_GFXCLK
].smu_feature_bitmap
),
2743 "Attempt to disable DS_GFXCLK Feature Failed!",
2745 data
->smu_features
[GNLD_DS_GFXCLK
].enabled
= false;
2748 if (data
->smu_features
[GNLD_DS_SOCCLK
].supported
) {
2749 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr
,
2750 false, data
->smu_features
[GNLD_DS_SOCCLK
].smu_feature_bitmap
),
2751 "Attempt to disable DS_ Feature Failed!",
2753 data
->smu_features
[GNLD_DS_SOCCLK
].enabled
= false;
2756 if (data
->smu_features
[GNLD_DS_LCLK
].supported
) {
2757 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr
,
2758 false, data
->smu_features
[GNLD_DS_LCLK
].smu_feature_bitmap
),
2759 "Attempt to disable DS_LCLK Feature Failed!",
2761 data
->smu_features
[GNLD_DS_LCLK
].enabled
= false;
2764 if (data
->smu_features
[GNLD_DS_DCEFCLK
].supported
) {
2765 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr
,
2766 false, data
->smu_features
[GNLD_DS_DCEFCLK
].smu_feature_bitmap
),
2767 "Attempt to disable DS_DCEFCLK Feature Failed!",
2769 data
->smu_features
[GNLD_DS_DCEFCLK
].enabled
= false;
2775 static int vega10_stop_dpm(struct pp_hwmgr
*hwmgr
, uint32_t bitmap
)
2777 struct vega10_hwmgr
*data
=
2778 (struct vega10_hwmgr
*)(hwmgr
->backend
);
2779 uint32_t i
, feature_mask
= 0;
2782 if(data
->smu_features
[GNLD_LED_DISPLAY
].supported
== true){
2783 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr
,
2784 false, data
->smu_features
[GNLD_LED_DISPLAY
].smu_feature_bitmap
),
2785 "Attempt to disable LED DPM feature failed!", return -EINVAL
);
2786 data
->smu_features
[GNLD_LED_DISPLAY
].enabled
= false;
2789 for (i
= 0; i
< GNLD_DPM_MAX
; i
++) {
2790 if (data
->smu_features
[i
].smu_feature_bitmap
& bitmap
) {
2791 if (data
->smu_features
[i
].supported
) {
2792 if (data
->smu_features
[i
].enabled
) {
2793 feature_mask
|= data
->smu_features
[i
].
2795 data
->smu_features
[i
].enabled
= false;
2801 vega10_enable_smc_features(hwmgr
, false, feature_mask
);
2807 * @brief Tell SMC to enabled the supported DPMs.
2809 * @param hwmgr - the address of the powerplay hardware manager.
2810 * @Param bitmap - bitmap for the features to enabled.
2811 * @return 0 on at least one DPM is successfully enabled.
2813 static int vega10_start_dpm(struct pp_hwmgr
*hwmgr
, uint32_t bitmap
)
2815 struct vega10_hwmgr
*data
=
2816 (struct vega10_hwmgr
*)(hwmgr
->backend
);
2817 uint32_t i
, feature_mask
= 0;
2819 for (i
= 0; i
< GNLD_DPM_MAX
; i
++) {
2820 if (data
->smu_features
[i
].smu_feature_bitmap
& bitmap
) {
2821 if (data
->smu_features
[i
].supported
) {
2822 if (!data
->smu_features
[i
].enabled
) {
2823 feature_mask
|= data
->smu_features
[i
].
2825 data
->smu_features
[i
].enabled
= true;
2831 if (vega10_enable_smc_features(hwmgr
,
2832 true, feature_mask
)) {
2833 for (i
= 0; i
< GNLD_DPM_MAX
; i
++) {
2834 if (data
->smu_features
[i
].smu_feature_bitmap
&
2836 data
->smu_features
[i
].enabled
= false;
2840 if(data
->smu_features
[GNLD_LED_DISPLAY
].supported
== true){
2841 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr
,
2842 true, data
->smu_features
[GNLD_LED_DISPLAY
].smu_feature_bitmap
),
2843 "Attempt to Enable LED DPM feature Failed!", return -EINVAL
);
2844 data
->smu_features
[GNLD_LED_DISPLAY
].enabled
= true;
2847 if (data
->vbios_boot_state
.bsoc_vddc_lock
) {
2848 smum_send_msg_to_smc_with_parameter(hwmgr
,
2849 PPSMC_MSG_SetFloorSocVoltage
, 0);
2850 data
->vbios_boot_state
.bsoc_vddc_lock
= false;
2853 if (PP_CAP(PHM_PlatformCaps_Falcon_QuickTransition
)) {
2854 if (data
->smu_features
[GNLD_ACDC
].supported
) {
2855 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr
,
2856 true, data
->smu_features
[GNLD_ACDC
].smu_feature_bitmap
),
2857 "Attempt to Enable DS_GFXCLK Feature Failed!",
2859 data
->smu_features
[GNLD_ACDC
].enabled
= true;
2866 static int vega10_enable_dpm_tasks(struct pp_hwmgr
*hwmgr
)
2868 struct vega10_hwmgr
*data
=
2869 (struct vega10_hwmgr
*)(hwmgr
->backend
);
2870 int tmp_result
, result
= 0;
2872 tmp_result
= smum_send_msg_to_smc_with_parameter(hwmgr
,
2873 PPSMC_MSG_ConfigureTelemetry
, data
->config_telemetry
);
2874 PP_ASSERT_WITH_CODE(!tmp_result
,
2875 "Failed to configure telemetry!",
2878 smum_send_msg_to_smc_with_parameter(hwmgr
,
2879 PPSMC_MSG_NumOfDisplays
, 0);
2881 tmp_result
= (!vega10_is_dpm_running(hwmgr
)) ? 0 : -1;
2882 PP_ASSERT_WITH_CODE(!tmp_result
,
2883 "DPM is already running right , skipping re-enablement!",
2886 if ((data
->smu_version
== 0x001c2c00) ||
2887 (data
->smu_version
== 0x001c2d00)) {
2888 tmp_result
= smum_send_msg_to_smc_with_parameter(hwmgr
,
2889 PPSMC_MSG_UpdatePkgPwrPidAlpha
, 1);
2890 PP_ASSERT_WITH_CODE(!tmp_result
,
2891 "Failed to set package power PID!",
2895 tmp_result
= vega10_construct_voltage_tables(hwmgr
);
2896 PP_ASSERT_WITH_CODE(!tmp_result
,
2897 "Failed to contruct voltage tables!",
2898 result
= tmp_result
);
2900 tmp_result
= vega10_init_smc_table(hwmgr
);
2901 PP_ASSERT_WITH_CODE(!tmp_result
,
2902 "Failed to initialize SMC table!",
2903 result
= tmp_result
);
2905 if (PP_CAP(PHM_PlatformCaps_ThermalController
)) {
2906 tmp_result
= vega10_enable_thermal_protection(hwmgr
);
2907 PP_ASSERT_WITH_CODE(!tmp_result
,
2908 "Failed to enable thermal protection!",
2909 result
= tmp_result
);
2912 tmp_result
= vega10_enable_vrhot_feature(hwmgr
);
2913 PP_ASSERT_WITH_CODE(!tmp_result
,
2914 "Failed to enable VR hot feature!",
2915 result
= tmp_result
);
2917 tmp_result
= vega10_enable_deep_sleep_master_switch(hwmgr
);
2918 PP_ASSERT_WITH_CODE(!tmp_result
,
2919 "Failed to enable deep sleep master switch!",
2920 result
= tmp_result
);
2922 tmp_result
= vega10_start_dpm(hwmgr
, SMC_DPM_FEATURES
);
2923 PP_ASSERT_WITH_CODE(!tmp_result
,
2924 "Failed to start DPM!", result
= tmp_result
);
2926 /* enable didt, do not abort if failed didt */
2927 tmp_result
= vega10_enable_didt_config(hwmgr
);
2928 PP_ASSERT(!tmp_result
,
2929 "Failed to enable didt config!");
2931 tmp_result
= vega10_enable_power_containment(hwmgr
);
2932 PP_ASSERT_WITH_CODE(!tmp_result
,
2933 "Failed to enable power containment!",
2934 result
= tmp_result
);
2936 tmp_result
= vega10_power_control_set_level(hwmgr
);
2937 PP_ASSERT_WITH_CODE(!tmp_result
,
2938 "Failed to power control set level!",
2939 result
= tmp_result
);
2941 tmp_result
= vega10_enable_ulv(hwmgr
);
2942 PP_ASSERT_WITH_CODE(!tmp_result
,
2943 "Failed to enable ULV!",
2944 result
= tmp_result
);
2949 static int vega10_get_power_state_size(struct pp_hwmgr
*hwmgr
)
2951 return sizeof(struct vega10_power_state
);
2954 static int vega10_get_pp_table_entry_callback_func(struct pp_hwmgr
*hwmgr
,
2955 void *state
, struct pp_power_state
*power_state
,
2956 void *pp_table
, uint32_t classification_flag
)
2958 ATOM_Vega10_GFXCLK_Dependency_Record_V2
*patom_record_V2
;
2959 struct vega10_power_state
*vega10_power_state
=
2960 cast_phw_vega10_power_state(&(power_state
->hardware
));
2961 struct vega10_performance_level
*performance_level
;
2962 ATOM_Vega10_State
*state_entry
= (ATOM_Vega10_State
*)state
;
2963 ATOM_Vega10_POWERPLAYTABLE
*powerplay_table
=
2964 (ATOM_Vega10_POWERPLAYTABLE
*)pp_table
;
2965 ATOM_Vega10_SOCCLK_Dependency_Table
*socclk_dep_table
=
2966 (ATOM_Vega10_SOCCLK_Dependency_Table
*)
2967 (((unsigned long)powerplay_table
) +
2968 le16_to_cpu(powerplay_table
->usSocclkDependencyTableOffset
));
2969 ATOM_Vega10_GFXCLK_Dependency_Table
*gfxclk_dep_table
=
2970 (ATOM_Vega10_GFXCLK_Dependency_Table
*)
2971 (((unsigned long)powerplay_table
) +
2972 le16_to_cpu(powerplay_table
->usGfxclkDependencyTableOffset
));
2973 ATOM_Vega10_MCLK_Dependency_Table
*mclk_dep_table
=
2974 (ATOM_Vega10_MCLK_Dependency_Table
*)
2975 (((unsigned long)powerplay_table
) +
2976 le16_to_cpu(powerplay_table
->usMclkDependencyTableOffset
));
2979 /* The following fields are not initialized here:
2980 * id orderedList allStatesList
2982 power_state
->classification
.ui_label
=
2983 (le16_to_cpu(state_entry
->usClassification
) &
2984 ATOM_PPLIB_CLASSIFICATION_UI_MASK
) >>
2985 ATOM_PPLIB_CLASSIFICATION_UI_SHIFT
;
2986 power_state
->classification
.flags
= classification_flag
;
2987 /* NOTE: There is a classification2 flag in BIOS
2988 * that is not being used right now
2990 power_state
->classification
.temporary_state
= false;
2991 power_state
->classification
.to_be_deleted
= false;
2993 power_state
->validation
.disallowOnDC
=
2994 ((le32_to_cpu(state_entry
->ulCapsAndSettings
) &
2995 ATOM_Vega10_DISALLOW_ON_DC
) != 0);
2997 power_state
->display
.disableFrameModulation
= false;
2998 power_state
->display
.limitRefreshrate
= false;
2999 power_state
->display
.enableVariBright
=
3000 ((le32_to_cpu(state_entry
->ulCapsAndSettings
) &
3001 ATOM_Vega10_ENABLE_VARIBRIGHT
) != 0);
3003 power_state
->validation
.supportedPowerLevels
= 0;
3004 power_state
->uvd_clocks
.VCLK
= 0;
3005 power_state
->uvd_clocks
.DCLK
= 0;
3006 power_state
->temperatures
.min
= 0;
3007 power_state
->temperatures
.max
= 0;
3009 performance_level
= &(vega10_power_state
->performance_levels
3010 [vega10_power_state
->performance_level_count
++]);
3012 PP_ASSERT_WITH_CODE(
3013 (vega10_power_state
->performance_level_count
<
3014 NUM_GFXCLK_DPM_LEVELS
),
3015 "Performance levels exceeds SMC limit!",
3018 PP_ASSERT_WITH_CODE(
3019 (vega10_power_state
->performance_level_count
<=
3020 hwmgr
->platform_descriptor
.
3021 hardwareActivityPerformanceLevels
),
3022 "Performance levels exceeds Driver limit!",
3025 /* Performance levels are arranged from low to high. */
3026 performance_level
->soc_clock
= socclk_dep_table
->entries
3027 [state_entry
->ucSocClockIndexLow
].ulClk
;
3028 performance_level
->gfx_clock
= gfxclk_dep_table
->entries
3029 [state_entry
->ucGfxClockIndexLow
].ulClk
;
3030 performance_level
->mem_clock
= mclk_dep_table
->entries
3031 [state_entry
->ucMemClockIndexLow
].ulMemClk
;
3033 performance_level
= &(vega10_power_state
->performance_levels
3034 [vega10_power_state
->performance_level_count
++]);
3035 performance_level
->soc_clock
= socclk_dep_table
->entries
3036 [state_entry
->ucSocClockIndexHigh
].ulClk
;
3037 if (gfxclk_dep_table
->ucRevId
== 0) {
3038 performance_level
->gfx_clock
= gfxclk_dep_table
->entries
3039 [state_entry
->ucGfxClockIndexHigh
].ulClk
;
3040 } else if (gfxclk_dep_table
->ucRevId
== 1) {
3041 patom_record_V2
= (ATOM_Vega10_GFXCLK_Dependency_Record_V2
*)gfxclk_dep_table
->entries
;
3042 performance_level
->gfx_clock
= patom_record_V2
[state_entry
->ucGfxClockIndexHigh
].ulClk
;
3045 performance_level
->mem_clock
= mclk_dep_table
->entries
3046 [state_entry
->ucMemClockIndexHigh
].ulMemClk
;
3050 static int vega10_get_pp_table_entry(struct pp_hwmgr
*hwmgr
,
3051 unsigned long entry_index
, struct pp_power_state
*state
)
3054 struct vega10_power_state
*ps
;
3056 state
->hardware
.magic
= PhwVega10_Magic
;
3058 ps
= cast_phw_vega10_power_state(&state
->hardware
);
3060 result
= vega10_get_powerplay_table_entry(hwmgr
, entry_index
, state
,
3061 vega10_get_pp_table_entry_callback_func
);
3064 * This is the earliest time we have all the dependency table
3065 * and the VBIOS boot state
3067 /* set DC compatible flag if this state supports DC */
3068 if (!state
->validation
.disallowOnDC
)
3069 ps
->dc_compatible
= true;
3071 ps
->uvd_clks
.vclk
= state
->uvd_clocks
.VCLK
;
3072 ps
->uvd_clks
.dclk
= state
->uvd_clocks
.DCLK
;
/* No boot-state patching is required on Vega10; present for the hwmgr
 * interface. Always succeeds.
 */
static int vega10_patch_boot_state(struct pp_hwmgr *hwmgr,
		struct pp_hw_power_state *hw_ps)
{
	return 0;
}
3083 static int vega10_apply_state_adjust_rules(struct pp_hwmgr
*hwmgr
,
3084 struct pp_power_state
*request_ps
,
3085 const struct pp_power_state
*current_ps
)
3087 struct vega10_power_state
*vega10_ps
=
3088 cast_phw_vega10_power_state(&request_ps
->hardware
);
3091 struct PP_Clocks minimum_clocks
= {0};
3092 bool disable_mclk_switching
;
3093 bool disable_mclk_switching_for_frame_lock
;
3094 bool disable_mclk_switching_for_vr
;
3095 bool force_mclk_high
;
3096 struct cgs_display_info info
= {0};
3097 const struct phm_clock_and_voltage_limits
*max_limits
;
3099 struct vega10_hwmgr
*data
= (struct vega10_hwmgr
*)(hwmgr
->backend
);
3100 struct phm_ppt_v2_information
*table_info
=
3101 (struct phm_ppt_v2_information
*)(hwmgr
->pptable
);
3103 uint32_t stable_pstate_sclk_dpm_percentage
;
3104 uint32_t stable_pstate_sclk
= 0, stable_pstate_mclk
= 0;
3107 data
->battery_state
= (PP_StateUILabel_Battery
==
3108 request_ps
->classification
.ui_label
);
3110 if (vega10_ps
->performance_level_count
!= 2)
3111 pr_info("VI should always have 2 performance levels");
3113 max_limits
= (PP_PowerSource_AC
== hwmgr
->power_source
) ?
3114 &(hwmgr
->dyn_state
.max_clock_voltage_on_ac
) :
3115 &(hwmgr
->dyn_state
.max_clock_voltage_on_dc
);
3117 /* Cap clock DPM tables at DC MAX if it is in DC. */
3118 if (PP_PowerSource_DC
== hwmgr
->power_source
) {
3119 for (i
= 0; i
< vega10_ps
->performance_level_count
; i
++) {
3120 if (vega10_ps
->performance_levels
[i
].mem_clock
>
3122 vega10_ps
->performance_levels
[i
].mem_clock
=
3124 if (vega10_ps
->performance_levels
[i
].gfx_clock
>
3126 vega10_ps
->performance_levels
[i
].gfx_clock
=
3131 vega10_ps
->vce_clks
.evclk
= hwmgr
->vce_arbiter
.evclk
;
3132 vega10_ps
->vce_clks
.ecclk
= hwmgr
->vce_arbiter
.ecclk
;
3134 cgs_get_active_displays_info(hwmgr
->device
, &info
);
3136 /* result = PHM_CheckVBlankTime(hwmgr, &vblankTooShort);*/
3137 minimum_clocks
.engineClock
= hwmgr
->display_config
.min_core_set_clock
;
3138 minimum_clocks
.memoryClock
= hwmgr
->display_config
.min_mem_set_clock
;
3140 if (PP_CAP(PHM_PlatformCaps_StablePState
)) {
3141 stable_pstate_sclk_dpm_percentage
=
3142 data
->registry_data
.stable_pstate_sclk_dpm_percentage
;
3143 PP_ASSERT_WITH_CODE(
3144 data
->registry_data
.stable_pstate_sclk_dpm_percentage
>= 1 &&
3145 data
->registry_data
.stable_pstate_sclk_dpm_percentage
<= 100,
3146 "percent sclk value must range from 1% to 100%, setting default value",
3147 stable_pstate_sclk_dpm_percentage
= 75);
3149 max_limits
= &(hwmgr
->dyn_state
.max_clock_voltage_on_ac
);
3150 stable_pstate_sclk
= (max_limits
->sclk
*
3151 stable_pstate_sclk_dpm_percentage
) / 100;
3153 for (count
= table_info
->vdd_dep_on_sclk
->count
- 1;
3154 count
>= 0; count
--) {
3155 if (stable_pstate_sclk
>=
3156 table_info
->vdd_dep_on_sclk
->entries
[count
].clk
) {
3157 stable_pstate_sclk
=
3158 table_info
->vdd_dep_on_sclk
->entries
[count
].clk
;
3164 stable_pstate_sclk
= table_info
->vdd_dep_on_sclk
->entries
[0].clk
;
3166 stable_pstate_mclk
= max_limits
->mclk
;
3168 minimum_clocks
.engineClock
= stable_pstate_sclk
;
3169 minimum_clocks
.memoryClock
= stable_pstate_mclk
;
3172 if (minimum_clocks
.engineClock
< hwmgr
->gfx_arbiter
.sclk
)
3173 minimum_clocks
.engineClock
= hwmgr
->gfx_arbiter
.sclk
;
3175 if (minimum_clocks
.memoryClock
< hwmgr
->gfx_arbiter
.mclk
)
3176 minimum_clocks
.memoryClock
= hwmgr
->gfx_arbiter
.mclk
;
3178 vega10_ps
->sclk_threshold
= hwmgr
->gfx_arbiter
.sclk_threshold
;
3180 if (hwmgr
->gfx_arbiter
.sclk_over_drive
) {
3181 PP_ASSERT_WITH_CODE((hwmgr
->gfx_arbiter
.sclk_over_drive
<=
3182 hwmgr
->platform_descriptor
.overdriveLimit
.engineClock
),
3183 "Overdrive sclk exceeds limit",
3184 hwmgr
->gfx_arbiter
.sclk_over_drive
=
3185 hwmgr
->platform_descriptor
.overdriveLimit
.engineClock
);
3187 if (hwmgr
->gfx_arbiter
.sclk_over_drive
>= hwmgr
->gfx_arbiter
.sclk
)
3188 vega10_ps
->performance_levels
[1].gfx_clock
=
3189 hwmgr
->gfx_arbiter
.sclk_over_drive
;
3192 if (hwmgr
->gfx_arbiter
.mclk_over_drive
) {
3193 PP_ASSERT_WITH_CODE((hwmgr
->gfx_arbiter
.mclk_over_drive
<=
3194 hwmgr
->platform_descriptor
.overdriveLimit
.memoryClock
),
3195 "Overdrive mclk exceeds limit",
3196 hwmgr
->gfx_arbiter
.mclk_over_drive
=
3197 hwmgr
->platform_descriptor
.overdriveLimit
.memoryClock
);
3199 if (hwmgr
->gfx_arbiter
.mclk_over_drive
>= hwmgr
->gfx_arbiter
.mclk
)
3200 vega10_ps
->performance_levels
[1].mem_clock
=
3201 hwmgr
->gfx_arbiter
.mclk_over_drive
;
3204 disable_mclk_switching_for_frame_lock
= phm_cap_enabled(
3205 hwmgr
->platform_descriptor
.platformCaps
,
3206 PHM_PlatformCaps_DisableMclkSwitchingForFrameLock
);
3207 disable_mclk_switching_for_vr
= PP_CAP(PHM_PlatformCaps_DisableMclkSwitchForVR
);
3208 force_mclk_high
= PP_CAP(PHM_PlatformCaps_ForceMclkHigh
);
3210 disable_mclk_switching
= (info
.display_count
> 1) ||
3211 disable_mclk_switching_for_frame_lock
||
3212 disable_mclk_switching_for_vr
||
3215 sclk
= vega10_ps
->performance_levels
[0].gfx_clock
;
3216 mclk
= vega10_ps
->performance_levels
[0].mem_clock
;
3218 if (sclk
< minimum_clocks
.engineClock
)
3219 sclk
= (minimum_clocks
.engineClock
> max_limits
->sclk
) ?
3220 max_limits
->sclk
: minimum_clocks
.engineClock
;
3222 if (mclk
< minimum_clocks
.memoryClock
)
3223 mclk
= (minimum_clocks
.memoryClock
> max_limits
->mclk
) ?
3224 max_limits
->mclk
: minimum_clocks
.memoryClock
;
3226 vega10_ps
->performance_levels
[0].gfx_clock
= sclk
;
3227 vega10_ps
->performance_levels
[0].mem_clock
= mclk
;
3229 if (vega10_ps
->performance_levels
[1].gfx_clock
<
3230 vega10_ps
->performance_levels
[0].gfx_clock
)
3231 vega10_ps
->performance_levels
[0].gfx_clock
=
3232 vega10_ps
->performance_levels
[1].gfx_clock
;
3234 if (disable_mclk_switching
) {
3235 /* Set Mclk the max of level 0 and level 1 */
3236 if (mclk
< vega10_ps
->performance_levels
[1].mem_clock
)
3237 mclk
= vega10_ps
->performance_levels
[1].mem_clock
;
3239 /* Find the lowest MCLK frequency that is within
3240 * the tolerable latency defined in DAL
3243 for (i
= 0; i
< data
->mclk_latency_table
.count
; i
++) {
3244 if ((data
->mclk_latency_table
.entries
[i
].latency
<= latency
) &&
3245 (data
->mclk_latency_table
.entries
[i
].frequency
>=
3246 vega10_ps
->performance_levels
[0].mem_clock
) &&
3247 (data
->mclk_latency_table
.entries
[i
].frequency
<=
3248 vega10_ps
->performance_levels
[1].mem_clock
))
3249 mclk
= data
->mclk_latency_table
.entries
[i
].frequency
;
3251 vega10_ps
->performance_levels
[0].mem_clock
= mclk
;
3253 if (vega10_ps
->performance_levels
[1].mem_clock
<
3254 vega10_ps
->performance_levels
[0].mem_clock
)
3255 vega10_ps
->performance_levels
[0].mem_clock
=
3256 vega10_ps
->performance_levels
[1].mem_clock
;
3259 if (PP_CAP(PHM_PlatformCaps_StablePState
)) {
3260 for (i
= 0; i
< vega10_ps
->performance_level_count
; i
++) {
3261 vega10_ps
->performance_levels
[i
].gfx_clock
= stable_pstate_sclk
;
3262 vega10_ps
->performance_levels
[i
].mem_clock
= stable_pstate_mclk
;
3269 static int vega10_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr
*hwmgr
, const void *input
)
3271 const struct phm_set_power_state_input
*states
=
3272 (const struct phm_set_power_state_input
*)input
;
3273 const struct vega10_power_state
*vega10_ps
=
3274 cast_const_phw_vega10_power_state(states
->pnew_state
);
3275 struct vega10_hwmgr
*data
=
3276 (struct vega10_hwmgr
*)(hwmgr
->backend
);
3277 struct vega10_single_dpm_table
*sclk_table
=
3278 &(data
->dpm_table
.gfx_table
);
3279 uint32_t sclk
= vega10_ps
->performance_levels
3280 [vega10_ps
->performance_level_count
- 1].gfx_clock
;
3281 struct vega10_single_dpm_table
*mclk_table
=
3282 &(data
->dpm_table
.mem_table
);
3283 uint32_t mclk
= vega10_ps
->performance_levels
3284 [vega10_ps
->performance_level_count
- 1].mem_clock
;
3285 struct PP_Clocks min_clocks
= {0};
3287 struct cgs_display_info info
= {0};
3289 data
->need_update_dpm_table
= 0;
3291 if (PP_CAP(PHM_PlatformCaps_ODNinACSupport
) ||
3292 PP_CAP(PHM_PlatformCaps_ODNinDCSupport
)) {
3293 for (i
= 0; i
< sclk_table
->count
; i
++) {
3294 if (sclk
== sclk_table
->dpm_levels
[i
].value
)
3298 if (!(data
->apply_overdrive_next_settings_mask
&
3299 DPMTABLE_OD_UPDATE_SCLK
) && i
>= sclk_table
->count
) {
3300 /* Check SCLK in DAL's minimum clocks
3301 * in case DeepSleep divider update is required.
3303 if (data
->display_timing
.min_clock_in_sr
!=
3304 min_clocks
.engineClockInSR
&&
3305 (min_clocks
.engineClockInSR
>=
3306 VEGA10_MINIMUM_ENGINE_CLOCK
||
3307 data
->display_timing
.min_clock_in_sr
>=
3308 VEGA10_MINIMUM_ENGINE_CLOCK
))
3309 data
->need_update_dpm_table
|= DPMTABLE_UPDATE_SCLK
;
3312 cgs_get_active_displays_info(hwmgr
->device
, &info
);
3314 if (data
->display_timing
.num_existing_displays
!=
3316 data
->need_update_dpm_table
|= DPMTABLE_UPDATE_MCLK
;
3318 for (i
= 0; i
< sclk_table
->count
; i
++) {
3319 if (sclk
== sclk_table
->dpm_levels
[i
].value
)
3323 if (i
>= sclk_table
->count
)
3324 data
->need_update_dpm_table
|= DPMTABLE_OD_UPDATE_SCLK
;
3326 /* Check SCLK in DAL's minimum clocks
3327 * in case DeepSleep divider update is required.
3329 if (data
->display_timing
.min_clock_in_sr
!=
3330 min_clocks
.engineClockInSR
&&
3331 (min_clocks
.engineClockInSR
>=
3332 VEGA10_MINIMUM_ENGINE_CLOCK
||
3333 data
->display_timing
.min_clock_in_sr
>=
3334 VEGA10_MINIMUM_ENGINE_CLOCK
))
3335 data
->need_update_dpm_table
|= DPMTABLE_UPDATE_SCLK
;
3338 for (i
= 0; i
< mclk_table
->count
; i
++) {
3339 if (mclk
== mclk_table
->dpm_levels
[i
].value
)
3343 cgs_get_active_displays_info(hwmgr
->device
, &info
);
3345 if (i
>= mclk_table
->count
)
3346 data
->need_update_dpm_table
|= DPMTABLE_OD_UPDATE_MCLK
;
3348 if (data
->display_timing
.num_existing_displays
!=
3349 info
.display_count
||
3350 i
>= mclk_table
->count
)
3351 data
->need_update_dpm_table
|= DPMTABLE_UPDATE_MCLK
;
3356 static int vega10_populate_and_upload_sclk_mclk_dpm_levels(
3357 struct pp_hwmgr
*hwmgr
, const void *input
)
3360 const struct phm_set_power_state_input
*states
=
3361 (const struct phm_set_power_state_input
*)input
;
3362 const struct vega10_power_state
*vega10_ps
=
3363 cast_const_phw_vega10_power_state(states
->pnew_state
);
3364 struct vega10_hwmgr
*data
=
3365 (struct vega10_hwmgr
*)(hwmgr
->backend
);
3366 uint32_t sclk
= vega10_ps
->performance_levels
3367 [vega10_ps
->performance_level_count
- 1].gfx_clock
;
3368 uint32_t mclk
= vega10_ps
->performance_levels
3369 [vega10_ps
->performance_level_count
- 1].mem_clock
;
3370 struct vega10_dpm_table
*dpm_table
= &data
->dpm_table
;
3371 struct vega10_dpm_table
*golden_dpm_table
=
3372 &data
->golden_dpm_table
;
3373 uint32_t dpm_count
, clock_percent
;
3376 if (PP_CAP(PHM_PlatformCaps_ODNinACSupport
) ||
3377 PP_CAP(PHM_PlatformCaps_ODNinDCSupport
)) {
3379 if (!data
->need_update_dpm_table
&&
3380 !data
->apply_optimized_settings
&&
3381 !data
->apply_overdrive_next_settings_mask
)
3384 if (data
->apply_overdrive_next_settings_mask
&
3385 DPMTABLE_OD_UPDATE_SCLK
) {
3387 dpm_count
< dpm_table
->gfx_table
.count
;
3389 dpm_table
->gfx_table
.dpm_levels
[dpm_count
].enabled
=
3390 data
->odn_dpm_table
.odn_core_clock_dpm_levels
.
3391 performance_level_entries
[dpm_count
].enabled
;
3392 dpm_table
->gfx_table
.dpm_levels
[dpm_count
].value
=
3393 data
->odn_dpm_table
.odn_core_clock_dpm_levels
.
3394 performance_level_entries
[dpm_count
].clock
;
3398 if (data
->apply_overdrive_next_settings_mask
&
3399 DPMTABLE_OD_UPDATE_MCLK
) {
3401 dpm_count
< dpm_table
->mem_table
.count
;
3403 dpm_table
->mem_table
.dpm_levels
[dpm_count
].enabled
=
3404 data
->odn_dpm_table
.odn_memory_clock_dpm_levels
.
3405 performance_level_entries
[dpm_count
].enabled
;
3406 dpm_table
->mem_table
.dpm_levels
[dpm_count
].value
=
3407 data
->odn_dpm_table
.odn_memory_clock_dpm_levels
.
3408 performance_level_entries
[dpm_count
].clock
;
3412 if ((data
->need_update_dpm_table
& DPMTABLE_UPDATE_SCLK
) ||
3413 data
->apply_optimized_settings
||
3414 (data
->apply_overdrive_next_settings_mask
&
3415 DPMTABLE_OD_UPDATE_SCLK
)) {
3416 result
= vega10_populate_all_graphic_levels(hwmgr
);
3417 PP_ASSERT_WITH_CODE(!result
,
3418 "Failed to populate SCLK during \
3419 PopulateNewDPMClocksStates Function!",
3423 if ((data
->need_update_dpm_table
& DPMTABLE_UPDATE_MCLK
) ||
3424 (data
->apply_overdrive_next_settings_mask
&
3425 DPMTABLE_OD_UPDATE_MCLK
)){
3426 result
= vega10_populate_all_memory_levels(hwmgr
);
3427 PP_ASSERT_WITH_CODE(!result
,
3428 "Failed to populate MCLK during \
3429 PopulateNewDPMClocksStates Function!",
3433 if (!data
->need_update_dpm_table
&&
3434 !data
->apply_optimized_settings
)
3437 if (data
->need_update_dpm_table
& DPMTABLE_OD_UPDATE_SCLK
&&
3438 data
->smu_features
[GNLD_DPM_GFXCLK
].supported
) {
3440 gfx_table
.dpm_levels
[dpm_table
->gfx_table
.count
- 1].
3442 if (PP_CAP(PHM_PlatformCaps_OD6PlusinACSupport
) ||
3443 PP_CAP(PHM_PlatformCaps_OD6PlusinDCSupport
)) {
3444 /* Need to do calculation based on the golden DPM table
3445 * as the Heatmap GPU Clock axis is also based on
3446 * the default values
3448 PP_ASSERT_WITH_CODE(
3449 golden_dpm_table
->gfx_table
.dpm_levels
3450 [golden_dpm_table
->gfx_table
.count
- 1].value
,
3454 dpm_count
= dpm_table
->gfx_table
.count
< 2 ?
3455 0 : dpm_table
->gfx_table
.count
- 2;
3456 for (i
= dpm_count
; i
> 1; i
--) {
3457 if (sclk
> golden_dpm_table
->gfx_table
.dpm_levels
3458 [golden_dpm_table
->gfx_table
.count
- 1].value
) {
3460 ((sclk
- golden_dpm_table
->gfx_table
.dpm_levels
3461 [golden_dpm_table
->gfx_table
.count
- 1].value
) *
3463 golden_dpm_table
->gfx_table
.dpm_levels
3464 [golden_dpm_table
->gfx_table
.count
- 1].value
;
3466 dpm_table
->gfx_table
.dpm_levels
[i
].value
=
3467 golden_dpm_table
->gfx_table
.dpm_levels
[i
].value
+
3468 (golden_dpm_table
->gfx_table
.dpm_levels
[i
].value
*
3469 clock_percent
) / 100;
3470 } else if (golden_dpm_table
->
3471 gfx_table
.dpm_levels
[dpm_table
->gfx_table
.count
-1].value
>
3474 ((golden_dpm_table
->gfx_table
.dpm_levels
3475 [golden_dpm_table
->gfx_table
.count
- 1].value
-
3477 golden_dpm_table
->gfx_table
.dpm_levels
3478 [golden_dpm_table
->gfx_table
.count
-1].value
;
3480 dpm_table
->gfx_table
.dpm_levels
[i
].value
=
3481 golden_dpm_table
->gfx_table
.dpm_levels
[i
].value
-
3482 (golden_dpm_table
->gfx_table
.dpm_levels
[i
].value
*
3483 clock_percent
) / 100;
3485 dpm_table
->gfx_table
.dpm_levels
[i
].value
=
3486 golden_dpm_table
->gfx_table
.dpm_levels
[i
].value
;
3491 if (data
->need_update_dpm_table
& DPMTABLE_OD_UPDATE_MCLK
&&
3492 data
->smu_features
[GNLD_DPM_UCLK
].supported
) {
3494 mem_table
.dpm_levels
[dpm_table
->mem_table
.count
- 1].
3497 if (PP_CAP(PHM_PlatformCaps_OD6PlusinACSupport
) ||
3498 PP_CAP(PHM_PlatformCaps_OD6PlusinDCSupport
)) {
3500 PP_ASSERT_WITH_CODE(
3501 golden_dpm_table
->mem_table
.dpm_levels
3502 [golden_dpm_table
->mem_table
.count
- 1].value
,
3506 dpm_count
= dpm_table
->mem_table
.count
< 2 ?
3507 0 : dpm_table
->mem_table
.count
- 2;
3508 for (i
= dpm_count
; i
> 1; i
--) {
3509 if (mclk
> golden_dpm_table
->mem_table
.dpm_levels
3510 [golden_dpm_table
->mem_table
.count
-1].value
) {
3511 clock_percent
= ((mclk
-
3512 golden_dpm_table
->mem_table
.dpm_levels
3513 [golden_dpm_table
->mem_table
.count
-1].value
) *
3515 golden_dpm_table
->mem_table
.dpm_levels
3516 [golden_dpm_table
->mem_table
.count
-1].value
;
3518 dpm_table
->mem_table
.dpm_levels
[i
].value
=
3519 golden_dpm_table
->mem_table
.dpm_levels
[i
].value
+
3520 (golden_dpm_table
->mem_table
.dpm_levels
[i
].value
*
3521 clock_percent
) / 100;
3522 } else if (golden_dpm_table
->mem_table
.dpm_levels
3523 [dpm_table
->mem_table
.count
-1].value
> mclk
) {
3524 clock_percent
= ((golden_dpm_table
->mem_table
.dpm_levels
3525 [golden_dpm_table
->mem_table
.count
-1].value
- mclk
) *
3527 golden_dpm_table
->mem_table
.dpm_levels
3528 [golden_dpm_table
->mem_table
.count
-1].value
;
3530 dpm_table
->mem_table
.dpm_levels
[i
].value
=
3531 golden_dpm_table
->mem_table
.dpm_levels
[i
].value
-
3532 (golden_dpm_table
->mem_table
.dpm_levels
[i
].value
*
3533 clock_percent
) / 100;
3535 dpm_table
->mem_table
.dpm_levels
[i
].value
=
3536 golden_dpm_table
->mem_table
.dpm_levels
[i
].value
;
3541 if ((data
->need_update_dpm_table
&
3542 (DPMTABLE_OD_UPDATE_SCLK
+ DPMTABLE_UPDATE_SCLK
)) ||
3543 data
->apply_optimized_settings
) {
3544 result
= vega10_populate_all_graphic_levels(hwmgr
);
3545 PP_ASSERT_WITH_CODE(!result
,
3546 "Failed to populate SCLK during \
3547 PopulateNewDPMClocksStates Function!",
3551 if (data
->need_update_dpm_table
&
3552 (DPMTABLE_OD_UPDATE_MCLK
+ DPMTABLE_UPDATE_MCLK
)) {
3553 result
= vega10_populate_all_memory_levels(hwmgr
);
3554 PP_ASSERT_WITH_CODE(!result
,
3555 "Failed to populate MCLK during \
3556 PopulateNewDPMClocksStates Function!",
3563 static int vega10_trim_single_dpm_states(struct pp_hwmgr
*hwmgr
,
3564 struct vega10_single_dpm_table
*dpm_table
,
3565 uint32_t low_limit
, uint32_t high_limit
)
3569 for (i
= 0; i
< dpm_table
->count
; i
++) {
3570 if ((dpm_table
->dpm_levels
[i
].value
< low_limit
) ||
3571 (dpm_table
->dpm_levels
[i
].value
> high_limit
))
3572 dpm_table
->dpm_levels
[i
].enabled
= false;
3574 dpm_table
->dpm_levels
[i
].enabled
= true;
3579 static int vega10_trim_single_dpm_states_with_mask(struct pp_hwmgr
*hwmgr
,
3580 struct vega10_single_dpm_table
*dpm_table
,
3581 uint32_t low_limit
, uint32_t high_limit
,
3582 uint32_t disable_dpm_mask
)
3586 for (i
= 0; i
< dpm_table
->count
; i
++) {
3587 if ((dpm_table
->dpm_levels
[i
].value
< low_limit
) ||
3588 (dpm_table
->dpm_levels
[i
].value
> high_limit
))
3589 dpm_table
->dpm_levels
[i
].enabled
= false;
3590 else if (!((1 << i
) & disable_dpm_mask
))
3591 dpm_table
->dpm_levels
[i
].enabled
= false;
3593 dpm_table
->dpm_levels
[i
].enabled
= true;
3598 static int vega10_trim_dpm_states(struct pp_hwmgr
*hwmgr
,
3599 const struct vega10_power_state
*vega10_ps
)
3601 struct vega10_hwmgr
*data
=
3602 (struct vega10_hwmgr
*)(hwmgr
->backend
);
3603 uint32_t high_limit_count
;
3605 PP_ASSERT_WITH_CODE((vega10_ps
->performance_level_count
>= 1),
3606 "power state did not have any performance level",
3609 high_limit_count
= (vega10_ps
->performance_level_count
== 1) ? 0 : 1;
3611 vega10_trim_single_dpm_states(hwmgr
,
3612 &(data
->dpm_table
.soc_table
),
3613 vega10_ps
->performance_levels
[0].soc_clock
,
3614 vega10_ps
->performance_levels
[high_limit_count
].soc_clock
);
3616 vega10_trim_single_dpm_states_with_mask(hwmgr
,
3617 &(data
->dpm_table
.gfx_table
),
3618 vega10_ps
->performance_levels
[0].gfx_clock
,
3619 vega10_ps
->performance_levels
[high_limit_count
].gfx_clock
,
3620 data
->disable_dpm_mask
);
3622 vega10_trim_single_dpm_states(hwmgr
,
3623 &(data
->dpm_table
.mem_table
),
3624 vega10_ps
->performance_levels
[0].mem_clock
,
3625 vega10_ps
->performance_levels
[high_limit_count
].mem_clock
);
3630 static uint32_t vega10_find_lowest_dpm_level(
3631 struct vega10_single_dpm_table
*table
)
3635 for (i
= 0; i
< table
->count
; i
++) {
3636 if (table
->dpm_levels
[i
].enabled
)
3643 static uint32_t vega10_find_highest_dpm_level(
3644 struct vega10_single_dpm_table
*table
)
3648 if (table
->count
<= MAX_REGULAR_DPM_NUMBER
) {
3649 for (i
= table
->count
; i
> 0; i
--) {
3650 if (table
->dpm_levels
[i
- 1].enabled
)
3654 pr_info("DPM Table Has Too Many Entries!");
3655 return MAX_REGULAR_DPM_NUMBER
- 1;
/* Placeholder: DAL minimum-voltage requests are not applied on Vega10.
 * Kept as a hook so the bootup/max-level upload paths share one call site.
 */
static void vega10_apply_dal_minimum_voltage_request(
		struct pp_hwmgr *hwmgr)
{
	return;
}
3667 static int vega10_get_soc_index_for_max_uclk(struct pp_hwmgr
*hwmgr
)
3669 struct phm_ppt_v1_clock_voltage_dependency_table
*vdd_dep_table_on_mclk
;
3670 struct phm_ppt_v2_information
*table_info
=
3671 (struct phm_ppt_v2_information
*)(hwmgr
->pptable
);
3673 vdd_dep_table_on_mclk
= table_info
->vdd_dep_on_mclk
;
3675 return vdd_dep_table_on_mclk
->entries
[NUM_UCLK_DPM_LEVELS
- 1].vddInd
+ 1;
3678 static int vega10_upload_dpm_bootup_level(struct pp_hwmgr
*hwmgr
)
3680 struct vega10_hwmgr
*data
=
3681 (struct vega10_hwmgr
*)(hwmgr
->backend
);
3682 uint32_t socclk_idx
;
3684 vega10_apply_dal_minimum_voltage_request(hwmgr
);
3686 if (!data
->registry_data
.sclk_dpm_key_disabled
) {
3687 if (data
->smc_state_table
.gfx_boot_level
!=
3688 data
->dpm_table
.gfx_table
.dpm_state
.soft_min_level
) {
3689 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
3691 PPSMC_MSG_SetSoftMinGfxclkByIndex
,
3692 data
->smc_state_table
.gfx_boot_level
),
3693 "Failed to set soft min sclk index!",
3695 data
->dpm_table
.gfx_table
.dpm_state
.soft_min_level
=
3696 data
->smc_state_table
.gfx_boot_level
;
3700 if (!data
->registry_data
.mclk_dpm_key_disabled
) {
3701 if (data
->smc_state_table
.mem_boot_level
!=
3702 data
->dpm_table
.mem_table
.dpm_state
.soft_min_level
) {
3703 if (data
->smc_state_table
.mem_boot_level
== NUM_UCLK_DPM_LEVELS
- 1) {
3704 socclk_idx
= vega10_get_soc_index_for_max_uclk(hwmgr
);
3705 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
3707 PPSMC_MSG_SetSoftMinSocclkByIndex
,
3709 "Failed to set soft min uclk index!",
3712 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
3714 PPSMC_MSG_SetSoftMinUclkByIndex
,
3715 data
->smc_state_table
.mem_boot_level
),
3716 "Failed to set soft min uclk index!",
3719 data
->dpm_table
.mem_table
.dpm_state
.soft_min_level
=
3720 data
->smc_state_table
.mem_boot_level
;
3727 static int vega10_upload_dpm_max_level(struct pp_hwmgr
*hwmgr
)
3729 struct vega10_hwmgr
*data
=
3730 (struct vega10_hwmgr
*)(hwmgr
->backend
);
3732 vega10_apply_dal_minimum_voltage_request(hwmgr
);
3734 if (!data
->registry_data
.sclk_dpm_key_disabled
) {
3735 if (data
->smc_state_table
.gfx_max_level
!=
3736 data
->dpm_table
.gfx_table
.dpm_state
.soft_max_level
) {
3737 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
3739 PPSMC_MSG_SetSoftMaxGfxclkByIndex
,
3740 data
->smc_state_table
.gfx_max_level
),
3741 "Failed to set soft max sclk index!",
3743 data
->dpm_table
.gfx_table
.dpm_state
.soft_max_level
=
3744 data
->smc_state_table
.gfx_max_level
;
3748 if (!data
->registry_data
.mclk_dpm_key_disabled
) {
3749 if (data
->smc_state_table
.mem_max_level
!=
3750 data
->dpm_table
.mem_table
.dpm_state
.soft_max_level
) {
3751 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
3753 PPSMC_MSG_SetSoftMaxUclkByIndex
,
3754 data
->smc_state_table
.mem_max_level
),
3755 "Failed to set soft max mclk index!",
3757 data
->dpm_table
.mem_table
.dpm_state
.soft_max_level
=
3758 data
->smc_state_table
.mem_max_level
;
3765 static int vega10_generate_dpm_level_enable_mask(
3766 struct pp_hwmgr
*hwmgr
, const void *input
)
3768 struct vega10_hwmgr
*data
=
3769 (struct vega10_hwmgr
*)(hwmgr
->backend
);
3770 const struct phm_set_power_state_input
*states
=
3771 (const struct phm_set_power_state_input
*)input
;
3772 const struct vega10_power_state
*vega10_ps
=
3773 cast_const_phw_vega10_power_state(states
->pnew_state
);
3776 PP_ASSERT_WITH_CODE(!vega10_trim_dpm_states(hwmgr
, vega10_ps
),
3777 "Attempt to Trim DPM States Failed!",
3780 data
->smc_state_table
.gfx_boot_level
=
3781 vega10_find_lowest_dpm_level(&(data
->dpm_table
.gfx_table
));
3782 data
->smc_state_table
.gfx_max_level
=
3783 vega10_find_highest_dpm_level(&(data
->dpm_table
.gfx_table
));
3784 data
->smc_state_table
.mem_boot_level
=
3785 vega10_find_lowest_dpm_level(&(data
->dpm_table
.mem_table
));
3786 data
->smc_state_table
.mem_max_level
=
3787 vega10_find_highest_dpm_level(&(data
->dpm_table
.mem_table
));
3789 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr
),
3790 "Attempt to upload DPM Bootup Levels Failed!",
3792 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr
),
3793 "Attempt to upload DPM Max Levels Failed!",
3795 for(i
= data
->smc_state_table
.gfx_boot_level
; i
< data
->smc_state_table
.gfx_max_level
; i
++)
3796 data
->dpm_table
.gfx_table
.dpm_levels
[i
].enabled
= true;
3799 for(i
= data
->smc_state_table
.mem_boot_level
; i
< data
->smc_state_table
.mem_max_level
; i
++)
3800 data
->dpm_table
.mem_table
.dpm_levels
[i
].enabled
= true;
3805 int vega10_enable_disable_vce_dpm(struct pp_hwmgr
*hwmgr
, bool enable
)
3807 struct vega10_hwmgr
*data
=
3808 (struct vega10_hwmgr
*)(hwmgr
->backend
);
3810 if (data
->smu_features
[GNLD_DPM_VCE
].supported
) {
3811 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr
,
3813 data
->smu_features
[GNLD_DPM_VCE
].smu_feature_bitmap
),
3814 "Attempt to Enable/Disable DPM VCE Failed!",
3816 data
->smu_features
[GNLD_DPM_VCE
].enabled
= enable
;
3822 static int vega10_update_sclk_threshold(struct pp_hwmgr
*hwmgr
)
3824 struct vega10_hwmgr
*data
=
3825 (struct vega10_hwmgr
*)(hwmgr
->backend
);
3827 uint32_t low_sclk_interrupt_threshold
= 0;
3829 if (PP_CAP(PHM_PlatformCaps_SclkThrottleLowNotification
) &&
3830 (hwmgr
->gfx_arbiter
.sclk_threshold
!=
3831 data
->low_sclk_interrupt_threshold
)) {
3832 data
->low_sclk_interrupt_threshold
=
3833 hwmgr
->gfx_arbiter
.sclk_threshold
;
3834 low_sclk_interrupt_threshold
=
3835 data
->low_sclk_interrupt_threshold
;
3837 data
->smc_state_table
.pp_table
.LowGfxclkInterruptThreshold
=
3838 cpu_to_le32(low_sclk_interrupt_threshold
);
3840 /* This message will also enable SmcToHost Interrupt */
3841 result
= smum_send_msg_to_smc_with_parameter(hwmgr
,
3842 PPSMC_MSG_SetLowGfxclkInterruptThreshold
,
3843 (uint32_t)low_sclk_interrupt_threshold
);
3849 static int vega10_set_power_state_tasks(struct pp_hwmgr
*hwmgr
,
3852 int tmp_result
, result
= 0;
3853 struct vega10_hwmgr
*data
=
3854 (struct vega10_hwmgr
*)(hwmgr
->backend
);
3855 PPTable_t
*pp_table
= &(data
->smc_state_table
.pp_table
);
3857 tmp_result
= vega10_find_dpm_states_clocks_in_dpm_table(hwmgr
, input
);
3858 PP_ASSERT_WITH_CODE(!tmp_result
,
3859 "Failed to find DPM states clocks in DPM table!",
3860 result
= tmp_result
);
3862 tmp_result
= vega10_populate_and_upload_sclk_mclk_dpm_levels(hwmgr
, input
);
3863 PP_ASSERT_WITH_CODE(!tmp_result
,
3864 "Failed to populate and upload SCLK MCLK DPM levels!",
3865 result
= tmp_result
);
3867 tmp_result
= vega10_generate_dpm_level_enable_mask(hwmgr
, input
);
3868 PP_ASSERT_WITH_CODE(!tmp_result
,
3869 "Failed to generate DPM level enabled mask!",
3870 result
= tmp_result
);
3872 tmp_result
= vega10_update_sclk_threshold(hwmgr
);
3873 PP_ASSERT_WITH_CODE(!tmp_result
,
3874 "Failed to update SCLK threshold!",
3875 result
= tmp_result
);
3877 result
= vega10_copy_table_to_smc(hwmgr
,
3878 (uint8_t *)pp_table
, PPTABLE
);
3879 PP_ASSERT_WITH_CODE(!result
,
3880 "Failed to upload PPtable!", return result
);
3882 data
->apply_optimized_settings
= false;
3883 data
->apply_overdrive_next_settings_mask
= 0;
3888 static uint32_t vega10_dpm_get_sclk(struct pp_hwmgr
*hwmgr
, bool low
)
3890 struct pp_power_state
*ps
;
3891 struct vega10_power_state
*vega10_ps
;
3896 ps
= hwmgr
->request_ps
;
3901 vega10_ps
= cast_phw_vega10_power_state(&ps
->hardware
);
3904 return vega10_ps
->performance_levels
[0].gfx_clock
;
3906 return vega10_ps
->performance_levels
3907 [vega10_ps
->performance_level_count
- 1].gfx_clock
;
3910 static uint32_t vega10_dpm_get_mclk(struct pp_hwmgr
*hwmgr
, bool low
)
3912 struct pp_power_state
*ps
;
3913 struct vega10_power_state
*vega10_ps
;
3918 ps
= hwmgr
->request_ps
;
3923 vega10_ps
= cast_phw_vega10_power_state(&ps
->hardware
);
3926 return vega10_ps
->performance_levels
[0].mem_clock
;
3928 return vega10_ps
->performance_levels
3929 [vega10_ps
->performance_level_count
-1].mem_clock
;
3932 static int vega10_get_gpu_power(struct pp_hwmgr
*hwmgr
,
3933 struct pp_gpu_power
*query
)
3937 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr
,
3938 PPSMC_MSG_GetCurrPkgPwr
),
3939 "Failed to get current package power!",
3942 vega10_read_arg_from_smc(hwmgr
, &value
);
3943 /* power value is an integer */
3944 query
->average_gpu_power
= value
<< 8;
3949 static int vega10_read_sensor(struct pp_hwmgr
*hwmgr
, int idx
,
3950 void *value
, int *size
)
3952 uint32_t sclk_idx
, mclk_idx
, activity_percent
= 0;
3953 struct vega10_hwmgr
*data
= (struct vega10_hwmgr
*)(hwmgr
->backend
);
3954 struct vega10_dpm_table
*dpm_table
= &data
->dpm_table
;
3958 case AMDGPU_PP_SENSOR_GFX_SCLK
:
3959 ret
= smum_send_msg_to_smc(hwmgr
, PPSMC_MSG_GetCurrentGfxclkIndex
);
3961 vega10_read_arg_from_smc(hwmgr
, &sclk_idx
);
3962 *((uint32_t *)value
) = dpm_table
->gfx_table
.dpm_levels
[sclk_idx
].value
;
3966 case AMDGPU_PP_SENSOR_GFX_MCLK
:
3967 ret
= smum_send_msg_to_smc(hwmgr
, PPSMC_MSG_GetCurrentUclkIndex
);
3969 vega10_read_arg_from_smc(hwmgr
, &mclk_idx
);
3970 *((uint32_t *)value
) = dpm_table
->mem_table
.dpm_levels
[mclk_idx
].value
;
3974 case AMDGPU_PP_SENSOR_GPU_LOAD
:
3975 ret
= smum_send_msg_to_smc_with_parameter(hwmgr
, PPSMC_MSG_GetAverageGfxActivity
, 0);
3977 vega10_read_arg_from_smc(hwmgr
, &activity_percent
);
3978 *((uint32_t *)value
) = activity_percent
> 100 ? 100 : activity_percent
;
3982 case AMDGPU_PP_SENSOR_GPU_TEMP
:
3983 *((uint32_t *)value
) = vega10_thermal_get_temperature(hwmgr
);
3986 case AMDGPU_PP_SENSOR_UVD_POWER
:
3987 *((uint32_t *)value
) = data
->uvd_power_gated
? 0 : 1;
3990 case AMDGPU_PP_SENSOR_VCE_POWER
:
3991 *((uint32_t *)value
) = data
->vce_power_gated
? 0 : 1;
3994 case AMDGPU_PP_SENSOR_GPU_POWER
:
3995 if (*size
< sizeof(struct pp_gpu_power
))
3998 *size
= sizeof(struct pp_gpu_power
);
3999 ret
= vega10_get_gpu_power(hwmgr
, (struct pp_gpu_power
*)value
);
4009 static int vega10_notify_smc_display_change(struct pp_hwmgr
*hwmgr
,
4012 return smum_send_msg_to_smc_with_parameter(hwmgr
,
4013 PPSMC_MSG_SetUclkFastSwitch
,
4017 int vega10_display_clock_voltage_request(struct pp_hwmgr
*hwmgr
,
4018 struct pp_display_clock_request
*clock_req
)
4021 enum amd_pp_clock_type clk_type
= clock_req
->clock_type
;
4022 uint32_t clk_freq
= clock_req
->clock_freq_in_khz
/ 1000;
4023 DSPCLK_e clk_select
= 0;
4024 uint32_t clk_request
= 0;
4027 case amd_pp_dcef_clock
:
4028 clk_select
= DSPCLK_DCEFCLK
;
4030 case amd_pp_disp_clock
:
4031 clk_select
= DSPCLK_DISPCLK
;
4033 case amd_pp_pixel_clock
:
4034 clk_select
= DSPCLK_PIXCLK
;
4036 case amd_pp_phy_clock
:
4037 clk_select
= DSPCLK_PHYCLK
;
4040 pr_info("[DisplayClockVoltageRequest]Invalid Clock Type!");
4046 clk_request
= (clk_freq
<< 16) | clk_select
;
4047 result
= smum_send_msg_to_smc_with_parameter(hwmgr
,
4048 PPSMC_MSG_RequestDisplayClockByFreq
,
4055 static uint8_t vega10_get_uclk_index(struct pp_hwmgr
*hwmgr
,
4056 struct phm_ppt_v1_clock_voltage_dependency_table
*mclk_table
,
4062 if (mclk_table
== NULL
|| mclk_table
->count
== 0)
4065 count
= (uint8_t)(mclk_table
->count
);
4067 for(i
= 0; i
< count
; i
++) {
4068 if(mclk_table
->entries
[i
].clk
>= frequency
)
4075 static int vega10_notify_smc_display_config_after_ps_adjustment(
4076 struct pp_hwmgr
*hwmgr
)
4078 struct vega10_hwmgr
*data
=
4079 (struct vega10_hwmgr
*)(hwmgr
->backend
);
4080 struct vega10_single_dpm_table
*dpm_table
=
4081 &data
->dpm_table
.dcef_table
;
4082 struct phm_ppt_v2_information
*table_info
=
4083 (struct phm_ppt_v2_information
*)hwmgr
->pptable
;
4084 struct phm_ppt_v1_clock_voltage_dependency_table
*mclk_table
= table_info
->vdd_dep_on_mclk
;
4086 uint32_t num_active_disps
= 0;
4087 struct cgs_display_info info
= {0};
4088 struct PP_Clocks min_clocks
= {0};
4090 struct pp_display_clock_request clock_req
;
4092 info
.mode_info
= NULL
;
4094 cgs_get_active_displays_info(hwmgr
->device
, &info
);
4096 num_active_disps
= info
.display_count
;
4098 if (num_active_disps
> 1)
4099 vega10_notify_smc_display_change(hwmgr
, false);
4101 vega10_notify_smc_display_change(hwmgr
, true);
4103 min_clocks
.dcefClock
= hwmgr
->display_config
.min_dcef_set_clk
;
4104 min_clocks
.dcefClockInSR
= hwmgr
->display_config
.min_dcef_deep_sleep_set_clk
;
4105 min_clocks
.memoryClock
= hwmgr
->display_config
.min_mem_set_clock
;
4107 for (i
= 0; i
< dpm_table
->count
; i
++) {
4108 if (dpm_table
->dpm_levels
[i
].value
== min_clocks
.dcefClock
)
4112 if (i
< dpm_table
->count
) {
4113 clock_req
.clock_type
= amd_pp_dcef_clock
;
4114 clock_req
.clock_freq_in_khz
= dpm_table
->dpm_levels
[i
].value
;
4115 if (!vega10_display_clock_voltage_request(hwmgr
, &clock_req
)) {
4116 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
4117 hwmgr
, PPSMC_MSG_SetMinDeepSleepDcefclk
,
4118 min_clocks
.dcefClockInSR
/100),
4119 "Attempt to set divider for DCEFCLK Failed!",);
4121 pr_info("Attempt to set Hard Min for DCEFCLK Failed!");
4124 pr_debug("Cannot find requested DCEFCLK!");
4127 if (min_clocks
.memoryClock
!= 0) {
4128 idx
= vega10_get_uclk_index(hwmgr
, mclk_table
, min_clocks
.memoryClock
);
4129 smum_send_msg_to_smc_with_parameter(hwmgr
, PPSMC_MSG_SetSoftMinUclkByIndex
, idx
);
4130 data
->dpm_table
.mem_table
.dpm_state
.soft_min_level
= idx
;
4136 static int vega10_force_dpm_highest(struct pp_hwmgr
*hwmgr
)
4138 struct vega10_hwmgr
*data
=
4139 (struct vega10_hwmgr
*)(hwmgr
->backend
);
4141 data
->smc_state_table
.gfx_boot_level
=
4142 data
->smc_state_table
.gfx_max_level
=
4143 vega10_find_highest_dpm_level(&(data
->dpm_table
.gfx_table
));
4144 data
->smc_state_table
.mem_boot_level
=
4145 data
->smc_state_table
.mem_max_level
=
4146 vega10_find_highest_dpm_level(&(data
->dpm_table
.mem_table
));
4148 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr
),
4149 "Failed to upload boot level to highest!",
4152 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr
),
4153 "Failed to upload dpm max level to highest!",
4159 static int vega10_force_dpm_lowest(struct pp_hwmgr
*hwmgr
)
4161 struct vega10_hwmgr
*data
=
4162 (struct vega10_hwmgr
*)(hwmgr
->backend
);
4164 data
->smc_state_table
.gfx_boot_level
=
4165 data
->smc_state_table
.gfx_max_level
=
4166 vega10_find_lowest_dpm_level(&(data
->dpm_table
.gfx_table
));
4167 data
->smc_state_table
.mem_boot_level
=
4168 data
->smc_state_table
.mem_max_level
=
4169 vega10_find_lowest_dpm_level(&(data
->dpm_table
.mem_table
));
4171 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr
),
4172 "Failed to upload boot level to highest!",
4175 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr
),
4176 "Failed to upload dpm max level to highest!",
4183 static int vega10_unforce_dpm_levels(struct pp_hwmgr
*hwmgr
)
4185 struct vega10_hwmgr
*data
= (struct vega10_hwmgr
*)(hwmgr
->backend
);
4187 data
->smc_state_table
.gfx_boot_level
=
4188 vega10_find_lowest_dpm_level(&(data
->dpm_table
.gfx_table
));
4189 data
->smc_state_table
.gfx_max_level
=
4190 vega10_find_highest_dpm_level(&(data
->dpm_table
.gfx_table
));
4191 data
->smc_state_table
.mem_boot_level
=
4192 vega10_find_lowest_dpm_level(&(data
->dpm_table
.mem_table
));
4193 data
->smc_state_table
.mem_max_level
=
4194 vega10_find_highest_dpm_level(&(data
->dpm_table
.mem_table
));
4196 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr
),
4197 "Failed to upload DPM Bootup Levels!",
4200 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr
),
4201 "Failed to upload DPM Max Levels!",
4206 static int vega10_get_profiling_clk_mask(struct pp_hwmgr
*hwmgr
, enum amd_dpm_forced_level level
,
4207 uint32_t *sclk_mask
, uint32_t *mclk_mask
, uint32_t *soc_mask
)
4209 struct phm_ppt_v2_information
*table_info
=
4210 (struct phm_ppt_v2_information
*)(hwmgr
->pptable
);
4212 if (table_info
->vdd_dep_on_sclk
->count
> VEGA10_UMD_PSTATE_GFXCLK_LEVEL
&&
4213 table_info
->vdd_dep_on_socclk
->count
> VEGA10_UMD_PSTATE_SOCCLK_LEVEL
&&
4214 table_info
->vdd_dep_on_mclk
->count
> VEGA10_UMD_PSTATE_MCLK_LEVEL
) {
4215 *sclk_mask
= VEGA10_UMD_PSTATE_GFXCLK_LEVEL
;
4216 *soc_mask
= VEGA10_UMD_PSTATE_SOCCLK_LEVEL
;
4217 *mclk_mask
= VEGA10_UMD_PSTATE_MCLK_LEVEL
;
4220 if (level
== AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK
) {
4222 } else if (level
== AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK
) {
4224 } else if (level
== AMD_DPM_FORCED_LEVEL_PROFILE_PEAK
) {
4225 *sclk_mask
= table_info
->vdd_dep_on_sclk
->count
- 1;
4226 *soc_mask
= table_info
->vdd_dep_on_socclk
->count
- 1;
4227 *mclk_mask
= table_info
->vdd_dep_on_mclk
->count
- 1;
4232 static void vega10_set_fan_control_mode(struct pp_hwmgr
*hwmgr
, uint32_t mode
)
4235 case AMD_FAN_CTRL_NONE
:
4236 vega10_fan_ctrl_set_fan_speed_percent(hwmgr
, 100);
4238 case AMD_FAN_CTRL_MANUAL
:
4239 if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl
))
4240 vega10_fan_ctrl_stop_smc_fan_control(hwmgr
);
4242 case AMD_FAN_CTRL_AUTO
:
4243 if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl
))
4244 vega10_fan_ctrl_start_smc_fan_control(hwmgr
);
4251 static int vega10_dpm_force_dpm_level(struct pp_hwmgr
*hwmgr
,
4252 enum amd_dpm_forced_level level
)
4255 uint32_t sclk_mask
= 0;
4256 uint32_t mclk_mask
= 0;
4257 uint32_t soc_mask
= 0;
4260 case AMD_DPM_FORCED_LEVEL_HIGH
:
4261 ret
= vega10_force_dpm_highest(hwmgr
);
4263 case AMD_DPM_FORCED_LEVEL_LOW
:
4264 ret
= vega10_force_dpm_lowest(hwmgr
);
4266 case AMD_DPM_FORCED_LEVEL_AUTO
:
4267 ret
= vega10_unforce_dpm_levels(hwmgr
);
4269 case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD
:
4270 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK
:
4271 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK
:
4272 case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK
:
4273 ret
= vega10_get_profiling_clk_mask(hwmgr
, level
, &sclk_mask
, &mclk_mask
, &soc_mask
);
4276 vega10_force_clock_level(hwmgr
, PP_SCLK
, 1<<sclk_mask
);
4277 vega10_force_clock_level(hwmgr
, PP_MCLK
, 1<<mclk_mask
);
4279 case AMD_DPM_FORCED_LEVEL_MANUAL
:
4280 case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT
:
4286 if (level
== AMD_DPM_FORCED_LEVEL_PROFILE_PEAK
&& hwmgr
->dpm_level
!= AMD_DPM_FORCED_LEVEL_PROFILE_PEAK
)
4287 vega10_set_fan_control_mode(hwmgr
, AMD_FAN_CTRL_NONE
);
4288 else if (level
!= AMD_DPM_FORCED_LEVEL_PROFILE_PEAK
&& hwmgr
->dpm_level
== AMD_DPM_FORCED_LEVEL_PROFILE_PEAK
)
4289 vega10_set_fan_control_mode(hwmgr
, AMD_FAN_CTRL_AUTO
);
4294 static uint32_t vega10_get_fan_control_mode(struct pp_hwmgr
*hwmgr
)
4296 struct vega10_hwmgr
*data
= (struct vega10_hwmgr
*)(hwmgr
->backend
);
4298 if (data
->smu_features
[GNLD_FAN_CONTROL
].enabled
== false)
4299 return AMD_FAN_CTRL_MANUAL
;
4301 return AMD_FAN_CTRL_AUTO
;
4304 static int vega10_get_dal_power_level(struct pp_hwmgr
*hwmgr
,
4305 struct amd_pp_simple_clock_info
*info
)
4307 struct phm_ppt_v2_information
*table_info
=
4308 (struct phm_ppt_v2_information
*)hwmgr
->pptable
;
4309 struct phm_clock_and_voltage_limits
*max_limits
=
4310 &table_info
->max_clock_voltage_on_ac
;
4312 info
->engine_max_clock
= max_limits
->sclk
;
4313 info
->memory_max_clock
= max_limits
->mclk
;
4318 static void vega10_get_sclks(struct pp_hwmgr
*hwmgr
,
4319 struct pp_clock_levels_with_latency
*clocks
)
4321 struct phm_ppt_v2_information
*table_info
=
4322 (struct phm_ppt_v2_information
*)hwmgr
->pptable
;
4323 struct phm_ppt_v1_clock_voltage_dependency_table
*dep_table
=
4324 table_info
->vdd_dep_on_sclk
;
4327 for (i
= 0; i
< dep_table
->count
; i
++) {
4328 if (dep_table
->entries
[i
].clk
) {
4329 clocks
->data
[clocks
->num_levels
].clocks_in_khz
=
4330 dep_table
->entries
[i
].clk
;
4331 clocks
->num_levels
++;
4337 static uint32_t vega10_get_mem_latency(struct pp_hwmgr
*hwmgr
,
4340 if (clock
>= MEM_FREQ_LOW_LATENCY
&&
4341 clock
< MEM_FREQ_HIGH_LATENCY
)
4342 return MEM_LATENCY_HIGH
;
4343 else if (clock
>= MEM_FREQ_HIGH_LATENCY
)
4344 return MEM_LATENCY_LOW
;
4346 return MEM_LATENCY_ERR
;
4349 static void vega10_get_memclocks(struct pp_hwmgr
*hwmgr
,
4350 struct pp_clock_levels_with_latency
*clocks
)
4352 struct phm_ppt_v2_information
*table_info
=
4353 (struct phm_ppt_v2_information
*)hwmgr
->pptable
;
4354 struct phm_ppt_v1_clock_voltage_dependency_table
*dep_table
=
4355 table_info
->vdd_dep_on_mclk
;
4356 struct vega10_hwmgr
*data
= (struct vega10_hwmgr
*)(hwmgr
->backend
);
4359 clocks
->num_levels
= 0;
4360 data
->mclk_latency_table
.count
= 0;
4362 for (i
= 0; i
< dep_table
->count
; i
++) {
4363 if (dep_table
->entries
[i
].clk
) {
4364 clocks
->data
[clocks
->num_levels
].clocks_in_khz
=
4365 data
->mclk_latency_table
.entries
4366 [data
->mclk_latency_table
.count
].frequency
=
4367 dep_table
->entries
[i
].clk
;
4368 clocks
->data
[clocks
->num_levels
].latency_in_us
=
4369 data
->mclk_latency_table
.entries
4370 [data
->mclk_latency_table
.count
].latency
=
4371 vega10_get_mem_latency(hwmgr
,
4372 dep_table
->entries
[i
].clk
);
4373 clocks
->num_levels
++;
4374 data
->mclk_latency_table
.count
++;
4379 static void vega10_get_dcefclocks(struct pp_hwmgr
*hwmgr
,
4380 struct pp_clock_levels_with_latency
*clocks
)
4382 struct phm_ppt_v2_information
*table_info
=
4383 (struct phm_ppt_v2_information
*)hwmgr
->pptable
;
4384 struct phm_ppt_v1_clock_voltage_dependency_table
*dep_table
=
4385 table_info
->vdd_dep_on_dcefclk
;
4388 for (i
= 0; i
< dep_table
->count
; i
++) {
4389 clocks
->data
[i
].clocks_in_khz
= dep_table
->entries
[i
].clk
;
4390 clocks
->data
[i
].latency_in_us
= 0;
4391 clocks
->num_levels
++;
4395 static void vega10_get_socclocks(struct pp_hwmgr
*hwmgr
,
4396 struct pp_clock_levels_with_latency
*clocks
)
4398 struct phm_ppt_v2_information
*table_info
=
4399 (struct phm_ppt_v2_information
*)hwmgr
->pptable
;
4400 struct phm_ppt_v1_clock_voltage_dependency_table
*dep_table
=
4401 table_info
->vdd_dep_on_socclk
;
4404 for (i
= 0; i
< dep_table
->count
; i
++) {
4405 clocks
->data
[i
].clocks_in_khz
= dep_table
->entries
[i
].clk
;
4406 clocks
->data
[i
].latency_in_us
= 0;
4407 clocks
->num_levels
++;
4411 static int vega10_get_clock_by_type_with_latency(struct pp_hwmgr
*hwmgr
,
4412 enum amd_pp_clock_type type
,
4413 struct pp_clock_levels_with_latency
*clocks
)
4416 case amd_pp_sys_clock
:
4417 vega10_get_sclks(hwmgr
, clocks
);
4419 case amd_pp_mem_clock
:
4420 vega10_get_memclocks(hwmgr
, clocks
);
4422 case amd_pp_dcef_clock
:
4423 vega10_get_dcefclocks(hwmgr
, clocks
);
4425 case amd_pp_soc_clock
:
4426 vega10_get_socclocks(hwmgr
, clocks
);
4435 static int vega10_get_clock_by_type_with_voltage(struct pp_hwmgr
*hwmgr
,
4436 enum amd_pp_clock_type type
,
4437 struct pp_clock_levels_with_voltage
*clocks
)
4439 struct phm_ppt_v2_information
*table_info
=
4440 (struct phm_ppt_v2_information
*)hwmgr
->pptable
;
4441 struct phm_ppt_v1_clock_voltage_dependency_table
*dep_table
;
4445 case amd_pp_mem_clock
:
4446 dep_table
= table_info
->vdd_dep_on_mclk
;
4448 case amd_pp_dcef_clock
:
4449 dep_table
= table_info
->vdd_dep_on_dcefclk
;
4451 case amd_pp_disp_clock
:
4452 dep_table
= table_info
->vdd_dep_on_dispclk
;
4454 case amd_pp_pixel_clock
:
4455 dep_table
= table_info
->vdd_dep_on_pixclk
;
4457 case amd_pp_phy_clock
:
4458 dep_table
= table_info
->vdd_dep_on_phyclk
;
4464 for (i
= 0; i
< dep_table
->count
; i
++) {
4465 clocks
->data
[i
].clocks_in_khz
= dep_table
->entries
[i
].clk
;
4466 clocks
->data
[i
].voltage_in_mv
= (uint32_t)(table_info
->vddc_lookup_table
->
4467 entries
[dep_table
->entries
[i
].vddInd
].us_vdd
);
4468 clocks
->num_levels
++;
4471 if (i
< dep_table
->count
)
4477 static int vega10_set_watermarks_for_clocks_ranges(struct pp_hwmgr
*hwmgr
,
4478 struct pp_wm_sets_with_clock_ranges_soc15
*wm_with_clock_ranges
)
4480 struct vega10_hwmgr
*data
= (struct vega10_hwmgr
*)(hwmgr
->backend
);
4481 Watermarks_t
*table
= &(data
->smc_state_table
.water_marks_table
);
4485 if (!data
->registry_data
.disable_water_mark
) {
4486 for (i
= 0; i
< wm_with_clock_ranges
->num_wm_sets_dmif
; i
++) {
4487 table
->WatermarkRow
[WM_DCEFCLK
][i
].MinClock
=
4488 cpu_to_le16((uint16_t)
4489 (wm_with_clock_ranges
->wm_sets_dmif
[i
].wm_min_dcefclk_in_khz
) /
4491 table
->WatermarkRow
[WM_DCEFCLK
][i
].MaxClock
=
4492 cpu_to_le16((uint16_t)
4493 (wm_with_clock_ranges
->wm_sets_dmif
[i
].wm_max_dcefclk_in_khz
) /
4495 table
->WatermarkRow
[WM_DCEFCLK
][i
].MinUclk
=
4496 cpu_to_le16((uint16_t)
4497 (wm_with_clock_ranges
->wm_sets_dmif
[i
].wm_min_memclk_in_khz
) /
4499 table
->WatermarkRow
[WM_DCEFCLK
][i
].MaxUclk
=
4500 cpu_to_le16((uint16_t)
4501 (wm_with_clock_ranges
->wm_sets_dmif
[i
].wm_max_memclk_in_khz
) /
4503 table
->WatermarkRow
[WM_DCEFCLK
][i
].WmSetting
= (uint8_t)
4504 wm_with_clock_ranges
->wm_sets_dmif
[i
].wm_set_id
;
4507 for (i
= 0; i
< wm_with_clock_ranges
->num_wm_sets_mcif
; i
++) {
4508 table
->WatermarkRow
[WM_SOCCLK
][i
].MinClock
=
4509 cpu_to_le16((uint16_t)
4510 (wm_with_clock_ranges
->wm_sets_mcif
[i
].wm_min_socclk_in_khz
) /
4512 table
->WatermarkRow
[WM_SOCCLK
][i
].MaxClock
=
4513 cpu_to_le16((uint16_t)
4514 (wm_with_clock_ranges
->wm_sets_mcif
[i
].wm_max_socclk_in_khz
) /
4516 table
->WatermarkRow
[WM_SOCCLK
][i
].MinUclk
=
4517 cpu_to_le16((uint16_t)
4518 (wm_with_clock_ranges
->wm_sets_mcif
[i
].wm_min_memclk_in_khz
) /
4520 table
->WatermarkRow
[WM_SOCCLK
][i
].MaxUclk
=
4521 cpu_to_le16((uint16_t)
4522 (wm_with_clock_ranges
->wm_sets_mcif
[i
].wm_max_memclk_in_khz
) /
4524 table
->WatermarkRow
[WM_SOCCLK
][i
].WmSetting
= (uint8_t)
4525 wm_with_clock_ranges
->wm_sets_mcif
[i
].wm_set_id
;
4527 data
->water_marks_bitmap
= WaterMarksExist
;
4533 static int vega10_force_clock_level(struct pp_hwmgr
*hwmgr
,
4534 enum pp_clock_type type
, uint32_t mask
)
4536 struct vega10_hwmgr
*data
= (struct vega10_hwmgr
*)(hwmgr
->backend
);
4539 if (hwmgr
->request_dpm_level
& (AMD_DPM_FORCED_LEVEL_AUTO
|
4540 AMD_DPM_FORCED_LEVEL_LOW
|
4541 AMD_DPM_FORCED_LEVEL_HIGH
))
4546 for (i
= 0; i
< 32; i
++) {
4547 if (mask
& (1 << i
))
4550 data
->smc_state_table
.gfx_boot_level
= i
;
4552 for (i
= 31; i
>= 0; i
--) {
4553 if (mask
& (1 << i
))
4556 data
->smc_state_table
.gfx_max_level
= i
;
4558 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr
),
4559 "Failed to upload boot level to lowest!",
4562 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr
),
4563 "Failed to upload dpm max level to highest!",
4568 for (i
= 0; i
< 32; i
++) {
4569 if (mask
& (1 << i
))
4572 data
->smc_state_table
.mem_boot_level
= i
;
4574 for (i
= 31; i
>= 0; i
--) {
4575 if (mask
& (1 << i
))
4578 data
->smc_state_table
.mem_max_level
= i
;
4580 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr
),
4581 "Failed to upload boot level to lowest!",
4584 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr
),
4585 "Failed to upload dpm max level to highest!",
4598 static int vega10_print_clock_levels(struct pp_hwmgr
*hwmgr
,
4599 enum pp_clock_type type
, char *buf
)
4601 struct vega10_hwmgr
*data
= (struct vega10_hwmgr
*)(hwmgr
->backend
);
4602 struct vega10_single_dpm_table
*sclk_table
= &(data
->dpm_table
.gfx_table
);
4603 struct vega10_single_dpm_table
*mclk_table
= &(data
->dpm_table
.mem_table
);
4604 struct vega10_pcie_table
*pcie_table
= &(data
->dpm_table
.pcie_table
);
4605 int i
, now
, size
= 0;
4609 if (data
->registry_data
.sclk_dpm_key_disabled
)
4612 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr
,
4613 PPSMC_MSG_GetCurrentGfxclkIndex
),
4614 "Attempt to get current sclk index Failed!",
4616 PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(hwmgr
,
4618 "Attempt to read sclk index Failed!",
4621 for (i
= 0; i
< sclk_table
->count
; i
++)
4622 size
+= sprintf(buf
+ size
, "%d: %uMhz %s\n",
4623 i
, sclk_table
->dpm_levels
[i
].value
/ 100,
4624 (i
== now
) ? "*" : "");
4627 if (data
->registry_data
.mclk_dpm_key_disabled
)
4630 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr
,
4631 PPSMC_MSG_GetCurrentUclkIndex
),
4632 "Attempt to get current mclk index Failed!",
4634 PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(hwmgr
,
4636 "Attempt to read mclk index Failed!",
4639 for (i
= 0; i
< mclk_table
->count
; i
++)
4640 size
+= sprintf(buf
+ size
, "%d: %uMhz %s\n",
4641 i
, mclk_table
->dpm_levels
[i
].value
/ 100,
4642 (i
== now
) ? "*" : "");
4645 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr
,
4646 PPSMC_MSG_GetCurrentLinkIndex
),
4647 "Attempt to get current mclk index Failed!",
4649 PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(hwmgr
,
4651 "Attempt to read mclk index Failed!",
4654 for (i
= 0; i
< pcie_table
->count
; i
++)
4655 size
+= sprintf(buf
+ size
, "%d: %s %s\n", i
,
4656 (pcie_table
->pcie_gen
[i
] == 0) ? "2.5GB, x1" :
4657 (pcie_table
->pcie_gen
[i
] == 1) ? "5.0GB, x16" :
4658 (pcie_table
->pcie_gen
[i
] == 2) ? "8.0GB, x16" : "",
4659 (i
== now
) ? "*" : "");
4667 static int vega10_display_configuration_changed_task(struct pp_hwmgr
*hwmgr
)
4669 struct vega10_hwmgr
*data
= (struct vega10_hwmgr
*)(hwmgr
->backend
);
4671 uint32_t num_turned_on_displays
= 1;
4672 Watermarks_t
*wm_table
= &(data
->smc_state_table
.water_marks_table
);
4673 struct cgs_display_info info
= {0};
4675 if ((data
->water_marks_bitmap
& WaterMarksExist
) &&
4676 !(data
->water_marks_bitmap
& WaterMarksLoaded
)) {
4677 result
= vega10_copy_table_to_smc(hwmgr
,
4678 (uint8_t *)wm_table
, WMTABLE
);
4679 PP_ASSERT_WITH_CODE(result
, "Failed to update WMTABLE!", return EINVAL
);
4680 data
->water_marks_bitmap
|= WaterMarksLoaded
;
4683 if (data
->water_marks_bitmap
& WaterMarksLoaded
) {
4684 cgs_get_active_displays_info(hwmgr
->device
, &info
);
4685 num_turned_on_displays
= info
.display_count
;
4686 smum_send_msg_to_smc_with_parameter(hwmgr
,
4687 PPSMC_MSG_NumOfDisplays
, num_turned_on_displays
);
4693 int vega10_enable_disable_uvd_dpm(struct pp_hwmgr
*hwmgr
, bool enable
)
4695 struct vega10_hwmgr
*data
=
4696 (struct vega10_hwmgr
*)(hwmgr
->backend
);
4698 if (data
->smu_features
[GNLD_DPM_UVD
].supported
) {
4699 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr
,
4701 data
->smu_features
[GNLD_DPM_UVD
].smu_feature_bitmap
),
4702 "Attempt to Enable/Disable DPM UVD Failed!",
4704 data
->smu_features
[GNLD_DPM_UVD
].enabled
= enable
;
4709 static void vega10_power_gate_vce(struct pp_hwmgr
*hwmgr
, bool bgate
)
4711 struct vega10_hwmgr
*data
= (struct vega10_hwmgr
*)(hwmgr
->backend
);
4713 data
->vce_power_gated
= bgate
;
4714 vega10_enable_disable_vce_dpm(hwmgr
, !bgate
);
4717 static void vega10_power_gate_uvd(struct pp_hwmgr
*hwmgr
, bool bgate
)
4719 struct vega10_hwmgr
*data
= (struct vega10_hwmgr
*)(hwmgr
->backend
);
4721 data
->uvd_power_gated
= bgate
;
4722 vega10_enable_disable_uvd_dpm(hwmgr
, !bgate
);
4725 static inline bool vega10_are_power_levels_equal(
4726 const struct vega10_performance_level
*pl1
,
4727 const struct vega10_performance_level
*pl2
)
4729 return ((pl1
->soc_clock
== pl2
->soc_clock
) &&
4730 (pl1
->gfx_clock
== pl2
->gfx_clock
) &&
4731 (pl1
->mem_clock
== pl2
->mem_clock
));
4734 static int vega10_check_states_equal(struct pp_hwmgr
*hwmgr
,
4735 const struct pp_hw_power_state
*pstate1
,
4736 const struct pp_hw_power_state
*pstate2
, bool *equal
)
4738 const struct vega10_power_state
*psa
;
4739 const struct vega10_power_state
*psb
;
4742 if (pstate1
== NULL
|| pstate2
== NULL
|| equal
== NULL
)
4745 psa
= cast_const_phw_vega10_power_state(pstate1
);
4746 psb
= cast_const_phw_vega10_power_state(pstate2
);
4747 /* If the two states don't even have the same number of performance levels they cannot be the same state. */
4748 if (psa
->performance_level_count
!= psb
->performance_level_count
) {
4753 for (i
= 0; i
< psa
->performance_level_count
; i
++) {
4754 if (!vega10_are_power_levels_equal(&(psa
->performance_levels
[i
]), &(psb
->performance_levels
[i
]))) {
4755 /* If we have found even one performance level pair that is different the states are different. */
4761 /* If all performance levels are the same try to use the UVD clocks to break the tie.*/
4762 *equal
= ((psa
->uvd_clks
.vclk
== psb
->uvd_clks
.vclk
) && (psa
->uvd_clks
.dclk
== psb
->uvd_clks
.dclk
));
4763 *equal
&= ((psa
->vce_clks
.evclk
== psb
->vce_clks
.evclk
) && (psa
->vce_clks
.ecclk
== psb
->vce_clks
.ecclk
));
4764 *equal
&= (psa
->sclk_threshold
== psb
->sclk_threshold
);
4770 vega10_check_smc_update_required_for_display_configuration(struct pp_hwmgr
*hwmgr
)
4772 struct vega10_hwmgr
*data
= (struct vega10_hwmgr
*)(hwmgr
->backend
);
4773 bool is_update_required
= false;
4774 struct cgs_display_info info
= {0, 0, NULL
};
4776 cgs_get_active_displays_info(hwmgr
->device
, &info
);
4778 if (data
->display_timing
.num_existing_displays
!= info
.display_count
)
4779 is_update_required
= true;
4781 if (PP_CAP(PHM_PlatformCaps_SclkDeepSleep
)) {
4782 if (data
->display_timing
.min_clock_in_sr
!= hwmgr
->display_config
.min_core_set_clock_in_sr
)
4783 is_update_required
= true;
4786 return is_update_required
;
4789 static int vega10_disable_dpm_tasks(struct pp_hwmgr
*hwmgr
)
4791 int tmp_result
, result
= 0;
4793 tmp_result
= (vega10_is_dpm_running(hwmgr
)) ? 0 : -1;
4794 PP_ASSERT_WITH_CODE(tmp_result
== 0,
4795 "DPM is not running right now, no need to disable DPM!",
4798 if (PP_CAP(PHM_PlatformCaps_ThermalController
))
4799 vega10_disable_thermal_protection(hwmgr
);
4801 tmp_result
= vega10_disable_power_containment(hwmgr
);
4802 PP_ASSERT_WITH_CODE((tmp_result
== 0),
4803 "Failed to disable power containment!", result
= tmp_result
);
4805 tmp_result
= vega10_disable_didt_config(hwmgr
);
4806 PP_ASSERT_WITH_CODE((tmp_result
== 0),
4807 "Failed to disable didt config!", result
= tmp_result
);
4809 tmp_result
= vega10_avfs_enable(hwmgr
, false);
4810 PP_ASSERT_WITH_CODE((tmp_result
== 0),
4811 "Failed to disable AVFS!", result
= tmp_result
);
4813 tmp_result
= vega10_stop_dpm(hwmgr
, SMC_DPM_FEATURES
);
4814 PP_ASSERT_WITH_CODE((tmp_result
== 0),
4815 "Failed to stop DPM!", result
= tmp_result
);
4817 tmp_result
= vega10_disable_deep_sleep_master_switch(hwmgr
);
4818 PP_ASSERT_WITH_CODE((tmp_result
== 0),
4819 "Failed to disable deep sleep!", result
= tmp_result
);
4821 tmp_result
= vega10_disable_ulv(hwmgr
);
4822 PP_ASSERT_WITH_CODE((tmp_result
== 0),
4823 "Failed to disable ulv!", result
= tmp_result
);
4825 tmp_result
= vega10_acg_disable(hwmgr
);
4826 PP_ASSERT_WITH_CODE((tmp_result
== 0),
4827 "Failed to disable acg!", result
= tmp_result
);
4831 static int vega10_power_off_asic(struct pp_hwmgr
*hwmgr
)
4833 struct vega10_hwmgr
*data
= (struct vega10_hwmgr
*)(hwmgr
->backend
);
4836 result
= vega10_disable_dpm_tasks(hwmgr
);
4837 PP_ASSERT_WITH_CODE((0 == result
),
4838 "[disable_dpm_tasks] Failed to disable DPM!",
4840 data
->water_marks_bitmap
&= ~(WaterMarksLoaded
);
4845 static void vega10_find_min_clock_index(struct pp_hwmgr
*hwmgr
,
4846 uint32_t *sclk_idx
, uint32_t *mclk_idx
,
4847 uint32_t min_sclk
, uint32_t min_mclk
)
4849 struct vega10_hwmgr
*data
= (struct vega10_hwmgr
*)(hwmgr
->backend
);
4850 struct vega10_dpm_table
*dpm_table
= &(data
->dpm_table
);
4853 for (i
= 0; i
< dpm_table
->gfx_table
.count
; i
++) {
4854 if (dpm_table
->gfx_table
.dpm_levels
[i
].enabled
&&
4855 dpm_table
->gfx_table
.dpm_levels
[i
].value
>= min_sclk
) {
4861 for (i
= 0; i
< dpm_table
->mem_table
.count
; i
++) {
4862 if (dpm_table
->mem_table
.dpm_levels
[i
].enabled
&&
4863 dpm_table
->mem_table
.dpm_levels
[i
].value
>= min_mclk
) {
4870 static int vega10_set_power_profile_state(struct pp_hwmgr
*hwmgr
,
4871 struct amd_pp_profile
*request
)
4873 struct vega10_hwmgr
*data
= (struct vega10_hwmgr
*)(hwmgr
->backend
);
4874 uint32_t sclk_idx
= ~0, mclk_idx
= ~0;
4876 if (hwmgr
->dpm_level
!= AMD_DPM_FORCED_LEVEL_AUTO
)
4879 vega10_find_min_clock_index(hwmgr
, &sclk_idx
, &mclk_idx
,
4880 request
->min_sclk
, request
->min_mclk
);
4882 if (sclk_idx
!= ~0) {
4883 if (!data
->registry_data
.sclk_dpm_key_disabled
)
4884 PP_ASSERT_WITH_CODE(
4885 !smum_send_msg_to_smc_with_parameter(
4887 PPSMC_MSG_SetSoftMinGfxclkByIndex
,
4889 "Failed to set soft min sclk index!",
4893 if (mclk_idx
!= ~0) {
4894 if (!data
->registry_data
.mclk_dpm_key_disabled
)
4895 PP_ASSERT_WITH_CODE(
4896 !smum_send_msg_to_smc_with_parameter(
4898 PPSMC_MSG_SetSoftMinUclkByIndex
,
4900 "Failed to set soft min mclk index!",
4907 static int vega10_get_sclk_od(struct pp_hwmgr
*hwmgr
)
4909 struct vega10_hwmgr
*data
= (struct vega10_hwmgr
*)(hwmgr
->backend
);
4910 struct vega10_single_dpm_table
*sclk_table
= &(data
->dpm_table
.gfx_table
);
4911 struct vega10_single_dpm_table
*golden_sclk_table
=
4912 &(data
->golden_dpm_table
.gfx_table
);
4915 value
= (sclk_table
->dpm_levels
[sclk_table
->count
- 1].value
-
4916 golden_sclk_table
->dpm_levels
4917 [golden_sclk_table
->count
- 1].value
) *
4919 golden_sclk_table
->dpm_levels
4920 [golden_sclk_table
->count
- 1].value
;
4925 static int vega10_set_sclk_od(struct pp_hwmgr
*hwmgr
, uint32_t value
)
4927 struct vega10_hwmgr
*data
= (struct vega10_hwmgr
*)(hwmgr
->backend
);
4928 struct vega10_single_dpm_table
*golden_sclk_table
=
4929 &(data
->golden_dpm_table
.gfx_table
);
4930 struct pp_power_state
*ps
;
4931 struct vega10_power_state
*vega10_ps
;
4933 ps
= hwmgr
->request_ps
;
4938 vega10_ps
= cast_phw_vega10_power_state(&ps
->hardware
);
4940 vega10_ps
->performance_levels
4941 [vega10_ps
->performance_level_count
- 1].gfx_clock
=
4942 golden_sclk_table
->dpm_levels
4943 [golden_sclk_table
->count
- 1].value
*
4945 golden_sclk_table
->dpm_levels
4946 [golden_sclk_table
->count
- 1].value
;
4948 if (vega10_ps
->performance_levels
4949 [vega10_ps
->performance_level_count
- 1].gfx_clock
>
4950 hwmgr
->platform_descriptor
.overdriveLimit
.engineClock
)
4951 vega10_ps
->performance_levels
4952 [vega10_ps
->performance_level_count
- 1].gfx_clock
=
4953 hwmgr
->platform_descriptor
.overdriveLimit
.engineClock
;
4958 static int vega10_get_mclk_od(struct pp_hwmgr
*hwmgr
)
4960 struct vega10_hwmgr
*data
= (struct vega10_hwmgr
*)(hwmgr
->backend
);
4961 struct vega10_single_dpm_table
*mclk_table
= &(data
->dpm_table
.mem_table
);
4962 struct vega10_single_dpm_table
*golden_mclk_table
=
4963 &(data
->golden_dpm_table
.mem_table
);
4966 value
= (mclk_table
->dpm_levels
4967 [mclk_table
->count
- 1].value
-
4968 golden_mclk_table
->dpm_levels
4969 [golden_mclk_table
->count
- 1].value
) *
4971 golden_mclk_table
->dpm_levels
4972 [golden_mclk_table
->count
- 1].value
;
4977 static int vega10_set_mclk_od(struct pp_hwmgr
*hwmgr
, uint32_t value
)
4979 struct vega10_hwmgr
*data
= (struct vega10_hwmgr
*)(hwmgr
->backend
);
4980 struct vega10_single_dpm_table
*golden_mclk_table
=
4981 &(data
->golden_dpm_table
.mem_table
);
4982 struct pp_power_state
*ps
;
4983 struct vega10_power_state
*vega10_ps
;
4985 ps
= hwmgr
->request_ps
;
4990 vega10_ps
= cast_phw_vega10_power_state(&ps
->hardware
);
4992 vega10_ps
->performance_levels
4993 [vega10_ps
->performance_level_count
- 1].mem_clock
=
4994 golden_mclk_table
->dpm_levels
4995 [golden_mclk_table
->count
- 1].value
*
4997 golden_mclk_table
->dpm_levels
4998 [golden_mclk_table
->count
- 1].value
;
5000 if (vega10_ps
->performance_levels
5001 [vega10_ps
->performance_level_count
- 1].mem_clock
>
5002 hwmgr
->platform_descriptor
.overdriveLimit
.memoryClock
)
5003 vega10_ps
->performance_levels
5004 [vega10_ps
->performance_level_count
- 1].mem_clock
=
5005 hwmgr
->platform_descriptor
.overdriveLimit
.memoryClock
;
5010 static int vega10_notify_cac_buffer_info(struct pp_hwmgr
*hwmgr
,
5011 uint32_t virtual_addr_low
,
5012 uint32_t virtual_addr_hi
,
5013 uint32_t mc_addr_low
,
5014 uint32_t mc_addr_hi
,
5017 smum_send_msg_to_smc_with_parameter(hwmgr
,
5018 PPSMC_MSG_SetSystemVirtualDramAddrHigh
,
5020 smum_send_msg_to_smc_with_parameter(hwmgr
,
5021 PPSMC_MSG_SetSystemVirtualDramAddrLow
,
5023 smum_send_msg_to_smc_with_parameter(hwmgr
,
5024 PPSMC_MSG_DramLogSetDramAddrHigh
,
5027 smum_send_msg_to_smc_with_parameter(hwmgr
,
5028 PPSMC_MSG_DramLogSetDramAddrLow
,
5031 smum_send_msg_to_smc_with_parameter(hwmgr
,
5032 PPSMC_MSG_DramLogSetDramSize
,
5037 static int vega10_register_thermal_interrupt(struct pp_hwmgr
*hwmgr
,
5040 struct cgs_irq_src_funcs
*irq_src
=
5041 (struct cgs_irq_src_funcs
*)info
;
5043 if (hwmgr
->thermal_controller
.ucType
==
5044 ATOM_VEGA10_PP_THERMALCONTROLLER_VEGA10
||
5045 hwmgr
->thermal_controller
.ucType
==
5046 ATOM_VEGA10_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL
) {
5047 PP_ASSERT_WITH_CODE(!cgs_add_irq_source(hwmgr
->device
,
5048 0xf, /* AMDGPU_IH_CLIENTID_THM */
5049 0, 0, irq_src
[0].set
, irq_src
[0].handler
, hwmgr
),
5050 "Failed to register high thermal interrupt!",
5052 PP_ASSERT_WITH_CODE(!cgs_add_irq_source(hwmgr
->device
,
5053 0xf, /* AMDGPU_IH_CLIENTID_THM */
5054 1, 0, irq_src
[1].set
, irq_src
[1].handler
, hwmgr
),
5055 "Failed to register low thermal interrupt!",
5059 /* Register CTF(GPIO_19) interrupt */
5060 PP_ASSERT_WITH_CODE(!cgs_add_irq_source(hwmgr
->device
,
5061 0x16, /* AMDGPU_IH_CLIENTID_ROM_SMUIO, */
5062 83, 0, irq_src
[2].set
, irq_src
[2].handler
, hwmgr
),
5063 "Failed to register CTF thermal interrupt!",
5069 static const struct pp_hwmgr_func vega10_hwmgr_funcs
= {
5070 .backend_init
= vega10_hwmgr_backend_init
,
5071 .backend_fini
= vega10_hwmgr_backend_fini
,
5072 .asic_setup
= vega10_setup_asic_task
,
5073 .dynamic_state_management_enable
= vega10_enable_dpm_tasks
,
5074 .dynamic_state_management_disable
= vega10_disable_dpm_tasks
,
5075 .get_num_of_pp_table_entries
=
5076 vega10_get_number_of_powerplay_table_entries
,
5077 .get_power_state_size
= vega10_get_power_state_size
,
5078 .get_pp_table_entry
= vega10_get_pp_table_entry
,
5079 .patch_boot_state
= vega10_patch_boot_state
,
5080 .apply_state_adjust_rules
= vega10_apply_state_adjust_rules
,
5081 .power_state_set
= vega10_set_power_state_tasks
,
5082 .get_sclk
= vega10_dpm_get_sclk
,
5083 .get_mclk
= vega10_dpm_get_mclk
,
5084 .notify_smc_display_config_after_ps_adjustment
=
5085 vega10_notify_smc_display_config_after_ps_adjustment
,
5086 .force_dpm_level
= vega10_dpm_force_dpm_level
,
5087 .get_temperature
= vega10_thermal_get_temperature
,
5088 .stop_thermal_controller
= vega10_thermal_stop_thermal_controller
,
5089 .get_fan_speed_info
= vega10_fan_ctrl_get_fan_speed_info
,
5090 .get_fan_speed_percent
= vega10_fan_ctrl_get_fan_speed_percent
,
5091 .set_fan_speed_percent
= vega10_fan_ctrl_set_fan_speed_percent
,
5092 .reset_fan_speed_to_default
=
5093 vega10_fan_ctrl_reset_fan_speed_to_default
,
5094 .get_fan_speed_rpm
= vega10_fan_ctrl_get_fan_speed_rpm
,
5095 .set_fan_speed_rpm
= vega10_fan_ctrl_set_fan_speed_rpm
,
5096 .uninitialize_thermal_controller
=
5097 vega10_thermal_ctrl_uninitialize_thermal_controller
,
5098 .set_fan_control_mode
= vega10_set_fan_control_mode
,
5099 .get_fan_control_mode
= vega10_get_fan_control_mode
,
5100 .read_sensor
= vega10_read_sensor
,
5101 .get_dal_power_level
= vega10_get_dal_power_level
,
5102 .get_clock_by_type_with_latency
= vega10_get_clock_by_type_with_latency
,
5103 .get_clock_by_type_with_voltage
= vega10_get_clock_by_type_with_voltage
,
5104 .set_watermarks_for_clocks_ranges
= vega10_set_watermarks_for_clocks_ranges
,
5105 .display_clock_voltage_request
= vega10_display_clock_voltage_request
,
5106 .force_clock_level
= vega10_force_clock_level
,
5107 .print_clock_levels
= vega10_print_clock_levels
,
5108 .display_config_changed
= vega10_display_configuration_changed_task
,
5109 .powergate_uvd
= vega10_power_gate_uvd
,
5110 .powergate_vce
= vega10_power_gate_vce
,
5111 .check_states_equal
= vega10_check_states_equal
,
5112 .check_smc_update_required_for_display_configuration
=
5113 vega10_check_smc_update_required_for_display_configuration
,
5114 .power_off_asic
= vega10_power_off_asic
,
5115 .disable_smc_firmware_ctf
= vega10_thermal_disable_alert
,
5116 .set_power_profile_state
= vega10_set_power_profile_state
,
5117 .get_sclk_od
= vega10_get_sclk_od
,
5118 .set_sclk_od
= vega10_set_sclk_od
,
5119 .get_mclk_od
= vega10_get_mclk_od
,
5120 .set_mclk_od
= vega10_set_mclk_od
,
5121 .avfs_control
= vega10_avfs_enable
,
5122 .notify_cac_buffer_info
= vega10_notify_cac_buffer_info
,
5123 .register_internal_thermal_interrupt
= vega10_register_thermal_interrupt
,
5124 .start_thermal_controller
= vega10_start_thermal_controller
,
5127 int vega10_hwmgr_init(struct pp_hwmgr
*hwmgr
)
5129 hwmgr
->hwmgr_func
= &vega10_hwmgr_funcs
;
5130 hwmgr
->pptable_func
= &vega10_pptable_funcs
;