/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
24 #include <linux/delay.h>
26 #include <linux/module.h>
27 #include <linux/slab.h>
30 #include "amd_powerplay.h"
31 #include "vega12_smumgr.h"
32 #include "hardwaremanager.h"
33 #include "ppatomfwctrl.h"
34 #include "atomfirmware.h"
35 #include "cgs_common.h"
36 #include "vega12_inc.h"
37 #include "pppcielanes.h"
38 #include "vega12_hwmgr.h"
39 #include "vega12_processpptables.h"
40 #include "vega12_pptable.h"
41 #include "vega12_thermal.h"
42 #include "vega12_ppsmc.h"
44 #include "amd_pcie_helpers.h"
45 #include "ppinterrupt.h"
46 #include "pp_overdriver.h"
47 #include "pp_thermal.h"
48 #include "vega12_baco.h"
/* Forward declaration: implementation appears later in this file. */
static int vega12_force_clock_level(struct pp_hwmgr *hwmgr,
		enum pp_clock_type type, uint32_t mask);
53 static int vega12_get_clock_ranges(struct pp_hwmgr
*hwmgr
,
/*
 * vega12_set_default_registry_data - seed the driver-default tuning values.
 * @hwmgr: the powerplay hardware manager.
 *
 * Fills the vega12 backend with compile-time defaults.  These values are
 * later consumed when deriving platform capability bits (see
 * vega12_set_features_platform_caps()).
 */
static void vega12_set_default_registry_data(struct pp_hwmgr *hwmgr)
{
	struct vega12_hwmgr *data =
			(struct vega12_hwmgr *)(hwmgr->backend);

	/* Averaging-filter coefficients for SMU clock/activity metrics. */
	data->gfxclk_average_alpha = PPVEGA12_VEGA12GFXCLKAVERAGEALPHA_DFLT;
	data->socclk_average_alpha = PPVEGA12_VEGA12SOCCLKAVERAGEALPHA_DFLT;
	data->uclk_average_alpha = PPVEGA12_VEGA12UCLKCLKAVERAGEALPHA_DFLT;
	data->gfx_activity_average_alpha = PPVEGA12_VEGA12GFXACTIVITYAVERAGEALPHA_DFLT;
	data->lowest_uclk_reserved_for_ulv = PPVEGA12_VEGA12LOWESTUCLKRESERVEDFORULV_DFLT;

	data->display_voltage_mode = PPVEGA12_VEGA12DISPLAYVOLTAGEMODE_DFLT;
	/* Quadratic-equation coefficients for the display-related clocks. */
	data->dcef_clk_quad_eqn_a = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
	data->dcef_clk_quad_eqn_b = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
	data->dcef_clk_quad_eqn_c = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
	data->disp_clk_quad_eqn_a = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
	data->disp_clk_quad_eqn_b = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
	data->disp_clk_quad_eqn_c = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
	data->pixel_clk_quad_eqn_a = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
	data->pixel_clk_quad_eqn_b = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
	data->pixel_clk_quad_eqn_c = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
	data->phy_clk_quad_eqn_a = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
	data->phy_clk_quad_eqn_b = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
	data->phy_clk_quad_eqn_c = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;

	data->registry_data.disallowed_features = 0x0;
	data->registry_data.od_state_in_dc_support = 0;
	data->registry_data.thermal_support = 1;
	data->registry_data.skip_baco_hardware = 0;

	data->registry_data.log_avfs_param = 0;
	data->registry_data.sclk_throttle_low_notification = 1;
	data->registry_data.force_dpm_high = 0;
	data->registry_data.stable_pstate_sclk_dpm_percentage = 75;

	data->registry_data.didt_support = 0;
	/* DIDT sub-options are staged here but only take effect when
	 * didt_support above is flipped to 1. */
	if (data->registry_data.didt_support) {
		data->registry_data.didt_mode = 6;
		data->registry_data.sq_ramping_support = 1;
		data->registry_data.db_ramping_support = 0;
		data->registry_data.td_ramping_support = 0;
		data->registry_data.tcp_ramping_support = 0;
		data->registry_data.dbr_ramping_support = 0;
		data->registry_data.edc_didt_support = 1;
		data->registry_data.gc_didt_support = 0;
		data->registry_data.psm_didt_support = 0;
	}

	/* 0xff / 0xffffffff mean "no override requested". */
	data->registry_data.pcie_lane_override = 0xff;
	data->registry_data.pcie_speed_override = 0xff;
	data->registry_data.pcie_clock_override = 0xffffffff;
	data->registry_data.regulator_hot_gpio_support = 1;
	data->registry_data.ac_dc_switch_gpio_support = 0;
	data->registry_data.quick_transition_support = 0;
	data->registry_data.zrpm_start_temp = 0xffff;
	data->registry_data.zrpm_stop_temp = 0xffff;
	data->registry_data.odn_feature_enable = 1;
	data->registry_data.disable_water_mark = 0;
	data->registry_data.disable_pp_tuning = 0;
	data->registry_data.disable_xlpp_tuning = 0;
	data->registry_data.disable_workload_policy = 0;
	data->registry_data.perf_ui_tuning_profile_turbo = 0x19190F0F;
	data->registry_data.perf_ui_tuning_profile_powerSave = 0x19191919;
	data->registry_data.perf_ui_tuning_profile_xl = 0x00000F0A;
	data->registry_data.force_workload_policy_mask = 0;
	data->registry_data.disable_3d_fs_detection = 0;
	data->registry_data.fps_support = 1;
	data->registry_data.disable_auto_wattman = 1;
	data->registry_data.auto_wattman_debug = 0;
	data->registry_data.auto_wattman_sample_period = 100;
	data->registry_data.auto_wattman_threshold = 50;
}
/*
 * vega12_set_features_platform_caps - translate registry defaults and ASIC
 * powergating flags into platform capability bits.
 * @hwmgr: the powerplay hardware manager.
 *
 * Return: 0 (the cap helpers do not fail).
 */
static int vega12_set_features_platform_caps(struct pp_hwmgr *hwmgr)
{
	struct vega12_hwmgr *data =
			(struct vega12_hwmgr *)(hwmgr->backend);
	struct amdgpu_device *adev = hwmgr->adev;

	if (data->vddci_control == VEGA12_VOLTAGE_CONTROL_NONE)
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_ControlVDDCI);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_TablelessHardwareInterface);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_EnableSMU7ThermalManagement);

	/* UVD/VCE powergating caps follow the ASIC pg_flags. */
	if (adev->pg_flags & AMD_PG_SUPPORT_UVD) {
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_UVDPowerGating);
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_UVDDynamicPowerGating);
	}

	if (adev->pg_flags & AMD_PG_SUPPORT_VCE)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_VCEPowerGating);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_UnTabledHardwareInterface);

	/* ODN supersedes the legacy OD6/OD6Plus overdrive caps. */
	if (data->registry_data.odn_feature_enable)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_ODNinACSupport);
	else {
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_OD6inACSupport);
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_OD6PlusinACSupport);
	}

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_ActivityReporting);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_FanSpeedInTableIsRPM);

	if (data->registry_data.od_state_in_dc_support) {
		if (data->registry_data.odn_feature_enable)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_ODNinDCSupport);
		else {
			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_OD6inDCSupport);
			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_OD6PlusinDCSupport);
		}
	}

	/* Fuzzy fan control needs thermal support and a valid TMax. */
	if (data->registry_data.thermal_support
			&& data->registry_data.fuzzy_fan_control_support
			&& hwmgr->thermal_controller.advanceFanControlParameters.usTMax)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_ODFuzzyFanControlSupport);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DynamicPowerManagement);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SMC);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_ThermalPolicyDelay);

	if (data->registry_data.force_dpm_high)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_ExclusiveModeAlwaysHigh);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DynamicUVDState);

	if (data->registry_data.sclk_throttle_low_notification)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_SclkThrottleLowNotification);

	/* power tune caps */
	/* assume disabled */
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PowerContainment);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DiDtSupport);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SQRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DBRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_TDRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_TCPRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DBRRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DiDtEDCEnable);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_GCEDC);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PSM);

	/* Re-enable individual DIDT caps per the registry defaults. */
	if (data->registry_data.didt_support) {
		phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DiDtSupport);
		if (data->registry_data.sq_ramping_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping);
		if (data->registry_data.db_ramping_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping);
		if (data->registry_data.td_ramping_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping);
		if (data->registry_data.tcp_ramping_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping);
		if (data->registry_data.dbr_ramping_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRRamping);
		if (data->registry_data.edc_didt_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DiDtEDCEnable);
		if (data->registry_data.gc_didt_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_GCEDC);
		if (data->registry_data.psm_didt_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PSM);
	}

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_RegulatorHot);

	if (data->registry_data.ac_dc_switch_gpio_support) {
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_AutomaticDCTransition);
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_SMCtoPPLIBAcdcGpioScheme);
	}

	/* Quick transition replaces the GPIO-based AC/DC scheme. */
	if (data->registry_data.quick_transition_support) {
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_AutomaticDCTransition);
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_SMCtoPPLIBAcdcGpioScheme);
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_Falcon_QuickTransition);
	}

	if (data->lowest_uclk_reserved_for_ulv != PPVEGA12_VEGA12LOWESTUCLKRESERVEDFORULV_DFLT) {
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_LowestUclkReservedForUlv);
		if (data->lowest_uclk_reserved_for_ulv == 1)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_LowestUclkReservedForUlv);
	}

	if (data->registry_data.custom_fan_support)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_CustomFanControlSupport);

	return 0;
}
/*
 * vega12_init_dpm_defaults - map driver feature slots to SMU feature bits
 * and read the chip serial number.
 * @hwmgr: the powerplay hardware manager.
 *
 * Builds each smu_features[] entry's feature id/bitmap, computes the
 * per-feature "allowed" flag from registry_data.disallowed_features, and
 * stores the SMU-reported serial number in adev->unique_id.
 */
static void vega12_init_dpm_defaults(struct pp_hwmgr *hwmgr)
{
	struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
	struct amdgpu_device *adev = hwmgr->adev;
	uint32_t top32, bottom32;
	int i;

	data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_id =
			FEATURE_DPM_PREFETCHER_BIT;
	data->smu_features[GNLD_DPM_GFXCLK].smu_feature_id =
			FEATURE_DPM_GFXCLK_BIT;
	data->smu_features[GNLD_DPM_UCLK].smu_feature_id =
			FEATURE_DPM_UCLK_BIT;
	data->smu_features[GNLD_DPM_SOCCLK].smu_feature_id =
			FEATURE_DPM_SOCCLK_BIT;
	data->smu_features[GNLD_DPM_UVD].smu_feature_id =
			FEATURE_DPM_UVD_BIT;
	data->smu_features[GNLD_DPM_VCE].smu_feature_id =
			FEATURE_DPM_VCE_BIT;
	data->smu_features[GNLD_ULV].smu_feature_id =
			FEATURE_ULV_BIT;
	data->smu_features[GNLD_DPM_MP0CLK].smu_feature_id =
			FEATURE_DPM_MP0CLK_BIT;
	data->smu_features[GNLD_DPM_LINK].smu_feature_id =
			FEATURE_DPM_LINK_BIT;
	data->smu_features[GNLD_DPM_DCEFCLK].smu_feature_id =
			FEATURE_DPM_DCEFCLK_BIT;
	data->smu_features[GNLD_DS_GFXCLK].smu_feature_id =
			FEATURE_DS_GFXCLK_BIT;
	data->smu_features[GNLD_DS_SOCCLK].smu_feature_id =
			FEATURE_DS_SOCCLK_BIT;
	data->smu_features[GNLD_DS_LCLK].smu_feature_id =
			FEATURE_DS_LCLK_BIT;
	data->smu_features[GNLD_PPT].smu_feature_id =
			FEATURE_PPT_BIT;
	data->smu_features[GNLD_TDC].smu_feature_id =
			FEATURE_TDC_BIT;
	data->smu_features[GNLD_THERMAL].smu_feature_id =
			FEATURE_THERMAL_BIT;
	data->smu_features[GNLD_GFX_PER_CU_CG].smu_feature_id =
			FEATURE_GFX_PER_CU_CG_BIT;
	data->smu_features[GNLD_RM].smu_feature_id =
			FEATURE_RM_BIT;
	data->smu_features[GNLD_DS_DCEFCLK].smu_feature_id =
			FEATURE_DS_DCEFCLK_BIT;
	data->smu_features[GNLD_ACDC].smu_feature_id =
			FEATURE_ACDC_BIT;
	data->smu_features[GNLD_VR0HOT].smu_feature_id =
			FEATURE_VR0HOT_BIT;
	data->smu_features[GNLD_VR1HOT].smu_feature_id =
			FEATURE_VR1HOT_BIT;
	data->smu_features[GNLD_FW_CTF].smu_feature_id =
			FEATURE_FW_CTF_BIT;
	data->smu_features[GNLD_LED_DISPLAY].smu_feature_id =
			FEATURE_LED_DISPLAY_BIT;
	data->smu_features[GNLD_FAN_CONTROL].smu_feature_id =
			FEATURE_FAN_CONTROL_BIT;
	data->smu_features[GNLD_DIDT].smu_feature_id = FEATURE_GFX_EDC_BIT;
	data->smu_features[GNLD_GFXOFF].smu_feature_id = FEATURE_GFXOFF_BIT;
	data->smu_features[GNLD_CG].smu_feature_id = FEATURE_CG_BIT;
	data->smu_features[GNLD_ACG].smu_feature_id = FEATURE_ACG_BIT;

	for (i = 0; i < GNLD_FEATURES_MAX; i++) {
		data->smu_features[i].smu_feature_bitmap =
			(uint64_t)(1ULL << data->smu_features[i].smu_feature_id);
		/* A set bit in disallowed_features forbids the feature. */
		data->smu_features[i].allowed =
			((data->registry_data.disallowed_features >> i) & 1) ?
			false : true;
	}

	/* Get the SN to turn into a Unique ID */
	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32);
	top32 = smum_get_argument(hwmgr);
	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32);
	bottom32 = smum_get_argument(hwmgr);

	adev->unique_id = ((uint64_t)bottom32 << 32) | top32;
}
/* Parse pptable-derived private data; currently nothing to do on vega12. */
static int vega12_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr)
{
	return 0;
}
/*
 * vega12_hwmgr_backend_fini - release the vega12 backend private data.
 * @hwmgr: the powerplay hardware manager.
 *
 * Return: 0 on success.
 */
static int vega12_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
{
	kfree(hwmgr->backend);
	hwmgr->backend = NULL;

	return 0;
}
/*
 * vega12_hwmgr_backend_init - allocate and populate the vega12 backend.
 * @hwmgr: the powerplay hardware manager.
 *
 * Allocates the private vega12_hwmgr, seeds registry defaults, platform
 * caps, DPM defaults and default fan/overdrive settings.
 *
 * Return: 0 on success, -ENOMEM if the backend cannot be allocated.
 */
static int vega12_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
{
	int result = 0;
	struct vega12_hwmgr *data;
	struct amdgpu_device *adev = hwmgr->adev;

	data = kzalloc(sizeof(struct vega12_hwmgr), GFP_KERNEL);
	if (data == NULL)
		return -ENOMEM;

	hwmgr->backend = data;

	vega12_set_default_registry_data(hwmgr);

	data->disable_dpm_mask = 0xff;
	data->workload_mask = 0xff;

	/* need to set voltage control types before EVV patching */
	data->vddc_control = VEGA12_VOLTAGE_CONTROL_NONE;
	data->mvdd_control = VEGA12_VOLTAGE_CONTROL_NONE;
	data->vddci_control = VEGA12_VOLTAGE_CONTROL_NONE;

	data->water_marks_bitmap = 0;
	data->avfs_exist = false;

	vega12_set_features_platform_caps(hwmgr);

	vega12_init_dpm_defaults(hwmgr);

	/* Parse pptable data read from VBIOS */
	vega12_set_private_data_based_on_pptable(hwmgr);

	data->is_tlu_enabled = false;

	hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
			VEGA12_MAX_HARDWARE_POWERLEVELS;
	hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
	hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;

	hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */
	/* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. */
	hwmgr->platform_descriptor.clockStep.engineClock = 500;
	hwmgr->platform_descriptor.clockStep.memoryClock = 500;

	data->total_active_cus = adev->gfx.cu_info.number;
	/* Setup default Overdrive Fan control settings */
	data->odn_fan_table.target_fan_speed =
			hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM;
	data->odn_fan_table.target_temperature =
			hwmgr->thermal_controller.advanceFanControlParameters.ucTargetTemperature;
	data->odn_fan_table.min_performance_clock =
			hwmgr->thermal_controller.advanceFanControlParameters.ulMinFanSCLKAcousticLimit;
	data->odn_fan_table.min_fan_limit =
			hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMinLimit *
			hwmgr->thermal_controller.fanInfo.ulMaxRPM / 100;

	/* Driver controls gfxoff only when the feature mask allows it. */
	if (hwmgr->feature_mask & PP_GFXOFF_MASK)
		data->gfxoff_controlled_by_driver = true;
	else
		data->gfxoff_controlled_by_driver = false;

	return result;
}
/*
 * vega12_init_sclk_threshold - clear the low-sclk interrupt threshold.
 * @hwmgr: the powerplay hardware manager.
 *
 * Return: 0 on success.
 */
static int vega12_init_sclk_threshold(struct pp_hwmgr *hwmgr)
{
	struct vega12_hwmgr *data =
			(struct vega12_hwmgr *)(hwmgr->backend);

	data->low_sclk_interrupt_threshold = 0;

	return 0;
}
/*
 * vega12_setup_asic_task - one-time ASIC setup work.
 * @hwmgr: the powerplay hardware manager.
 *
 * Return: 0 on success, -EINVAL if the sclk threshold init fails.
 */
static int vega12_setup_asic_task(struct pp_hwmgr *hwmgr)
{
	PP_ASSERT_WITH_CODE(!vega12_init_sclk_threshold(hwmgr),
			"Failed to init sclk threshold!",
			return -EINVAL);

	return 0;
}
465 * @fn vega12_init_dpm_state
466 * @brief Function to initialize all Soft Min/Max and Hard Min/Max to 0xff.
468 * @param dpm_state - the address of the DPM Table to initiailize.
471 static void vega12_init_dpm_state(struct vega12_dpm_state
*dpm_state
)
473 dpm_state
->soft_min_level
= 0x0;
474 dpm_state
->soft_max_level
= 0xffff;
475 dpm_state
->hard_min_level
= 0x0;
476 dpm_state
->hard_max_level
= 0xffff;
/*
 * vega12_get_number_of_dpm_level - query how many DPM levels a clock has.
 * @hwmgr: the powerplay hardware manager.
 * @clk_id: SMU clock domain to query.
 * @num_of_levels: filled with the level count on success.
 *
 * Sends GetDpmFreqByIndex with index 0xFF, which the SMU interprets as a
 * request for the level count rather than a frequency.
 *
 * Return: 0 on success, the SMU error on message failure, or -EINVAL if
 * the SMU reports zero levels.
 */
static int vega12_get_number_of_dpm_level(struct pp_hwmgr *hwmgr,
		PPCLK_e clk_id, uint32_t *num_of_levels)
{
	int ret = 0;

	ret = smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_GetDpmFreqByIndex,
			(clk_id << 16 | 0xFF));
	PP_ASSERT_WITH_CODE(!ret,
		"[GetNumOfDpmLevel] failed to get dpm levels!",
		return ret);

	*num_of_levels = smum_get_argument(hwmgr);
	PP_ASSERT_WITH_CODE(*num_of_levels > 0,
		"[GetNumOfDpmLevel] number of clk levels is invalid!",
		return -EINVAL);

	return 0;
}
/*
 * vega12_get_dpm_frequency_by_index - read one DPM level's frequency.
 * @hwmgr: the powerplay hardware manager.
 * @clkID: SMU clock domain to query.
 * @index: DPM level index within that domain.
 * @clock: filled with the frequency reported by the SMU.
 *
 * Return: 0 on success, -EINVAL on SMU message failure.
 */
static int vega12_get_dpm_frequency_by_index(struct pp_hwmgr *hwmgr,
		PPCLK_e clkID, uint32_t index, uint32_t *clock)
{
	/*
	 *SMU expects the Clock ID to be in the top 16 bits.
	 *Lower 16 bits specify the level
	 */
	PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
		PPSMC_MSG_GetDpmFreqByIndex, (clkID << 16 | index)) == 0,
		"[GetDpmFrequencyByIndex] Failed to get dpm frequency from SMU!",
		return -EINVAL);

	*clock = smum_get_argument(hwmgr);

	return 0;
}
/*
 * vega12_setup_single_dpm_table - populate one DPM table from the SMU.
 * @hwmgr: the powerplay hardware manager.
 * @dpm_table: table to fill with level count and frequencies.
 * @clk_id: SMU clock domain the table describes.
 *
 * Return: 0 on success, or the first error from the level/frequency
 * queries.
 */
static int vega12_setup_single_dpm_table(struct pp_hwmgr *hwmgr,
		struct vega12_single_dpm_table *dpm_table, PPCLK_e clk_id)
{
	int ret = 0;
	uint32_t i, num_of_levels, clk;

	ret = vega12_get_number_of_dpm_level(hwmgr, clk_id, &num_of_levels);
	PP_ASSERT_WITH_CODE(!ret,
		"[SetupSingleDpmTable] failed to get clk levels!",
		return ret);

	dpm_table->count = num_of_levels;

	for (i = 0; i < num_of_levels; i++) {
		ret = vega12_get_dpm_frequency_by_index(hwmgr, clk_id, i, &clk);
		PP_ASSERT_WITH_CODE(!ret,
			"[SetupSingleDpmTable] failed to get clk of specific level!",
			return ret);
		dpm_table->dpm_levels[i].value = clk;
		dpm_table->dpm_levels[i].enabled = true;
	}

	return ret;
}
/*
 * This function is to initialize all DPM state tables
 * for SMU based on the dependency table.
 * Dynamic state patching function will then trim these
 * state tables to the allowed range based
 * on the power policy or external client requests,
 * such as UVD request, etc.
 */
static int vega12_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
{
	int ret = 0;
	struct vega12_hwmgr *data =
			(struct vega12_hwmgr *)(hwmgr->backend);
	struct vega12_single_dpm_table *dpm_table;

	memset(&data->dpm_table, 0, sizeof(data->dpm_table));

	/* socclk: query the SMU when the feature is on, otherwise fall back
	 * to a single level at the VBIOS boot clock (MHz, hence /100). */
	dpm_table = &(data->dpm_table.soc_table);
	if (data->smu_features[GNLD_DPM_SOCCLK].enabled) {
		ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_SOCCLK);
		PP_ASSERT_WITH_CODE(!ret,
				"[SetupDefaultDpmTable] failed to get socclk dpm levels!",
				return ret);
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = data->vbios_boot_state.soc_clock / 100;
	}
	vega12_init_dpm_state(&(dpm_table->dpm_state));

	/* gfxclk */
	dpm_table = &(data->dpm_table.gfx_table);
	if (data->smu_features[GNLD_DPM_GFXCLK].enabled) {
		ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_GFXCLK);
		PP_ASSERT_WITH_CODE(!ret,
				"[SetupDefaultDpmTable] failed to get gfxclk dpm levels!",
				return ret);
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = data->vbios_boot_state.gfx_clock / 100;
	}
	vega12_init_dpm_state(&(dpm_table->dpm_state));

	/* memclk */
	dpm_table = &(data->dpm_table.mem_table);
	if (data->smu_features[GNLD_DPM_UCLK].enabled) {
		ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_UCLK);
		PP_ASSERT_WITH_CODE(!ret,
				"[SetupDefaultDpmTable] failed to get memclk dpm levels!",
				return ret);
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = data->vbios_boot_state.mem_clock / 100;
	}
	vega12_init_dpm_state(&(dpm_table->dpm_state));

	/* eclk */
	dpm_table = &(data->dpm_table.eclk_table);
	if (data->smu_features[GNLD_DPM_VCE].enabled) {
		ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_ECLK);
		PP_ASSERT_WITH_CODE(!ret,
				"[SetupDefaultDpmTable] failed to get eclk dpm levels!",
				return ret);
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = data->vbios_boot_state.eclock / 100;
	}
	vega12_init_dpm_state(&(dpm_table->dpm_state));

	/* vclk */
	dpm_table = &(data->dpm_table.vclk_table);
	if (data->smu_features[GNLD_DPM_UVD].enabled) {
		ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_VCLK);
		PP_ASSERT_WITH_CODE(!ret,
				"[SetupDefaultDpmTable] failed to get vclk dpm levels!",
				return ret);
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = data->vbios_boot_state.vclock / 100;
	}
	vega12_init_dpm_state(&(dpm_table->dpm_state));

	/* dclk */
	dpm_table = &(data->dpm_table.dclk_table);
	if (data->smu_features[GNLD_DPM_UVD].enabled) {
		ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_DCLK);
		PP_ASSERT_WITH_CODE(!ret,
				"[SetupDefaultDpmTable] failed to get dclk dpm levels!",
				return ret);
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = data->vbios_boot_state.dclock / 100;
	}
	vega12_init_dpm_state(&(dpm_table->dpm_state));

	/* dcefclk */
	dpm_table = &(data->dpm_table.dcef_table);
	if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
		ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_DCEFCLK);
		PP_ASSERT_WITH_CODE(!ret,
				"[SetupDefaultDpmTable] failed to get dcefclk dpm levels!",
				return ret);
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = data->vbios_boot_state.dcef_clock / 100;
	}
	vega12_init_dpm_state(&(dpm_table->dpm_state));

	/* pixclk: no VBIOS boot fallback — table left empty when DPM off. */
	dpm_table = &(data->dpm_table.pixel_table);
	if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
		ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_PIXCLK);
		PP_ASSERT_WITH_CODE(!ret,
				"[SetupDefaultDpmTable] failed to get pixclk dpm levels!",
				return ret);
	} else
		dpm_table->count = 0;
	vega12_init_dpm_state(&(dpm_table->dpm_state));

	/* dispclk */
	dpm_table = &(data->dpm_table.display_table);
	if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
		ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_DISPCLK);
		PP_ASSERT_WITH_CODE(!ret,
				"[SetupDefaultDpmTable] failed to get dispclk dpm levels!",
				return ret);
	} else
		dpm_table->count = 0;
	vega12_init_dpm_state(&(dpm_table->dpm_state));

	/* phyclk */
	dpm_table = &(data->dpm_table.phy_table);
	if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
		ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_PHYCLK);
		PP_ASSERT_WITH_CODE(!ret,
				"[SetupDefaultDpmTable] failed to get phyclk dpm levels!",
				return ret);
	} else
		dpm_table->count = 0;
	vega12_init_dpm_state(&(dpm_table->dpm_state));

	/* save a copy of the default DPM table */
	memcpy(&(data->golden_dpm_table), &(data->dpm_table),
			sizeof(struct vega12_dpm_table));

	return 0;
}
/*
 * vega12_save_default_power_profile - record default gfx/compute profiles.
 * @hwmgr: the powerplay hardware manager.
 *
 * The compute profile's min_sclk is raised to the second-highest gfx DPM
 * level so compute workloads run near the top of the clock range.
 *
 * Return: 0 on success.
 */
static int vega12_save_default_power_profile(struct pp_hwmgr *hwmgr)
{
	struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
	struct vega12_single_dpm_table *dpm_table = &(data->dpm_table.gfx_table);
	uint32_t min_level;

	hwmgr->default_gfx_power_profile.type = AMD_PP_GFX_PROFILE;
	hwmgr->default_compute_power_profile.type = AMD_PP_COMPUTE_PROFILE;

	/* Optimize compute power profile: Use only highest
	 * 2 power levels (if more than 2 are available)
	 */
	if (dpm_table->count > 2)
		min_level = dpm_table->count - 2;
	else if (dpm_table->count == 2)
		min_level = 1;
	else
		min_level = 0;

	hwmgr->default_compute_power_profile.min_sclk =
			dpm_table->dpm_levels[min_level].value;

	hwmgr->gfx_power_profile = hwmgr->default_gfx_power_profile;
	hwmgr->compute_power_profile = hwmgr->default_compute_power_profile;

	return 0;
}
/*
 * Initializes the SMC table and uploads it
 *
 * @param hwmgr the address of the powerplay hardware manager.
 * @return 0 on success, otherwise the upload error.
 */
static int vega12_init_smc_table(struct pp_hwmgr *hwmgr)
{
	int result;
	struct vega12_hwmgr *data =
			(struct vega12_hwmgr *)(hwmgr->backend);
	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
	struct pp_atomfwctrl_bios_boot_up_values boot_up_values;
	struct phm_ppt_v3_information *pptable_information =
		(struct phm_ppt_v3_information *)hwmgr->pptable;

	/* Cache the VBIOS boot-up voltages/clocks; they are used as the
	 * fallback DPM level when a clock's DPM feature is disabled. */
	result = pp_atomfwctrl_get_vbios_bootup_values(hwmgr, &boot_up_values);
	if (!result) {
		data->vbios_boot_state.vddc = boot_up_values.usVddc;
		data->vbios_boot_state.vddci = boot_up_values.usVddci;
		data->vbios_boot_state.mvddc = boot_up_values.usMvddc;
		data->vbios_boot_state.gfx_clock = boot_up_values.ulGfxClk;
		data->vbios_boot_state.mem_clock = boot_up_values.ulUClk;
		data->vbios_boot_state.soc_clock = boot_up_values.ulSocClk;
		data->vbios_boot_state.dcef_clock = boot_up_values.ulDCEFClk;
		data->vbios_boot_state.uc_cooling_id = boot_up_values.ucCoolingID;
		data->vbios_boot_state.eclock = boot_up_values.ulEClk;
		data->vbios_boot_state.dclock = boot_up_values.ulDClk;
		data->vbios_boot_state.vclock = boot_up_values.ulVClk;
		/* SMU expects the deep-sleep floor in 100 kHz units. */
		smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_SetMinDeepSleepDcefclk,
			(uint32_t)(data->vbios_boot_state.dcef_clock / 100));
	}

	memcpy(pp_table, pptable_information->smc_pptable, sizeof(PPTable_t));

	result = smum_smc_table_manager(hwmgr,
			(uint8_t *)pp_table, TABLE_PPTABLE, false);
	PP_ASSERT_WITH_CODE(!result,
			"Failed to upload PPtable!", return result);

	return 0;
}
/*
 * vega12_run_acg_btc - trigger the ACG built-in test/calibration on the SMU.
 * @hwmgr: the powerplay hardware manager.
 *
 * Return: 0 when the SMU reports success (argument == 1), -EINVAL otherwise.
 */
static int vega12_run_acg_btc(struct pp_hwmgr *hwmgr)
{
	uint32_t result;

	PP_ASSERT_WITH_CODE(
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgBtc) == 0,
		"[Run_ACG_BTC] Attempt to run ACG BTC failed!",
		return -EINVAL);

	result = smum_get_argument(hwmgr);
	PP_ASSERT_WITH_CODE(result == 1,
			"Failed to run ACG BTC!", return -EINVAL);

	return 0;
}
/*
 * vega12_set_allowed_featuresmask - push the driver's allowed-feature mask
 * to the SMU.
 * @hwmgr: the powerplay hardware manager.
 *
 * Splits the 64-bit per-feature bitmaps into low/high 32-bit words
 * (feature ids > 31 land in the high word) and sends each to the SMU.
 *
 * Return: 0 on success, -1 if either SMU message fails.
 */
static int vega12_set_allowed_featuresmask(struct pp_hwmgr *hwmgr)
{
	struct vega12_hwmgr *data =
			(struct vega12_hwmgr *)(hwmgr->backend);
	int i;
	uint32_t allowed_features_low = 0, allowed_features_high = 0;

	for (i = 0; i < GNLD_FEATURES_MAX; i++)
		if (data->smu_features[i].allowed)
			/* ternary used for its side effect: accumulate into
			 * the high or low word depending on the feature id */
			data->smu_features[i].smu_feature_id > 31 ?
				(allowed_features_high |= ((data->smu_features[i].smu_feature_bitmap >> SMU_FEATURES_HIGH_SHIFT) & 0xFFFFFFFF)) :
				(allowed_features_low |= ((data->smu_features[i].smu_feature_bitmap >> SMU_FEATURES_LOW_SHIFT) & 0xFFFFFFFF));

	PP_ASSERT_WITH_CODE(
		smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetAllowedFeaturesMaskHigh, allowed_features_high) == 0,
		"[SetAllowedFeaturesMask] Attempt to set allowed features mask (high) failed!",
		return -1);

	PP_ASSERT_WITH_CODE(
		smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetAllowedFeaturesMaskLow, allowed_features_low) == 0,
		"[SetAllowedFeaturesMask] Attempt to set allowed features mask (low) failed!",
		return -1);

	return 0;
}
809 static void vega12_init_powergate_state(struct pp_hwmgr
*hwmgr
)
811 struct vega12_hwmgr
*data
=
812 (struct vega12_hwmgr
*)(hwmgr
->backend
);
814 data
->uvd_power_gated
= true;
815 data
->vce_power_gated
= true;
817 if (data
->smu_features
[GNLD_DPM_UVD
].enabled
)
818 data
->uvd_power_gated
= false;
820 if (data
->smu_features
[GNLD_DPM_VCE
].enabled
)
821 data
->vce_power_gated
= false;
/*
 * vega12_enable_all_smu_features - ask the SMU to enable every allowed
 * feature, then mirror the resulting state into the driver.
 * @hwmgr: the powerplay hardware manager.
 *
 * Return: 0 on success, -1 if the enable message fails.
 */
static int vega12_enable_all_smu_features(struct pp_hwmgr *hwmgr)
{
	struct vega12_hwmgr *data =
			(struct vega12_hwmgr *)(hwmgr->backend);
	uint64_t features_enabled;
	int i;
	bool enabled;

	PP_ASSERT_WITH_CODE(
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAllSmuFeatures) == 0,
		"[EnableAllSMUFeatures] Failed to enable all smu features!",
		return -1);

	/* Read back which features the SMU actually turned on. */
	if (vega12_get_enabled_smc_features(hwmgr, &features_enabled) == 0) {
		for (i = 0; i < GNLD_FEATURES_MAX; i++) {
			enabled = (features_enabled & data->smu_features[i].smu_feature_bitmap) ? true : false;
			data->smu_features[i].enabled = enabled;
			data->smu_features[i].supported = enabled;
		}
	}

	vega12_init_powergate_state(hwmgr);

	return 0;
}
/*
 * vega12_disable_all_smu_features - ask the SMU to disable every feature,
 * then mirror the resulting state into the driver.
 * @hwmgr: the powerplay hardware manager.
 *
 * Return: 0 on success, -1 if the disable message fails.
 */
static int vega12_disable_all_smu_features(struct pp_hwmgr *hwmgr)
{
	struct vega12_hwmgr *data =
			(struct vega12_hwmgr *)(hwmgr->backend);
	uint64_t features_enabled;
	int i;
	bool enabled;

	PP_ASSERT_WITH_CODE(
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableAllSmuFeatures) == 0,
		"[DisableAllSMUFeatures] Failed to disable all smu features!",
		return -1);

	/* Read back the post-disable feature state from the SMU. */
	if (vega12_get_enabled_smc_features(hwmgr, &features_enabled) == 0) {
		for (i = 0; i < GNLD_FEATURES_MAX; i++) {
			enabled = (features_enabled & data->smu_features[i].smu_feature_bitmap) ? true : false;
			data->smu_features[i].enabled = enabled;
			data->smu_features[i].supported = enabled;
		}
	}

	return 0;
}
/* Overdrive-N default settings: nothing to initialize on vega12. */
static int vega12_odn_initialize_default_settings(
		struct pp_hwmgr *hwmgr)
{
	return 0;
}
880 static int vega12_set_overdrive_target_percentage(struct pp_hwmgr
*hwmgr
,
881 uint32_t adjust_percent
)
883 return smum_send_msg_to_smc_with_parameter(hwmgr
,
884 PPSMC_MSG_OverDriveSetPercentage
, adjust_percent
);
/*
 * vega12_power_control_set_level - apply the platform TDP adjustment.
 * @hwmgr: the powerplay hardware manager.
 *
 * Only acts when power containment is supported; the adjustment sign is
 * taken from TDPAdjustmentPolarity.
 *
 * Return: 0 when power containment is off, otherwise the SMU result.
 */
static int vega12_power_control_set_level(struct pp_hwmgr *hwmgr)
{
	int adjust_percent, result = 0;

	if (PP_CAP(PHM_PlatformCaps_PowerContainment)) {
		adjust_percent =
				hwmgr->platform_descriptor.TDPAdjustmentPolarity ?
				hwmgr->platform_descriptor.TDPAdjustment :
				(-1 * hwmgr->platform_descriptor.TDPAdjustment);
		result = vega12_set_overdrive_target_percentage(hwmgr,
				(uint32_t)adjust_percent);
	}
	return result;
}
/*
 * vega12_get_all_clock_ranges_helper - fetch AC/DC min/max for one clock.
 * @hwmgr: the powerplay hardware manager.
 * @clkid: SMU clock domain to query.
 * @clock: filled with ACMax/ACMin/DCMax from the SMU.
 *
 * The clock id is passed in the top 16 bits of each message parameter.
 *
 * Return: 0 on success, -EINVAL if any SMU query fails.
 */
static int vega12_get_all_clock_ranges_helper(struct pp_hwmgr *hwmgr,
		PPCLK_e clkid, struct vega12_clock_range *clock)
{
	/* AC Max */
	PP_ASSERT_WITH_CODE(
		smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMaxDpmFreq, (clkid << 16)) == 0,
		"[GetClockRanges] Failed to get max ac clock from SMC!",
		return -EINVAL);
	clock->ACMax = smum_get_argument(hwmgr);

	/* AC Min */
	PP_ASSERT_WITH_CODE(
		smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMinDpmFreq, (clkid << 16)) == 0,
		"[GetClockRanges] Failed to get min ac clock from SMC!",
		return -EINVAL);
	clock->ACMin = smum_get_argument(hwmgr);

	/* DC Max */
	PP_ASSERT_WITH_CODE(
		smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetDcModeMaxDpmFreq, (clkid << 16)) == 0,
		"[GetClockRanges] Failed to get max dc clock from SMC!",
		return -EINVAL);
	clock->DCMax = smum_get_argument(hwmgr);

	return 0;
}
/*
 * vega12_get_all_clock_ranges - populate clk_range[] for every clock domain.
 * @hwmgr: the powerplay hardware manager.
 *
 * Return: 0 on success, -EINVAL if any per-clock query fails.
 */
static int vega12_get_all_clock_ranges(struct pp_hwmgr *hwmgr)
{
	struct vega12_hwmgr *data =
			(struct vega12_hwmgr *)(hwmgr->backend);
	uint32_t i;

	for (i = 0; i < PPCLK_COUNT; i++)
		PP_ASSERT_WITH_CODE(!vega12_get_all_clock_ranges_helper(hwmgr,
					i, &(data->clk_range[i])),
				"Failed to get clk range from SMC!",
				return -EINVAL);

	return 0;
}
944 static int vega12_enable_dpm_tasks(struct pp_hwmgr
*hwmgr
)
946 int tmp_result
, result
= 0;
948 smum_send_msg_to_smc_with_parameter(hwmgr
,
949 PPSMC_MSG_NumOfDisplays
, 0);
951 result
= vega12_set_allowed_featuresmask(hwmgr
);
952 PP_ASSERT_WITH_CODE(result
== 0,
953 "[EnableDPMTasks] Failed to set allowed featuresmask!\n",
956 tmp_result
= vega12_init_smc_table(hwmgr
);
957 PP_ASSERT_WITH_CODE(!tmp_result
,
958 "Failed to initialize SMC table!",
959 result
= tmp_result
);
961 tmp_result
= vega12_run_acg_btc(hwmgr
);
962 PP_ASSERT_WITH_CODE(!tmp_result
,
963 "Failed to run ACG BTC!",
964 result
= tmp_result
);
966 result
= vega12_enable_all_smu_features(hwmgr
);
967 PP_ASSERT_WITH_CODE(!result
,
968 "Failed to enable all smu features!",
971 tmp_result
= vega12_power_control_set_level(hwmgr
);
972 PP_ASSERT_WITH_CODE(!tmp_result
,
973 "Failed to power control set level!",
974 result
= tmp_result
);
976 result
= vega12_get_all_clock_ranges(hwmgr
);
977 PP_ASSERT_WITH_CODE(!result
,
978 "Failed to get all clock ranges!",
981 result
= vega12_odn_initialize_default_settings(hwmgr
);
982 PP_ASSERT_WITH_CODE(!result
,
983 "Failed to power control set level!",
986 result
= vega12_setup_default_dpm_tables(hwmgr
);
987 PP_ASSERT_WITH_CODE(!result
,
988 "Failed to setup default DPM tables!",
/* Patch the boot power state. No adjustment is needed on Vega12, so this
 * is a successful no-op. */
static int vega12_patch_boot_state(struct pp_hwmgr *hwmgr,
	     struct pp_hw_power_state *hw_ps)
{
	return 0;
}
999 static uint32_t vega12_find_lowest_dpm_level(
1000 struct vega12_single_dpm_table
*table
)
1004 for (i
= 0; i
< table
->count
; i
++) {
1005 if (table
->dpm_levels
[i
].enabled
)
1009 if (i
>= table
->count
) {
1011 table
->dpm_levels
[i
].enabled
= true;
1017 static uint32_t vega12_find_highest_dpm_level(
1018 struct vega12_single_dpm_table
*table
)
1021 PP_ASSERT_WITH_CODE(table
->count
<= MAX_REGULAR_DPM_NUMBER
,
1022 "[FindHighestDPMLevel] DPM Table has too many entries!",
1023 return MAX_REGULAR_DPM_NUMBER
- 1);
1025 for (i
= table
->count
- 1; i
>= 0; i
--) {
1026 if (table
->dpm_levels
[i
].enabled
)
1032 table
->dpm_levels
[i
].enabled
= true;
1038 static int vega12_upload_dpm_min_level(struct pp_hwmgr
*hwmgr
)
1040 struct vega12_hwmgr
*data
= hwmgr
->backend
;
1044 if (data
->smu_features
[GNLD_DPM_GFXCLK
].enabled
) {
1045 min_freq
= data
->dpm_table
.gfx_table
.dpm_state
.soft_min_level
;
1046 PP_ASSERT_WITH_CODE(!(ret
= smum_send_msg_to_smc_with_parameter(
1047 hwmgr
, PPSMC_MSG_SetSoftMinByFreq
,
1048 (PPCLK_GFXCLK
<< 16) | (min_freq
& 0xffff))),
1049 "Failed to set soft min gfxclk !",
1053 if (data
->smu_features
[GNLD_DPM_UCLK
].enabled
) {
1054 min_freq
= data
->dpm_table
.mem_table
.dpm_state
.soft_min_level
;
1055 PP_ASSERT_WITH_CODE(!(ret
= smum_send_msg_to_smc_with_parameter(
1056 hwmgr
, PPSMC_MSG_SetSoftMinByFreq
,
1057 (PPCLK_UCLK
<< 16) | (min_freq
& 0xffff))),
1058 "Failed to set soft min memclk !",
1061 min_freq
= data
->dpm_table
.mem_table
.dpm_state
.hard_min_level
;
1062 PP_ASSERT_WITH_CODE(!(ret
= smum_send_msg_to_smc_with_parameter(
1063 hwmgr
, PPSMC_MSG_SetHardMinByFreq
,
1064 (PPCLK_UCLK
<< 16) | (min_freq
& 0xffff))),
1065 "Failed to set hard min memclk !",
1069 if (data
->smu_features
[GNLD_DPM_UVD
].enabled
) {
1070 min_freq
= data
->dpm_table
.vclk_table
.dpm_state
.soft_min_level
;
1072 PP_ASSERT_WITH_CODE(!(ret
= smum_send_msg_to_smc_with_parameter(
1073 hwmgr
, PPSMC_MSG_SetSoftMinByFreq
,
1074 (PPCLK_VCLK
<< 16) | (min_freq
& 0xffff))),
1075 "Failed to set soft min vclk!",
1078 min_freq
= data
->dpm_table
.dclk_table
.dpm_state
.soft_min_level
;
1080 PP_ASSERT_WITH_CODE(!(ret
= smum_send_msg_to_smc_with_parameter(
1081 hwmgr
, PPSMC_MSG_SetSoftMinByFreq
,
1082 (PPCLK_DCLK
<< 16) | (min_freq
& 0xffff))),
1083 "Failed to set soft min dclk!",
1087 if (data
->smu_features
[GNLD_DPM_VCE
].enabled
) {
1088 min_freq
= data
->dpm_table
.eclk_table
.dpm_state
.soft_min_level
;
1090 PP_ASSERT_WITH_CODE(!(ret
= smum_send_msg_to_smc_with_parameter(
1091 hwmgr
, PPSMC_MSG_SetSoftMinByFreq
,
1092 (PPCLK_ECLK
<< 16) | (min_freq
& 0xffff))),
1093 "Failed to set soft min eclk!",
1097 if (data
->smu_features
[GNLD_DPM_SOCCLK
].enabled
) {
1098 min_freq
= data
->dpm_table
.soc_table
.dpm_state
.soft_min_level
;
1100 PP_ASSERT_WITH_CODE(!(ret
= smum_send_msg_to_smc_with_parameter(
1101 hwmgr
, PPSMC_MSG_SetSoftMinByFreq
,
1102 (PPCLK_SOCCLK
<< 16) | (min_freq
& 0xffff))),
1103 "Failed to set soft min socclk!",
1107 if (data
->smu_features
[GNLD_DPM_DCEFCLK
].enabled
) {
1108 min_freq
= data
->dpm_table
.dcef_table
.dpm_state
.hard_min_level
;
1110 PP_ASSERT_WITH_CODE(!(ret
= smum_send_msg_to_smc_with_parameter(
1111 hwmgr
, PPSMC_MSG_SetHardMinByFreq
,
1112 (PPCLK_DCEFCLK
<< 16) | (min_freq
& 0xffff))),
1113 "Failed to set hard min dcefclk!",
1121 static int vega12_upload_dpm_max_level(struct pp_hwmgr
*hwmgr
)
1123 struct vega12_hwmgr
*data
= hwmgr
->backend
;
1127 if (data
->smu_features
[GNLD_DPM_GFXCLK
].enabled
) {
1128 max_freq
= data
->dpm_table
.gfx_table
.dpm_state
.soft_max_level
;
1130 PP_ASSERT_WITH_CODE(!(ret
= smum_send_msg_to_smc_with_parameter(
1131 hwmgr
, PPSMC_MSG_SetSoftMaxByFreq
,
1132 (PPCLK_GFXCLK
<< 16) | (max_freq
& 0xffff))),
1133 "Failed to set soft max gfxclk!",
1137 if (data
->smu_features
[GNLD_DPM_UCLK
].enabled
) {
1138 max_freq
= data
->dpm_table
.mem_table
.dpm_state
.soft_max_level
;
1140 PP_ASSERT_WITH_CODE(!(ret
= smum_send_msg_to_smc_with_parameter(
1141 hwmgr
, PPSMC_MSG_SetSoftMaxByFreq
,
1142 (PPCLK_UCLK
<< 16) | (max_freq
& 0xffff))),
1143 "Failed to set soft max memclk!",
1147 if (data
->smu_features
[GNLD_DPM_UVD
].enabled
) {
1148 max_freq
= data
->dpm_table
.vclk_table
.dpm_state
.soft_max_level
;
1150 PP_ASSERT_WITH_CODE(!(ret
= smum_send_msg_to_smc_with_parameter(
1151 hwmgr
, PPSMC_MSG_SetSoftMaxByFreq
,
1152 (PPCLK_VCLK
<< 16) | (max_freq
& 0xffff))),
1153 "Failed to set soft max vclk!",
1156 max_freq
= data
->dpm_table
.dclk_table
.dpm_state
.soft_max_level
;
1157 PP_ASSERT_WITH_CODE(!(ret
= smum_send_msg_to_smc_with_parameter(
1158 hwmgr
, PPSMC_MSG_SetSoftMaxByFreq
,
1159 (PPCLK_DCLK
<< 16) | (max_freq
& 0xffff))),
1160 "Failed to set soft max dclk!",
1164 if (data
->smu_features
[GNLD_DPM_VCE
].enabled
) {
1165 max_freq
= data
->dpm_table
.eclk_table
.dpm_state
.soft_max_level
;
1167 PP_ASSERT_WITH_CODE(!(ret
= smum_send_msg_to_smc_with_parameter(
1168 hwmgr
, PPSMC_MSG_SetSoftMaxByFreq
,
1169 (PPCLK_ECLK
<< 16) | (max_freq
& 0xffff))),
1170 "Failed to set soft max eclk!",
1174 if (data
->smu_features
[GNLD_DPM_SOCCLK
].enabled
) {
1175 max_freq
= data
->dpm_table
.soc_table
.dpm_state
.soft_max_level
;
1177 PP_ASSERT_WITH_CODE(!(ret
= smum_send_msg_to_smc_with_parameter(
1178 hwmgr
, PPSMC_MSG_SetSoftMaxByFreq
,
1179 (PPCLK_SOCCLK
<< 16) | (max_freq
& 0xffff))),
1180 "Failed to set soft max socclk!",
1187 int vega12_enable_disable_vce_dpm(struct pp_hwmgr
*hwmgr
, bool enable
)
1189 struct vega12_hwmgr
*data
=
1190 (struct vega12_hwmgr
*)(hwmgr
->backend
);
1192 if (data
->smu_features
[GNLD_DPM_VCE
].supported
) {
1193 PP_ASSERT_WITH_CODE(!vega12_enable_smc_features(hwmgr
,
1195 data
->smu_features
[GNLD_DPM_VCE
].smu_feature_bitmap
),
1196 "Attempt to Enable/Disable DPM VCE Failed!",
1198 data
->smu_features
[GNLD_DPM_VCE
].enabled
= enable
;
1204 static uint32_t vega12_dpm_get_sclk(struct pp_hwmgr
*hwmgr
, bool low
)
1206 struct vega12_hwmgr
*data
=
1207 (struct vega12_hwmgr
*)(hwmgr
->backend
);
1210 if (!data
->smu_features
[GNLD_DPM_GFXCLK
].enabled
)
1214 PP_ASSERT_WITH_CODE(
1215 vega12_get_clock_ranges(hwmgr
, &gfx_clk
, PPCLK_GFXCLK
, false) == 0,
1216 "[GetSclks]: fail to get min PPCLK_GFXCLK\n",
1219 PP_ASSERT_WITH_CODE(
1220 vega12_get_clock_ranges(hwmgr
, &gfx_clk
, PPCLK_GFXCLK
, true) == 0,
1221 "[GetSclks]: fail to get max PPCLK_GFXCLK\n",
1224 return (gfx_clk
* 100);
1227 static uint32_t vega12_dpm_get_mclk(struct pp_hwmgr
*hwmgr
, bool low
)
1229 struct vega12_hwmgr
*data
=
1230 (struct vega12_hwmgr
*)(hwmgr
->backend
);
1233 if (!data
->smu_features
[GNLD_DPM_UCLK
].enabled
)
1237 PP_ASSERT_WITH_CODE(
1238 vega12_get_clock_ranges(hwmgr
, &mem_clk
, PPCLK_UCLK
, false) == 0,
1239 "[GetMclks]: fail to get min PPCLK_UCLK\n",
1242 PP_ASSERT_WITH_CODE(
1243 vega12_get_clock_ranges(hwmgr
, &mem_clk
, PPCLK_UCLK
, true) == 0,
1244 "[GetMclks]: fail to get max PPCLK_UCLK\n",
1247 return (mem_clk
* 100);
1250 static int vega12_get_metrics_table(struct pp_hwmgr
*hwmgr
, SmuMetrics_t
*metrics_table
)
1252 struct vega12_hwmgr
*data
=
1253 (struct vega12_hwmgr
*)(hwmgr
->backend
);
1256 if (!data
->metrics_time
|| time_after(jiffies
, data
->metrics_time
+ HZ
/ 2)) {
1257 ret
= smum_smc_table_manager(hwmgr
, (uint8_t *)metrics_table
,
1258 TABLE_SMU_METRICS
, true);
1260 pr_info("Failed to export SMU metrics table!\n");
1263 memcpy(&data
->metrics_table
, metrics_table
, sizeof(SmuMetrics_t
));
1264 data
->metrics_time
= jiffies
;
1266 memcpy(metrics_table
, &data
->metrics_table
, sizeof(SmuMetrics_t
));
1271 static int vega12_get_gpu_power(struct pp_hwmgr
*hwmgr
, uint32_t *query
)
1273 SmuMetrics_t metrics_table
;
1276 ret
= vega12_get_metrics_table(hwmgr
, &metrics_table
);
1280 *query
= metrics_table
.CurrSocketPower
<< 8;
1285 static int vega12_get_current_gfx_clk_freq(struct pp_hwmgr
*hwmgr
, uint32_t *gfx_freq
)
1287 uint32_t gfx_clk
= 0;
1291 PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr
,
1292 PPSMC_MSG_GetDpmClockFreq
, (PPCLK_GFXCLK
<< 16)) == 0,
1293 "[GetCurrentGfxClkFreq] Attempt to get Current GFXCLK Frequency Failed!",
1295 gfx_clk
= smum_get_argument(hwmgr
);
1297 *gfx_freq
= gfx_clk
* 100;
1302 static int vega12_get_current_mclk_freq(struct pp_hwmgr
*hwmgr
, uint32_t *mclk_freq
)
1304 uint32_t mem_clk
= 0;
1308 PP_ASSERT_WITH_CODE(
1309 smum_send_msg_to_smc_with_parameter(hwmgr
, PPSMC_MSG_GetDpmClockFreq
, (PPCLK_UCLK
<< 16)) == 0,
1310 "[GetCurrentMClkFreq] Attempt to get Current MCLK Frequency Failed!",
1312 mem_clk
= smum_get_argument(hwmgr
);
1314 *mclk_freq
= mem_clk
* 100;
1319 static int vega12_get_current_activity_percent(
1320 struct pp_hwmgr
*hwmgr
,
1322 uint32_t *activity_percent
)
1324 SmuMetrics_t metrics_table
;
1327 ret
= vega12_get_metrics_table(hwmgr
, &metrics_table
);
1332 case AMDGPU_PP_SENSOR_GPU_LOAD
:
1333 *activity_percent
= metrics_table
.AverageGfxActivity
;
1335 case AMDGPU_PP_SENSOR_MEM_LOAD
:
1336 *activity_percent
= metrics_table
.AverageUclkActivity
;
1339 pr_err("Invalid index for retrieving clock activity\n");
1346 static int vega12_read_sensor(struct pp_hwmgr
*hwmgr
, int idx
,
1347 void *value
, int *size
)
1349 struct vega12_hwmgr
*data
= (struct vega12_hwmgr
*)(hwmgr
->backend
);
1350 SmuMetrics_t metrics_table
;
1354 case AMDGPU_PP_SENSOR_GFX_SCLK
:
1355 ret
= vega12_get_current_gfx_clk_freq(hwmgr
, (uint32_t *)value
);
1359 case AMDGPU_PP_SENSOR_GFX_MCLK
:
1360 ret
= vega12_get_current_mclk_freq(hwmgr
, (uint32_t *)value
);
1364 case AMDGPU_PP_SENSOR_GPU_LOAD
:
1365 case AMDGPU_PP_SENSOR_MEM_LOAD
:
1366 ret
= vega12_get_current_activity_percent(hwmgr
, idx
, (uint32_t *)value
);
1370 case AMDGPU_PP_SENSOR_GPU_TEMP
:
1371 *((uint32_t *)value
) = vega12_thermal_get_temperature(hwmgr
);
1374 case AMDGPU_PP_SENSOR_HOTSPOT_TEMP
:
1375 ret
= vega12_get_metrics_table(hwmgr
, &metrics_table
);
1379 *((uint32_t *)value
) = metrics_table
.TemperatureHotspot
*
1380 PP_TEMPERATURE_UNITS_PER_CENTIGRADES
;
1383 case AMDGPU_PP_SENSOR_MEM_TEMP
:
1384 ret
= vega12_get_metrics_table(hwmgr
, &metrics_table
);
1388 *((uint32_t *)value
) = metrics_table
.TemperatureHBM
*
1389 PP_TEMPERATURE_UNITS_PER_CENTIGRADES
;
1392 case AMDGPU_PP_SENSOR_UVD_POWER
:
1393 *((uint32_t *)value
) = data
->uvd_power_gated
? 0 : 1;
1396 case AMDGPU_PP_SENSOR_VCE_POWER
:
1397 *((uint32_t *)value
) = data
->vce_power_gated
? 0 : 1;
1400 case AMDGPU_PP_SENSOR_GPU_POWER
:
1401 ret
= vega12_get_gpu_power(hwmgr
, (uint32_t *)value
);
1405 case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK
:
1406 ret
= vega12_get_enabled_smc_features(hwmgr
, (uint64_t *)value
);
1417 static int vega12_notify_smc_display_change(struct pp_hwmgr
*hwmgr
,
1420 struct vega12_hwmgr
*data
= (struct vega12_hwmgr
*)(hwmgr
->backend
);
1422 if (data
->smu_features
[GNLD_DPM_UCLK
].enabled
)
1423 return smum_send_msg_to_smc_with_parameter(hwmgr
,
1424 PPSMC_MSG_SetUclkFastSwitch
,
1430 int vega12_display_clock_voltage_request(struct pp_hwmgr
*hwmgr
,
1431 struct pp_display_clock_request
*clock_req
)
1434 struct vega12_hwmgr
*data
= (struct vega12_hwmgr
*)(hwmgr
->backend
);
1435 enum amd_pp_clock_type clk_type
= clock_req
->clock_type
;
1436 uint32_t clk_freq
= clock_req
->clock_freq_in_khz
/ 1000;
1437 PPCLK_e clk_select
= 0;
1438 uint32_t clk_request
= 0;
1440 if (data
->smu_features
[GNLD_DPM_DCEFCLK
].enabled
) {
1442 case amd_pp_dcef_clock
:
1443 clk_select
= PPCLK_DCEFCLK
;
1445 case amd_pp_disp_clock
:
1446 clk_select
= PPCLK_DISPCLK
;
1448 case amd_pp_pixel_clock
:
1449 clk_select
= PPCLK_PIXCLK
;
1451 case amd_pp_phy_clock
:
1452 clk_select
= PPCLK_PHYCLK
;
1455 pr_info("[DisplayClockVoltageRequest]Invalid Clock Type!");
1461 clk_request
= (clk_select
<< 16) | clk_freq
;
1462 result
= smum_send_msg_to_smc_with_parameter(hwmgr
,
1463 PPSMC_MSG_SetHardMinByFreq
,
1471 static int vega12_notify_smc_display_config_after_ps_adjustment(
1472 struct pp_hwmgr
*hwmgr
)
1474 struct vega12_hwmgr
*data
=
1475 (struct vega12_hwmgr
*)(hwmgr
->backend
);
1476 struct PP_Clocks min_clocks
= {0};
1477 struct pp_display_clock_request clock_req
;
1479 if ((hwmgr
->display_config
->num_display
> 1) &&
1480 !hwmgr
->display_config
->multi_monitor_in_sync
&&
1481 !hwmgr
->display_config
->nb_pstate_switch_disable
)
1482 vega12_notify_smc_display_change(hwmgr
, false);
1484 vega12_notify_smc_display_change(hwmgr
, true);
1486 min_clocks
.dcefClock
= hwmgr
->display_config
->min_dcef_set_clk
;
1487 min_clocks
.dcefClockInSR
= hwmgr
->display_config
->min_dcef_deep_sleep_set_clk
;
1488 min_clocks
.memoryClock
= hwmgr
->display_config
->min_mem_set_clock
;
1490 if (data
->smu_features
[GNLD_DPM_DCEFCLK
].supported
) {
1491 clock_req
.clock_type
= amd_pp_dcef_clock
;
1492 clock_req
.clock_freq_in_khz
= min_clocks
.dcefClock
/10;
1493 if (!vega12_display_clock_voltage_request(hwmgr
, &clock_req
)) {
1494 if (data
->smu_features
[GNLD_DS_DCEFCLK
].supported
)
1495 PP_ASSERT_WITH_CODE(
1496 !smum_send_msg_to_smc_with_parameter(
1497 hwmgr
, PPSMC_MSG_SetMinDeepSleepDcefclk
,
1498 min_clocks
.dcefClockInSR
/100),
1499 "Attempt to set divider for DCEFCLK Failed!",
1502 pr_info("Attempt to set Hard Min for DCEFCLK Failed!");
1509 static int vega12_force_dpm_highest(struct pp_hwmgr
*hwmgr
)
1511 struct vega12_hwmgr
*data
=
1512 (struct vega12_hwmgr
*)(hwmgr
->backend
);
1514 uint32_t soft_level
;
1516 soft_level
= vega12_find_highest_dpm_level(&(data
->dpm_table
.gfx_table
));
1518 data
->dpm_table
.gfx_table
.dpm_state
.soft_min_level
=
1519 data
->dpm_table
.gfx_table
.dpm_state
.soft_max_level
=
1520 data
->dpm_table
.gfx_table
.dpm_levels
[soft_level
].value
;
1522 soft_level
= vega12_find_highest_dpm_level(&(data
->dpm_table
.mem_table
));
1524 data
->dpm_table
.mem_table
.dpm_state
.soft_min_level
=
1525 data
->dpm_table
.mem_table
.dpm_state
.soft_max_level
=
1526 data
->dpm_table
.mem_table
.dpm_levels
[soft_level
].value
;
1528 PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr
),
1529 "Failed to upload boot level to highest!",
1532 PP_ASSERT_WITH_CODE(!vega12_upload_dpm_max_level(hwmgr
),
1533 "Failed to upload dpm max level to highest!",
1539 static int vega12_force_dpm_lowest(struct pp_hwmgr
*hwmgr
)
1541 struct vega12_hwmgr
*data
=
1542 (struct vega12_hwmgr
*)(hwmgr
->backend
);
1543 uint32_t soft_level
;
1545 soft_level
= vega12_find_lowest_dpm_level(&(data
->dpm_table
.gfx_table
));
1547 data
->dpm_table
.gfx_table
.dpm_state
.soft_min_level
=
1548 data
->dpm_table
.gfx_table
.dpm_state
.soft_max_level
=
1549 data
->dpm_table
.gfx_table
.dpm_levels
[soft_level
].value
;
1551 soft_level
= vega12_find_lowest_dpm_level(&(data
->dpm_table
.mem_table
));
1553 data
->dpm_table
.mem_table
.dpm_state
.soft_min_level
=
1554 data
->dpm_table
.mem_table
.dpm_state
.soft_max_level
=
1555 data
->dpm_table
.mem_table
.dpm_levels
[soft_level
].value
;
1557 PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr
),
1558 "Failed to upload boot level to highest!",
1561 PP_ASSERT_WITH_CODE(!vega12_upload_dpm_max_level(hwmgr
),
1562 "Failed to upload dpm max level to highest!",
1569 static int vega12_unforce_dpm_levels(struct pp_hwmgr
*hwmgr
)
1571 PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr
),
1572 "Failed to upload DPM Bootup Levels!",
1575 PP_ASSERT_WITH_CODE(!vega12_upload_dpm_max_level(hwmgr
),
1576 "Failed to upload DPM Max Levels!",
1582 static int vega12_get_profiling_clk_mask(struct pp_hwmgr
*hwmgr
, enum amd_dpm_forced_level level
,
1583 uint32_t *sclk_mask
, uint32_t *mclk_mask
, uint32_t *soc_mask
)
1585 struct vega12_hwmgr
*data
= (struct vega12_hwmgr
*)(hwmgr
->backend
);
1586 struct vega12_single_dpm_table
*gfx_dpm_table
= &(data
->dpm_table
.gfx_table
);
1587 struct vega12_single_dpm_table
*mem_dpm_table
= &(data
->dpm_table
.mem_table
);
1588 struct vega12_single_dpm_table
*soc_dpm_table
= &(data
->dpm_table
.soc_table
);
1594 if (gfx_dpm_table
->count
> VEGA12_UMD_PSTATE_GFXCLK_LEVEL
&&
1595 mem_dpm_table
->count
> VEGA12_UMD_PSTATE_MCLK_LEVEL
&&
1596 soc_dpm_table
->count
> VEGA12_UMD_PSTATE_SOCCLK_LEVEL
) {
1597 *sclk_mask
= VEGA12_UMD_PSTATE_GFXCLK_LEVEL
;
1598 *mclk_mask
= VEGA12_UMD_PSTATE_MCLK_LEVEL
;
1599 *soc_mask
= VEGA12_UMD_PSTATE_SOCCLK_LEVEL
;
1602 if (level
== AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK
) {
1604 } else if (level
== AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK
) {
1606 } else if (level
== AMD_DPM_FORCED_LEVEL_PROFILE_PEAK
) {
1607 *sclk_mask
= gfx_dpm_table
->count
- 1;
1608 *mclk_mask
= mem_dpm_table
->count
- 1;
1609 *soc_mask
= soc_dpm_table
->count
- 1;
1615 static void vega12_set_fan_control_mode(struct pp_hwmgr
*hwmgr
, uint32_t mode
)
1618 case AMD_FAN_CTRL_NONE
:
1620 case AMD_FAN_CTRL_MANUAL
:
1621 if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl
))
1622 vega12_fan_ctrl_stop_smc_fan_control(hwmgr
);
1624 case AMD_FAN_CTRL_AUTO
:
1625 if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl
))
1626 vega12_fan_ctrl_start_smc_fan_control(hwmgr
);
1633 static int vega12_dpm_force_dpm_level(struct pp_hwmgr
*hwmgr
,
1634 enum amd_dpm_forced_level level
)
1637 uint32_t sclk_mask
= 0;
1638 uint32_t mclk_mask
= 0;
1639 uint32_t soc_mask
= 0;
1642 case AMD_DPM_FORCED_LEVEL_HIGH
:
1643 ret
= vega12_force_dpm_highest(hwmgr
);
1645 case AMD_DPM_FORCED_LEVEL_LOW
:
1646 ret
= vega12_force_dpm_lowest(hwmgr
);
1648 case AMD_DPM_FORCED_LEVEL_AUTO
:
1649 ret
= vega12_unforce_dpm_levels(hwmgr
);
1651 case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD
:
1652 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK
:
1653 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK
:
1654 case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK
:
1655 ret
= vega12_get_profiling_clk_mask(hwmgr
, level
, &sclk_mask
, &mclk_mask
, &soc_mask
);
1658 vega12_force_clock_level(hwmgr
, PP_SCLK
, 1 << sclk_mask
);
1659 vega12_force_clock_level(hwmgr
, PP_MCLK
, 1 << mclk_mask
);
1661 case AMD_DPM_FORCED_LEVEL_MANUAL
:
1662 case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT
:
1670 static uint32_t vega12_get_fan_control_mode(struct pp_hwmgr
*hwmgr
)
1672 struct vega12_hwmgr
*data
= (struct vega12_hwmgr
*)(hwmgr
->backend
);
1674 if (data
->smu_features
[GNLD_FAN_CONTROL
].enabled
== false)
1675 return AMD_FAN_CTRL_MANUAL
;
1677 return AMD_FAN_CTRL_AUTO
;
1680 static int vega12_get_dal_power_level(struct pp_hwmgr
*hwmgr
,
1681 struct amd_pp_simple_clock_info
*info
)
1684 struct phm_ppt_v2_information
*table_info
=
1685 (struct phm_ppt_v2_information
*)hwmgr
->pptable
;
1686 struct phm_clock_and_voltage_limits
*max_limits
=
1687 &table_info
->max_clock_voltage_on_ac
;
1689 info
->engine_max_clock
= max_limits
->sclk
;
1690 info
->memory_max_clock
= max_limits
->mclk
;
1695 static int vega12_get_clock_ranges(struct pp_hwmgr
*hwmgr
,
1697 PPCLK_e clock_select
,
1700 struct vega12_hwmgr
*data
= (struct vega12_hwmgr
*)(hwmgr
->backend
);
1703 *clock
= data
->clk_range
[clock_select
].ACMax
;
1705 *clock
= data
->clk_range
[clock_select
].ACMin
;
1710 static int vega12_get_sclks(struct pp_hwmgr
*hwmgr
,
1711 struct pp_clock_levels_with_latency
*clocks
)
1713 struct vega12_hwmgr
*data
= (struct vega12_hwmgr
*)(hwmgr
->backend
);
1716 struct vega12_single_dpm_table
*dpm_table
;
1718 if (!data
->smu_features
[GNLD_DPM_GFXCLK
].enabled
)
1721 dpm_table
= &(data
->dpm_table
.gfx_table
);
1722 ucount
= (dpm_table
->count
> MAX_NUM_CLOCKS
) ?
1723 MAX_NUM_CLOCKS
: dpm_table
->count
;
1725 for (i
= 0; i
< ucount
; i
++) {
1726 clocks
->data
[i
].clocks_in_khz
=
1727 dpm_table
->dpm_levels
[i
].value
* 1000;
1729 clocks
->data
[i
].latency_in_us
= 0;
1732 clocks
->num_levels
= ucount
;
1737 static uint32_t vega12_get_mem_latency(struct pp_hwmgr
*hwmgr
,
1743 static int vega12_get_memclocks(struct pp_hwmgr
*hwmgr
,
1744 struct pp_clock_levels_with_latency
*clocks
)
1746 struct vega12_hwmgr
*data
= (struct vega12_hwmgr
*)(hwmgr
->backend
);
1749 struct vega12_single_dpm_table
*dpm_table
;
1750 if (!data
->smu_features
[GNLD_DPM_UCLK
].enabled
)
1753 dpm_table
= &(data
->dpm_table
.mem_table
);
1754 ucount
= (dpm_table
->count
> MAX_NUM_CLOCKS
) ?
1755 MAX_NUM_CLOCKS
: dpm_table
->count
;
1757 for (i
= 0; i
< ucount
; i
++) {
1758 clocks
->data
[i
].clocks_in_khz
= dpm_table
->dpm_levels
[i
].value
* 1000;
1759 data
->mclk_latency_table
.entries
[i
].frequency
= dpm_table
->dpm_levels
[i
].value
* 100;
1760 clocks
->data
[i
].latency_in_us
=
1761 data
->mclk_latency_table
.entries
[i
].latency
=
1762 vega12_get_mem_latency(hwmgr
, dpm_table
->dpm_levels
[i
].value
);
1765 clocks
->num_levels
= data
->mclk_latency_table
.count
= ucount
;
1770 static int vega12_get_dcefclocks(struct pp_hwmgr
*hwmgr
,
1771 struct pp_clock_levels_with_latency
*clocks
)
1773 struct vega12_hwmgr
*data
= (struct vega12_hwmgr
*)(hwmgr
->backend
);
1776 struct vega12_single_dpm_table
*dpm_table
;
1778 if (!data
->smu_features
[GNLD_DPM_DCEFCLK
].enabled
)
1782 dpm_table
= &(data
->dpm_table
.dcef_table
);
1783 ucount
= (dpm_table
->count
> MAX_NUM_CLOCKS
) ?
1784 MAX_NUM_CLOCKS
: dpm_table
->count
;
1786 for (i
= 0; i
< ucount
; i
++) {
1787 clocks
->data
[i
].clocks_in_khz
=
1788 dpm_table
->dpm_levels
[i
].value
* 1000;
1790 clocks
->data
[i
].latency_in_us
= 0;
1793 clocks
->num_levels
= ucount
;
1798 static int vega12_get_socclocks(struct pp_hwmgr
*hwmgr
,
1799 struct pp_clock_levels_with_latency
*clocks
)
1801 struct vega12_hwmgr
*data
= (struct vega12_hwmgr
*)(hwmgr
->backend
);
1804 struct vega12_single_dpm_table
*dpm_table
;
1806 if (!data
->smu_features
[GNLD_DPM_SOCCLK
].enabled
)
1810 dpm_table
= &(data
->dpm_table
.soc_table
);
1811 ucount
= (dpm_table
->count
> MAX_NUM_CLOCKS
) ?
1812 MAX_NUM_CLOCKS
: dpm_table
->count
;
1814 for (i
= 0; i
< ucount
; i
++) {
1815 clocks
->data
[i
].clocks_in_khz
=
1816 dpm_table
->dpm_levels
[i
].value
* 1000;
1818 clocks
->data
[i
].latency_in_us
= 0;
1821 clocks
->num_levels
= ucount
;
1827 static int vega12_get_clock_by_type_with_latency(struct pp_hwmgr
*hwmgr
,
1828 enum amd_pp_clock_type type
,
1829 struct pp_clock_levels_with_latency
*clocks
)
1834 case amd_pp_sys_clock
:
1835 ret
= vega12_get_sclks(hwmgr
, clocks
);
1837 case amd_pp_mem_clock
:
1838 ret
= vega12_get_memclocks(hwmgr
, clocks
);
1840 case amd_pp_dcef_clock
:
1841 ret
= vega12_get_dcefclocks(hwmgr
, clocks
);
1843 case amd_pp_soc_clock
:
1844 ret
= vega12_get_socclocks(hwmgr
, clocks
);
1853 static int vega12_get_clock_by_type_with_voltage(struct pp_hwmgr
*hwmgr
,
1854 enum amd_pp_clock_type type
,
1855 struct pp_clock_levels_with_voltage
*clocks
)
1857 clocks
->num_levels
= 0;
1862 static int vega12_set_watermarks_for_clocks_ranges(struct pp_hwmgr
*hwmgr
,
1865 struct vega12_hwmgr
*data
= (struct vega12_hwmgr
*)(hwmgr
->backend
);
1866 Watermarks_t
*table
= &(data
->smc_state_table
.water_marks_table
);
1867 struct dm_pp_wm_sets_with_clock_ranges_soc15
*wm_with_clock_ranges
= clock_ranges
;
1869 if (!data
->registry_data
.disable_water_mark
&&
1870 data
->smu_features
[GNLD_DPM_DCEFCLK
].supported
&&
1871 data
->smu_features
[GNLD_DPM_SOCCLK
].supported
) {
1872 smu_set_watermarks_for_clocks_ranges(table
, wm_with_clock_ranges
);
1873 data
->water_marks_bitmap
|= WaterMarksExist
;
1874 data
->water_marks_bitmap
&= ~WaterMarksLoaded
;
1880 static int vega12_force_clock_level(struct pp_hwmgr
*hwmgr
,
1881 enum pp_clock_type type
, uint32_t mask
)
1883 struct vega12_hwmgr
*data
= (struct vega12_hwmgr
*)(hwmgr
->backend
);
1884 uint32_t soft_min_level
, soft_max_level
, hard_min_level
;
1889 soft_min_level
= mask
? (ffs(mask
) - 1) : 0;
1890 soft_max_level
= mask
? (fls(mask
) - 1) : 0;
1892 data
->dpm_table
.gfx_table
.dpm_state
.soft_min_level
=
1893 data
->dpm_table
.gfx_table
.dpm_levels
[soft_min_level
].value
;
1894 data
->dpm_table
.gfx_table
.dpm_state
.soft_max_level
=
1895 data
->dpm_table
.gfx_table
.dpm_levels
[soft_max_level
].value
;
1897 ret
= vega12_upload_dpm_min_level(hwmgr
);
1898 PP_ASSERT_WITH_CODE(!ret
,
1899 "Failed to upload boot level to lowest!",
1902 ret
= vega12_upload_dpm_max_level(hwmgr
);
1903 PP_ASSERT_WITH_CODE(!ret
,
1904 "Failed to upload dpm max level to highest!",
1909 soft_min_level
= mask
? (ffs(mask
) - 1) : 0;
1910 soft_max_level
= mask
? (fls(mask
) - 1) : 0;
1912 data
->dpm_table
.mem_table
.dpm_state
.soft_min_level
=
1913 data
->dpm_table
.mem_table
.dpm_levels
[soft_min_level
].value
;
1914 data
->dpm_table
.mem_table
.dpm_state
.soft_max_level
=
1915 data
->dpm_table
.mem_table
.dpm_levels
[soft_max_level
].value
;
1917 ret
= vega12_upload_dpm_min_level(hwmgr
);
1918 PP_ASSERT_WITH_CODE(!ret
,
1919 "Failed to upload boot level to lowest!",
1922 ret
= vega12_upload_dpm_max_level(hwmgr
);
1923 PP_ASSERT_WITH_CODE(!ret
,
1924 "Failed to upload dpm max level to highest!",
1930 soft_min_level
= mask
? (ffs(mask
) - 1) : 0;
1931 soft_max_level
= mask
? (fls(mask
) - 1) : 0;
1933 if (soft_max_level
>= data
->dpm_table
.soc_table
.count
) {
1934 pr_err("Clock level specified %d is over max allowed %d\n",
1936 data
->dpm_table
.soc_table
.count
- 1);
1940 data
->dpm_table
.soc_table
.dpm_state
.soft_min_level
=
1941 data
->dpm_table
.soc_table
.dpm_levels
[soft_min_level
].value
;
1942 data
->dpm_table
.soc_table
.dpm_state
.soft_max_level
=
1943 data
->dpm_table
.soc_table
.dpm_levels
[soft_max_level
].value
;
1945 ret
= vega12_upload_dpm_min_level(hwmgr
);
1946 PP_ASSERT_WITH_CODE(!ret
,
1947 "Failed to upload boot level to lowest!",
1950 ret
= vega12_upload_dpm_max_level(hwmgr
);
1951 PP_ASSERT_WITH_CODE(!ret
,
1952 "Failed to upload dpm max level to highest!",
1958 hard_min_level
= mask
? (ffs(mask
) - 1) : 0;
1960 if (hard_min_level
>= data
->dpm_table
.dcef_table
.count
) {
1961 pr_err("Clock level specified %d is over max allowed %d\n",
1963 data
->dpm_table
.dcef_table
.count
- 1);
1967 data
->dpm_table
.dcef_table
.dpm_state
.hard_min_level
=
1968 data
->dpm_table
.dcef_table
.dpm_levels
[hard_min_level
].value
;
1970 ret
= vega12_upload_dpm_min_level(hwmgr
);
1971 PP_ASSERT_WITH_CODE(!ret
,
1972 "Failed to upload boot level to lowest!",
1975 //TODO: Setting DCEFCLK max dpm level is not supported
1989 static int vega12_get_ppfeature_status(struct pp_hwmgr
*hwmgr
, char *buf
)
1991 static const char *ppfeature_name
[] = {
2021 static const char *output_title
[] = {
2025 uint64_t features_enabled
;
2030 ret
= vega12_get_enabled_smc_features(hwmgr
, &features_enabled
);
2031 PP_ASSERT_WITH_CODE(!ret
,
2032 "[EnableAllSmuFeatures] Failed to get enabled smc features!",
2035 size
+= sprintf(buf
+ size
, "Current ppfeatures: 0x%016llx\n", features_enabled
);
2036 size
+= sprintf(buf
+ size
, "%-19s %-22s %s\n",
2040 for (i
= 0; i
< GNLD_FEATURES_MAX
; i
++) {
2041 size
+= sprintf(buf
+ size
, "%-19s 0x%016llx %6s\n",
2044 (features_enabled
& (1ULL << i
)) ? "Y" : "N");
2050 static int vega12_set_ppfeature_status(struct pp_hwmgr
*hwmgr
, uint64_t new_ppfeature_masks
)
2052 uint64_t features_enabled
;
2053 uint64_t features_to_enable
;
2054 uint64_t features_to_disable
;
2057 if (new_ppfeature_masks
>= (1ULL << GNLD_FEATURES_MAX
))
2060 ret
= vega12_get_enabled_smc_features(hwmgr
, &features_enabled
);
2064 features_to_disable
=
2065 features_enabled
& ~new_ppfeature_masks
;
2066 features_to_enable
=
2067 ~features_enabled
& new_ppfeature_masks
;
2069 pr_debug("features_to_disable 0x%llx\n", features_to_disable
);
2070 pr_debug("features_to_enable 0x%llx\n", features_to_enable
);
2072 if (features_to_disable
) {
2073 ret
= vega12_enable_smc_features(hwmgr
, false, features_to_disable
);
2078 if (features_to_enable
) {
2079 ret
= vega12_enable_smc_features(hwmgr
, true, features_to_enable
);
2087 static int vega12_print_clock_levels(struct pp_hwmgr
*hwmgr
,
2088 enum pp_clock_type type
, char *buf
)
2090 int i
, now
, size
= 0;
2091 struct pp_clock_levels_with_latency clocks
;
2095 PP_ASSERT_WITH_CODE(
2096 vega12_get_current_gfx_clk_freq(hwmgr
, &now
) == 0,
2097 "Attempt to get current gfx clk Failed!",
2100 PP_ASSERT_WITH_CODE(
2101 vega12_get_sclks(hwmgr
, &clocks
) == 0,
2102 "Attempt to get gfx clk levels Failed!",
2104 for (i
= 0; i
< clocks
.num_levels
; i
++)
2105 size
+= sprintf(buf
+ size
, "%d: %uMhz %s\n",
2106 i
, clocks
.data
[i
].clocks_in_khz
/ 1000,
2107 (clocks
.data
[i
].clocks_in_khz
/ 1000 == now
/ 100) ? "*" : "");
2111 PP_ASSERT_WITH_CODE(
2112 vega12_get_current_mclk_freq(hwmgr
, &now
) == 0,
2113 "Attempt to get current mclk freq Failed!",
2116 PP_ASSERT_WITH_CODE(
2117 vega12_get_memclocks(hwmgr
, &clocks
) == 0,
2118 "Attempt to get memory clk levels Failed!",
2120 for (i
= 0; i
< clocks
.num_levels
; i
++)
2121 size
+= sprintf(buf
+ size
, "%d: %uMhz %s\n",
2122 i
, clocks
.data
[i
].clocks_in_khz
/ 1000,
2123 (clocks
.data
[i
].clocks_in_khz
/ 1000 == now
/ 100) ? "*" : "");
2127 PP_ASSERT_WITH_CODE(
2128 smum_send_msg_to_smc_with_parameter(hwmgr
,
2129 PPSMC_MSG_GetDpmClockFreq
, (PPCLK_SOCCLK
<< 16)) == 0,
2130 "Attempt to get Current SOCCLK Frequency Failed!",
2132 now
= smum_get_argument(hwmgr
);
2134 PP_ASSERT_WITH_CODE(
2135 vega12_get_socclocks(hwmgr
, &clocks
) == 0,
2136 "Attempt to get soc clk levels Failed!",
2138 for (i
= 0; i
< clocks
.num_levels
; i
++)
2139 size
+= sprintf(buf
+ size
, "%d: %uMhz %s\n",
2140 i
, clocks
.data
[i
].clocks_in_khz
/ 1000,
2141 (clocks
.data
[i
].clocks_in_khz
/ 1000 == now
) ? "*" : "");
2145 PP_ASSERT_WITH_CODE(
2146 smum_send_msg_to_smc_with_parameter(hwmgr
,
2147 PPSMC_MSG_GetDpmClockFreq
, (PPCLK_DCEFCLK
<< 16)) == 0,
2148 "Attempt to get Current DCEFCLK Frequency Failed!",
2150 now
= smum_get_argument(hwmgr
);
2152 PP_ASSERT_WITH_CODE(
2153 vega12_get_dcefclocks(hwmgr
, &clocks
) == 0,
2154 "Attempt to get dcef clk levels Failed!",
2156 for (i
= 0; i
< clocks
.num_levels
; i
++)
2157 size
+= sprintf(buf
+ size
, "%d: %uMhz %s\n",
2158 i
, clocks
.data
[i
].clocks_in_khz
/ 1000,
2159 (clocks
.data
[i
].clocks_in_khz
/ 1000 == now
) ? "*" : "");
2171 static int vega12_apply_clocks_adjust_rules(struct pp_hwmgr
*hwmgr
)
2173 struct vega12_hwmgr
*data
= (struct vega12_hwmgr
*)(hwmgr
->backend
);
2174 struct vega12_single_dpm_table
*dpm_table
;
2175 bool vblank_too_short
= false;
2176 bool disable_mclk_switching
;
2177 uint32_t i
, latency
;
2179 disable_mclk_switching
= ((1 < hwmgr
->display_config
->num_display
) &&
2180 !hwmgr
->display_config
->multi_monitor_in_sync
) ||
2182 latency
= hwmgr
->display_config
->dce_tolerable_mclk_in_active_latency
;
2185 dpm_table
= &(data
->dpm_table
.gfx_table
);
2186 dpm_table
->dpm_state
.soft_min_level
= dpm_table
->dpm_levels
[0].value
;
2187 dpm_table
->dpm_state
.soft_max_level
= dpm_table
->dpm_levels
[dpm_table
->count
- 1].value
;
2188 dpm_table
->dpm_state
.hard_min_level
= dpm_table
->dpm_levels
[0].value
;
2189 dpm_table
->dpm_state
.hard_max_level
= dpm_table
->dpm_levels
[dpm_table
->count
- 1].value
;
2191 if (PP_CAP(PHM_PlatformCaps_UMDPState
)) {
2192 if (VEGA12_UMD_PSTATE_GFXCLK_LEVEL
< dpm_table
->count
) {
2193 dpm_table
->dpm_state
.soft_min_level
= dpm_table
->dpm_levels
[VEGA12_UMD_PSTATE_GFXCLK_LEVEL
].value
;
2194 dpm_table
->dpm_state
.soft_max_level
= dpm_table
->dpm_levels
[VEGA12_UMD_PSTATE_GFXCLK_LEVEL
].value
;
2197 if (hwmgr
->dpm_level
== AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK
) {
2198 dpm_table
->dpm_state
.soft_min_level
= dpm_table
->dpm_levels
[0].value
;
2199 dpm_table
->dpm_state
.soft_max_level
= dpm_table
->dpm_levels
[0].value
;
2202 if (hwmgr
->dpm_level
== AMD_DPM_FORCED_LEVEL_PROFILE_PEAK
) {
2203 dpm_table
->dpm_state
.soft_min_level
= dpm_table
->dpm_levels
[dpm_table
->count
- 1].value
;
2204 dpm_table
->dpm_state
.soft_max_level
= dpm_table
->dpm_levels
[dpm_table
->count
- 1].value
;
2209 dpm_table
= &(data
->dpm_table
.mem_table
);
2210 dpm_table
->dpm_state
.soft_min_level
= dpm_table
->dpm_levels
[0].value
;
2211 dpm_table
->dpm_state
.soft_max_level
= dpm_table
->dpm_levels
[dpm_table
->count
- 1].value
;
2212 dpm_table
->dpm_state
.hard_min_level
= dpm_table
->dpm_levels
[0].value
;
2213 dpm_table
->dpm_state
.hard_max_level
= dpm_table
->dpm_levels
[dpm_table
->count
- 1].value
;
2215 if (PP_CAP(PHM_PlatformCaps_UMDPState
)) {
2216 if (VEGA12_UMD_PSTATE_MCLK_LEVEL
< dpm_table
->count
) {
2217 dpm_table
->dpm_state
.soft_min_level
= dpm_table
->dpm_levels
[VEGA12_UMD_PSTATE_MCLK_LEVEL
].value
;
2218 dpm_table
->dpm_state
.soft_max_level
= dpm_table
->dpm_levels
[VEGA12_UMD_PSTATE_MCLK_LEVEL
].value
;
2221 if (hwmgr
->dpm_level
== AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK
) {
2222 dpm_table
->dpm_state
.soft_min_level
= dpm_table
->dpm_levels
[0].value
;
2223 dpm_table
->dpm_state
.soft_max_level
= dpm_table
->dpm_levels
[0].value
;
2226 if (hwmgr
->dpm_level
== AMD_DPM_FORCED_LEVEL_PROFILE_PEAK
) {
2227 dpm_table
->dpm_state
.soft_min_level
= dpm_table
->dpm_levels
[dpm_table
->count
- 1].value
;
2228 dpm_table
->dpm_state
.soft_max_level
= dpm_table
->dpm_levels
[dpm_table
->count
- 1].value
;
2232 /* honour DAL's UCLK Hardmin */
2233 if (dpm_table
->dpm_state
.hard_min_level
< (hwmgr
->display_config
->min_mem_set_clock
/ 100))
2234 dpm_table
->dpm_state
.hard_min_level
= hwmgr
->display_config
->min_mem_set_clock
/ 100;
2236 /* Hardmin is dependent on displayconfig */
2237 if (disable_mclk_switching
) {
2238 dpm_table
->dpm_state
.hard_min_level
= dpm_table
->dpm_levels
[dpm_table
->count
- 1].value
;
2239 for (i
= 0; i
< data
->mclk_latency_table
.count
- 1; i
++) {
2240 if (data
->mclk_latency_table
.entries
[i
].latency
<= latency
) {
2241 if (dpm_table
->dpm_levels
[i
].value
>= (hwmgr
->display_config
->min_mem_set_clock
/ 100)) {
2242 dpm_table
->dpm_state
.hard_min_level
= dpm_table
->dpm_levels
[i
].value
;
2249 if (hwmgr
->display_config
->nb_pstate_switch_disable
)
2250 dpm_table
->dpm_state
.hard_min_level
= dpm_table
->dpm_levels
[dpm_table
->count
- 1].value
;
2253 dpm_table
= &(data
->dpm_table
.vclk_table
);
2254 dpm_table
->dpm_state
.soft_min_level
= dpm_table
->dpm_levels
[0].value
;
2255 dpm_table
->dpm_state
.soft_max_level
= dpm_table
->dpm_levels
[dpm_table
->count
- 1].value
;
2256 dpm_table
->dpm_state
.hard_min_level
= dpm_table
->dpm_levels
[0].value
;
2257 dpm_table
->dpm_state
.hard_max_level
= dpm_table
->dpm_levels
[dpm_table
->count
- 1].value
;
2259 if (PP_CAP(PHM_PlatformCaps_UMDPState
)) {
2260 if (VEGA12_UMD_PSTATE_UVDCLK_LEVEL
< dpm_table
->count
) {
2261 dpm_table
->dpm_state
.soft_min_level
= dpm_table
->dpm_levels
[VEGA12_UMD_PSTATE_UVDCLK_LEVEL
].value
;
2262 dpm_table
->dpm_state
.soft_max_level
= dpm_table
->dpm_levels
[VEGA12_UMD_PSTATE_UVDCLK_LEVEL
].value
;
2265 if (hwmgr
->dpm_level
== AMD_DPM_FORCED_LEVEL_PROFILE_PEAK
) {
2266 dpm_table
->dpm_state
.soft_min_level
= dpm_table
->dpm_levels
[dpm_table
->count
- 1].value
;
2267 dpm_table
->dpm_state
.soft_max_level
= dpm_table
->dpm_levels
[dpm_table
->count
- 1].value
;
2272 dpm_table
= &(data
->dpm_table
.dclk_table
);
2273 dpm_table
->dpm_state
.soft_min_level
= dpm_table
->dpm_levels
[0].value
;
2274 dpm_table
->dpm_state
.soft_max_level
= dpm_table
->dpm_levels
[dpm_table
->count
- 1].value
;
2275 dpm_table
->dpm_state
.hard_min_level
= dpm_table
->dpm_levels
[0].value
;
2276 dpm_table
->dpm_state
.hard_max_level
= dpm_table
->dpm_levels
[dpm_table
->count
- 1].value
;
2278 if (PP_CAP(PHM_PlatformCaps_UMDPState
)) {
2279 if (VEGA12_UMD_PSTATE_UVDCLK_LEVEL
< dpm_table
->count
) {
2280 dpm_table
->dpm_state
.soft_min_level
= dpm_table
->dpm_levels
[VEGA12_UMD_PSTATE_UVDCLK_LEVEL
].value
;
2281 dpm_table
->dpm_state
.soft_max_level
= dpm_table
->dpm_levels
[VEGA12_UMD_PSTATE_UVDCLK_LEVEL
].value
;
2284 if (hwmgr
->dpm_level
== AMD_DPM_FORCED_LEVEL_PROFILE_PEAK
) {
2285 dpm_table
->dpm_state
.soft_min_level
= dpm_table
->dpm_levels
[dpm_table
->count
- 1].value
;
2286 dpm_table
->dpm_state
.soft_max_level
= dpm_table
->dpm_levels
[dpm_table
->count
- 1].value
;
2291 dpm_table
= &(data
->dpm_table
.soc_table
);
2292 dpm_table
->dpm_state
.soft_min_level
= dpm_table
->dpm_levels
[0].value
;
2293 dpm_table
->dpm_state
.soft_max_level
= dpm_table
->dpm_levels
[dpm_table
->count
- 1].value
;
2294 dpm_table
->dpm_state
.hard_min_level
= dpm_table
->dpm_levels
[0].value
;
2295 dpm_table
->dpm_state
.hard_max_level
= dpm_table
->dpm_levels
[dpm_table
->count
- 1].value
;
2297 if (PP_CAP(PHM_PlatformCaps_UMDPState
)) {
2298 if (VEGA12_UMD_PSTATE_SOCCLK_LEVEL
< dpm_table
->count
) {
2299 dpm_table
->dpm_state
.soft_min_level
= dpm_table
->dpm_levels
[VEGA12_UMD_PSTATE_SOCCLK_LEVEL
].value
;
2300 dpm_table
->dpm_state
.soft_max_level
= dpm_table
->dpm_levels
[VEGA12_UMD_PSTATE_SOCCLK_LEVEL
].value
;
2303 if (hwmgr
->dpm_level
== AMD_DPM_FORCED_LEVEL_PROFILE_PEAK
) {
2304 dpm_table
->dpm_state
.soft_min_level
= dpm_table
->dpm_levels
[dpm_table
->count
- 1].value
;
2305 dpm_table
->dpm_state
.soft_max_level
= dpm_table
->dpm_levels
[dpm_table
->count
- 1].value
;
2310 dpm_table
= &(data
->dpm_table
.eclk_table
);
2311 dpm_table
->dpm_state
.soft_min_level
= dpm_table
->dpm_levels
[0].value
;
2312 dpm_table
->dpm_state
.soft_max_level
= dpm_table
->dpm_levels
[dpm_table
->count
- 1].value
;
2313 dpm_table
->dpm_state
.hard_min_level
= dpm_table
->dpm_levels
[0].value
;
2314 dpm_table
->dpm_state
.hard_max_level
= dpm_table
->dpm_levels
[dpm_table
->count
- 1].value
;
2316 if (PP_CAP(PHM_PlatformCaps_UMDPState
)) {
2317 if (VEGA12_UMD_PSTATE_VCEMCLK_LEVEL
< dpm_table
->count
) {
2318 dpm_table
->dpm_state
.soft_min_level
= dpm_table
->dpm_levels
[VEGA12_UMD_PSTATE_VCEMCLK_LEVEL
].value
;
2319 dpm_table
->dpm_state
.soft_max_level
= dpm_table
->dpm_levels
[VEGA12_UMD_PSTATE_VCEMCLK_LEVEL
].value
;
2322 if (hwmgr
->dpm_level
== AMD_DPM_FORCED_LEVEL_PROFILE_PEAK
) {
2323 dpm_table
->dpm_state
.soft_min_level
= dpm_table
->dpm_levels
[dpm_table
->count
- 1].value
;
2324 dpm_table
->dpm_state
.soft_max_level
= dpm_table
->dpm_levels
[dpm_table
->count
- 1].value
;
2331 static int vega12_set_uclk_to_highest_dpm_level(struct pp_hwmgr
*hwmgr
,
2332 struct vega12_single_dpm_table
*dpm_table
)
2334 struct vega12_hwmgr
*data
= (struct vega12_hwmgr
*)(hwmgr
->backend
);
2337 if (data
->smu_features
[GNLD_DPM_UCLK
].enabled
) {
2338 PP_ASSERT_WITH_CODE(dpm_table
->count
> 0,
2339 "[SetUclkToHightestDpmLevel] Dpm table has no entry!",
2341 PP_ASSERT_WITH_CODE(dpm_table
->count
<= NUM_UCLK_DPM_LEVELS
,
2342 "[SetUclkToHightestDpmLevel] Dpm table has too many entries!",
2345 dpm_table
->dpm_state
.hard_min_level
= dpm_table
->dpm_levels
[dpm_table
->count
- 1].value
;
2346 PP_ASSERT_WITH_CODE(!(ret
= smum_send_msg_to_smc_with_parameter(hwmgr
,
2347 PPSMC_MSG_SetHardMinByFreq
,
2348 (PPCLK_UCLK
<< 16 ) | dpm_table
->dpm_state
.hard_min_level
)),
2349 "[SetUclkToHightestDpmLevel] Set hard min uclk failed!",
2356 static int vega12_pre_display_configuration_changed_task(struct pp_hwmgr
*hwmgr
)
2358 struct vega12_hwmgr
*data
= (struct vega12_hwmgr
*)(hwmgr
->backend
);
2361 smum_send_msg_to_smc_with_parameter(hwmgr
,
2362 PPSMC_MSG_NumOfDisplays
, 0);
2364 ret
= vega12_set_uclk_to_highest_dpm_level(hwmgr
,
2365 &data
->dpm_table
.mem_table
);
2370 static int vega12_display_configuration_changed_task(struct pp_hwmgr
*hwmgr
)
2372 struct vega12_hwmgr
*data
= (struct vega12_hwmgr
*)(hwmgr
->backend
);
2374 Watermarks_t
*wm_table
= &(data
->smc_state_table
.water_marks_table
);
2376 if ((data
->water_marks_bitmap
& WaterMarksExist
) &&
2377 !(data
->water_marks_bitmap
& WaterMarksLoaded
)) {
2378 result
= smum_smc_table_manager(hwmgr
,
2379 (uint8_t *)wm_table
, TABLE_WATERMARKS
, false);
2380 PP_ASSERT_WITH_CODE(result
, "Failed to update WMTABLE!", return EINVAL
);
2381 data
->water_marks_bitmap
|= WaterMarksLoaded
;
2384 if ((data
->water_marks_bitmap
& WaterMarksExist
) &&
2385 data
->smu_features
[GNLD_DPM_DCEFCLK
].supported
&&
2386 data
->smu_features
[GNLD_DPM_SOCCLK
].supported
)
2387 smum_send_msg_to_smc_with_parameter(hwmgr
,
2388 PPSMC_MSG_NumOfDisplays
, hwmgr
->display_config
->num_display
);
2393 int vega12_enable_disable_uvd_dpm(struct pp_hwmgr
*hwmgr
, bool enable
)
2395 struct vega12_hwmgr
*data
=
2396 (struct vega12_hwmgr
*)(hwmgr
->backend
);
2398 if (data
->smu_features
[GNLD_DPM_UVD
].supported
) {
2399 PP_ASSERT_WITH_CODE(!vega12_enable_smc_features(hwmgr
,
2401 data
->smu_features
[GNLD_DPM_UVD
].smu_feature_bitmap
),
2402 "Attempt to Enable/Disable DPM UVD Failed!",
2404 data
->smu_features
[GNLD_DPM_UVD
].enabled
= enable
;
2410 static void vega12_power_gate_vce(struct pp_hwmgr
*hwmgr
, bool bgate
)
2412 struct vega12_hwmgr
*data
= (struct vega12_hwmgr
*)(hwmgr
->backend
);
2414 if (data
->vce_power_gated
== bgate
)
2417 data
->vce_power_gated
= bgate
;
2418 vega12_enable_disable_vce_dpm(hwmgr
, !bgate
);
2421 static void vega12_power_gate_uvd(struct pp_hwmgr
*hwmgr
, bool bgate
)
2423 struct vega12_hwmgr
*data
= (struct vega12_hwmgr
*)(hwmgr
->backend
);
2425 if (data
->uvd_power_gated
== bgate
)
2428 data
->uvd_power_gated
= bgate
;
2429 vega12_enable_disable_uvd_dpm(hwmgr
, !bgate
);
2433 vega12_check_smc_update_required_for_display_configuration(struct pp_hwmgr
*hwmgr
)
2435 struct vega12_hwmgr
*data
= (struct vega12_hwmgr
*)(hwmgr
->backend
);
2436 bool is_update_required
= false;
2438 if (data
->display_timing
.num_existing_displays
!= hwmgr
->display_config
->num_display
)
2439 is_update_required
= true;
2441 if (data
->registry_data
.gfx_clk_deep_sleep_support
) {
2442 if (data
->display_timing
.min_clock_in_sr
!= hwmgr
->display_config
->min_core_set_clock_in_sr
)
2443 is_update_required
= true;
2446 return is_update_required
;
/*
 * Disable all SMU features as part of DPM teardown.  Returns 0 on
 * success or the failing sub-call's error code.
 */
static int vega12_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
	int tmp_result, result = 0;

	tmp_result = vega12_disable_all_smu_features(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable all smu features!", result = tmp_result);

	return result;
}
2460 static int vega12_power_off_asic(struct pp_hwmgr
*hwmgr
)
2462 struct vega12_hwmgr
*data
= (struct vega12_hwmgr
*)(hwmgr
->backend
);
2465 result
= vega12_disable_dpm_tasks(hwmgr
);
2466 PP_ASSERT_WITH_CODE((0 == result
),
2467 "[disable_dpm_tasks] Failed to disable DPM!",
2469 data
->water_marks_bitmap
&= ~(WaterMarksLoaded
);
2475 static void vega12_find_min_clock_index(struct pp_hwmgr
*hwmgr
,
2476 uint32_t *sclk_idx
, uint32_t *mclk_idx
,
2477 uint32_t min_sclk
, uint32_t min_mclk
)
2479 struct vega12_hwmgr
*data
= (struct vega12_hwmgr
*)(hwmgr
->backend
);
2480 struct vega12_dpm_table
*dpm_table
= &(data
->dpm_table
);
2483 for (i
= 0; i
< dpm_table
->gfx_table
.count
; i
++) {
2484 if (dpm_table
->gfx_table
.dpm_levels
[i
].enabled
&&
2485 dpm_table
->gfx_table
.dpm_levels
[i
].value
>= min_sclk
) {
2491 for (i
= 0; i
< dpm_table
->mem_table
.count
; i
++) {
2492 if (dpm_table
->mem_table
.dpm_levels
[i
].enabled
&&
2493 dpm_table
->mem_table
.dpm_levels
[i
].value
>= min_mclk
) {
/* Power-profile requests are not implemented on Vega12; accept and ignore. */
static int vega12_set_power_profile_state(struct pp_hwmgr *hwmgr,
		struct amd_pp_profile *request)
{
	return 0;
}
2508 static int vega12_get_sclk_od(struct pp_hwmgr
*hwmgr
)
2510 struct vega12_hwmgr
*data
= (struct vega12_hwmgr
*)(hwmgr
->backend
);
2511 struct vega12_single_dpm_table
*sclk_table
= &(data
->dpm_table
.gfx_table
);
2512 struct vega12_single_dpm_table
*golden_sclk_table
=
2513 &(data
->golden_dpm_table
.gfx_table
);
2514 int value
= sclk_table
->dpm_levels
[sclk_table
->count
- 1].value
;
2515 int golden_value
= golden_sclk_table
->dpm_levels
2516 [golden_sclk_table
->count
- 1].value
;
2518 value
-= golden_value
;
2519 value
= DIV_ROUND_UP(value
* 100, golden_value
);
2524 static int vega12_set_sclk_od(struct pp_hwmgr
*hwmgr
, uint32_t value
)
2529 static int vega12_get_mclk_od(struct pp_hwmgr
*hwmgr
)
2531 struct vega12_hwmgr
*data
= (struct vega12_hwmgr
*)(hwmgr
->backend
);
2532 struct vega12_single_dpm_table
*mclk_table
= &(data
->dpm_table
.mem_table
);
2533 struct vega12_single_dpm_table
*golden_mclk_table
=
2534 &(data
->golden_dpm_table
.mem_table
);
2535 int value
= mclk_table
->dpm_levels
[mclk_table
->count
- 1].value
;
2536 int golden_value
= golden_mclk_table
->dpm_levels
2537 [golden_mclk_table
->count
- 1].value
;
2539 value
-= golden_value
;
2540 value
= DIV_ROUND_UP(value
* 100, golden_value
);
2545 static int vega12_set_mclk_od(struct pp_hwmgr
*hwmgr
, uint32_t value
)
2551 static int vega12_notify_cac_buffer_info(struct pp_hwmgr
*hwmgr
,
2552 uint32_t virtual_addr_low
,
2553 uint32_t virtual_addr_hi
,
2554 uint32_t mc_addr_low
,
2555 uint32_t mc_addr_hi
,
2558 smum_send_msg_to_smc_with_parameter(hwmgr
,
2559 PPSMC_MSG_SetSystemVirtualDramAddrHigh
,
2561 smum_send_msg_to_smc_with_parameter(hwmgr
,
2562 PPSMC_MSG_SetSystemVirtualDramAddrLow
,
2564 smum_send_msg_to_smc_with_parameter(hwmgr
,
2565 PPSMC_MSG_DramLogSetDramAddrHigh
,
2568 smum_send_msg_to_smc_with_parameter(hwmgr
,
2569 PPSMC_MSG_DramLogSetDramAddrLow
,
2572 smum_send_msg_to_smc_with_parameter(hwmgr
,
2573 PPSMC_MSG_DramLogSetDramSize
,
2578 static int vega12_get_thermal_temperature_range(struct pp_hwmgr
*hwmgr
,
2579 struct PP_TemperatureRange
*thermal_data
)
2581 struct vega12_hwmgr
*data
=
2582 (struct vega12_hwmgr
*)(hwmgr
->backend
);
2583 PPTable_t
*pp_table
= &(data
->smc_state_table
.pp_table
);
2585 memcpy(thermal_data
, &SMU7ThermalWithDelayPolicy
[0], sizeof(struct PP_TemperatureRange
));
2587 thermal_data
->max
= pp_table
->TedgeLimit
*
2588 PP_TEMPERATURE_UNITS_PER_CENTIGRADES
;
2589 thermal_data
->edge_emergency_max
= (pp_table
->TedgeLimit
+ CTF_OFFSET_EDGE
) *
2590 PP_TEMPERATURE_UNITS_PER_CENTIGRADES
;
2591 thermal_data
->hotspot_crit_max
= pp_table
->ThotspotLimit
*
2592 PP_TEMPERATURE_UNITS_PER_CENTIGRADES
;
2593 thermal_data
->hotspot_emergency_max
= (pp_table
->ThotspotLimit
+ CTF_OFFSET_HOTSPOT
) *
2594 PP_TEMPERATURE_UNITS_PER_CENTIGRADES
;
2595 thermal_data
->mem_crit_max
= pp_table
->ThbmLimit
*
2596 PP_TEMPERATURE_UNITS_PER_CENTIGRADES
;
2597 thermal_data
->mem_emergency_max
= (pp_table
->ThbmLimit
+ CTF_OFFSET_HBM
)*
2598 PP_TEMPERATURE_UNITS_PER_CENTIGRADES
;
2603 static int vega12_enable_gfx_off(struct pp_hwmgr
*hwmgr
)
2605 struct vega12_hwmgr
*data
=
2606 (struct vega12_hwmgr
*)(hwmgr
->backend
);
2609 if (data
->gfxoff_controlled_by_driver
)
2610 ret
= smum_send_msg_to_smc(hwmgr
, PPSMC_MSG_AllowGfxOff
);
2615 static int vega12_disable_gfx_off(struct pp_hwmgr
*hwmgr
)
2617 struct vega12_hwmgr
*data
=
2618 (struct vega12_hwmgr
*)(hwmgr
->backend
);
2621 if (data
->gfxoff_controlled_by_driver
)
2622 ret
= smum_send_msg_to_smc(hwmgr
, PPSMC_MSG_DisallowGfxOff
);
2627 static int vega12_gfx_off_control(struct pp_hwmgr
*hwmgr
, bool enable
)
2630 return vega12_enable_gfx_off(hwmgr
);
2632 return vega12_disable_gfx_off(hwmgr
);
2635 static int vega12_get_performance_level(struct pp_hwmgr
*hwmgr
, const struct pp_hw_power_state
*state
,
2636 PHM_PerformanceLevelDesignation designation
, uint32_t index
,
2637 PHM_PerformanceLevel
*level
)
2642 static int vega12_set_mp1_state(struct pp_hwmgr
*hwmgr
,
2643 enum pp_mp1_state mp1_state
)
2648 switch (mp1_state
) {
2649 case PP_MP1_STATE_UNLOAD
:
2650 msg
= PPSMC_MSG_PrepareMp1ForUnload
;
2652 case PP_MP1_STATE_SHUTDOWN
:
2653 case PP_MP1_STATE_RESET
:
2654 case PP_MP1_STATE_NONE
:
2659 PP_ASSERT_WITH_CODE((ret
= smum_send_msg_to_smc(hwmgr
, msg
)) == 0,
2660 "[PrepareMp1] Failed!",
2666 static const struct pp_hwmgr_func vega12_hwmgr_funcs
= {
2667 .backend_init
= vega12_hwmgr_backend_init
,
2668 .backend_fini
= vega12_hwmgr_backend_fini
,
2669 .asic_setup
= vega12_setup_asic_task
,
2670 .dynamic_state_management_enable
= vega12_enable_dpm_tasks
,
2671 .dynamic_state_management_disable
= vega12_disable_dpm_tasks
,
2672 .patch_boot_state
= vega12_patch_boot_state
,
2673 .get_sclk
= vega12_dpm_get_sclk
,
2674 .get_mclk
= vega12_dpm_get_mclk
,
2675 .notify_smc_display_config_after_ps_adjustment
=
2676 vega12_notify_smc_display_config_after_ps_adjustment
,
2677 .force_dpm_level
= vega12_dpm_force_dpm_level
,
2678 .stop_thermal_controller
= vega12_thermal_stop_thermal_controller
,
2679 .get_fan_speed_info
= vega12_fan_ctrl_get_fan_speed_info
,
2680 .reset_fan_speed_to_default
=
2681 vega12_fan_ctrl_reset_fan_speed_to_default
,
2682 .get_fan_speed_rpm
= vega12_fan_ctrl_get_fan_speed_rpm
,
2683 .set_fan_control_mode
= vega12_set_fan_control_mode
,
2684 .get_fan_control_mode
= vega12_get_fan_control_mode
,
2685 .read_sensor
= vega12_read_sensor
,
2686 .get_dal_power_level
= vega12_get_dal_power_level
,
2687 .get_clock_by_type_with_latency
= vega12_get_clock_by_type_with_latency
,
2688 .get_clock_by_type_with_voltage
= vega12_get_clock_by_type_with_voltage
,
2689 .set_watermarks_for_clocks_ranges
= vega12_set_watermarks_for_clocks_ranges
,
2690 .display_clock_voltage_request
= vega12_display_clock_voltage_request
,
2691 .force_clock_level
= vega12_force_clock_level
,
2692 .print_clock_levels
= vega12_print_clock_levels
,
2693 .apply_clocks_adjust_rules
=
2694 vega12_apply_clocks_adjust_rules
,
2695 .pre_display_config_changed
=
2696 vega12_pre_display_configuration_changed_task
,
2697 .display_config_changed
= vega12_display_configuration_changed_task
,
2698 .powergate_uvd
= vega12_power_gate_uvd
,
2699 .powergate_vce
= vega12_power_gate_vce
,
2700 .check_smc_update_required_for_display_configuration
=
2701 vega12_check_smc_update_required_for_display_configuration
,
2702 .power_off_asic
= vega12_power_off_asic
,
2703 .disable_smc_firmware_ctf
= vega12_thermal_disable_alert
,
2705 .set_power_profile_state
= vega12_set_power_profile_state
,
2706 .get_sclk_od
= vega12_get_sclk_od
,
2707 .set_sclk_od
= vega12_set_sclk_od
,
2708 .get_mclk_od
= vega12_get_mclk_od
,
2709 .set_mclk_od
= vega12_set_mclk_od
,
2711 .notify_cac_buffer_info
= vega12_notify_cac_buffer_info
,
2712 .get_thermal_temperature_range
= vega12_get_thermal_temperature_range
,
2713 .register_irq_handlers
= smu9_register_irq_handlers
,
2714 .start_thermal_controller
= vega12_start_thermal_controller
,
2715 .powergate_gfx
= vega12_gfx_off_control
,
2716 .get_performance_level
= vega12_get_performance_level
,
2717 .get_asic_baco_capability
= smu9_baco_get_capability
,
2718 .get_asic_baco_state
= smu9_baco_get_state
,
2719 .set_asic_baco_state
= vega12_baco_set_state
,
2720 .get_ppfeature_status
= vega12_get_ppfeature_status
,
2721 .set_ppfeature_status
= vega12_set_ppfeature_status
,
2722 .set_mp1_state
= vega12_set_mp1_state
,
2725 int vega12_hwmgr_init(struct pp_hwmgr
*hwmgr
)
2727 hwmgr
->hwmgr_func
= &vega12_hwmgr_funcs
;
2728 hwmgr
->pptable_func
= &vega12_pptable_funcs
;