]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
drm/amdgpu: use LRU mapping policy for SDMA engines
[mirror_ubuntu-artful-kernel.git] / drivers / gpu / drm / amd / powerplay / hwmgr / vega10_hwmgr.c
CommitLineData
f83a9991
EH
1/*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#include <linux/module.h>
24#include <linux/slab.h>
25#include <linux/fb.h>
26#include "linux/delay.h"
27
28#include "hwmgr.h"
29#include "amd_powerplay.h"
30#include "vega10_smumgr.h"
31#include "hardwaremanager.h"
32#include "ppatomfwctrl.h"
33#include "atomfirmware.h"
34#include "cgs_common.h"
35#include "vega10_powertune.h"
36#include "smu9.h"
37#include "smu9_driver_if.h"
38#include "vega10_inc.h"
39#include "pp_soc15.h"
40#include "pppcielanes.h"
41#include "vega10_hwmgr.h"
42#include "vega10_processpptables.h"
43#include "vega10_pptable.h"
44#include "vega10_thermal.h"
45#include "pp_debug.h"
46#include "pp_acpi.h"
47#include "amd_pcie_helpers.h"
48#include "cgs_linux.h"
49#include "ppinterrupt.h"
ab5cf3a5 50#include "pp_overdriver.h"
f83a9991
EH
51
#define VOLTAGE_SCALE 4
#define VOLTAGE_VID_OFFSET_SCALE1 625
#define VOLTAGE_VID_OFFSET_SCALE2 100

/* width in bits of a single HBM memory channel */
#define HBM_MEMORY_CHANNEL_WIDTH 128

/* Decode table for the IntLvNumChan field of DF_CS_AON0_DramBaseAddress0:
 * maps the interleave encoding to the number of HBM channels.
 * NOTE(review): the 0 slots are presumably reserved encodings — confirm
 * against the users of this table.
 */
uint32_t channel_number[] = {1, 2, 0, 4, 0, 8, 0, 16, 2};

/* Memory-clock thresholds and latency values reported to the display code.
 * NOTE(review): frequency values appear to be in 10 kHz units like other
 * clocks in this file — confirm.
 */
#define MEM_FREQ_LOW_LATENCY 25000
#define MEM_FREQ_HIGH_LATENCY 80000
#define MEM_LATENCY_HIGH 245
#define MEM_LATENCY_LOW 35
#define MEM_LATENCY_ERR 0xFFFF

/* Data-fabric register used to probe the HBM channel configuration */
#define mmDF_CS_AON0_DramBaseAddress0 0x0044
#define mmDF_CS_AON0_DramBaseAddress0_BASE_IDX 0

//DF_CS_AON0_DramBaseAddress0
#define DF_CS_AON0_DramBaseAddress0__AddrRngVal__SHIFT 0x0
#define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn__SHIFT 0x1
#define DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT 0x4
#define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel__SHIFT 0x8
#define DF_CS_AON0_DramBaseAddress0__DramBaseAddr__SHIFT 0xc
#define DF_CS_AON0_DramBaseAddress0__AddrRngVal_MASK 0x00000001L
#define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn_MASK 0x00000002L
#define DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK 0x000000F0L
#define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel_MASK 0x00000700L
#define DF_CS_AON0_DramBaseAddress0__DramBaseAddr_MASK 0xFFFFF000L

/* magic tag stored in pp_hw_power_state::magic to identify vega10 states */
const ULONG PhwVega10_Magic = (ULONG)(PHM_VIslands_Magic);
82
83struct vega10_power_state *cast_phw_vega10_power_state(
84 struct pp_hw_power_state *hw_ps)
85{
86 PP_ASSERT_WITH_CODE((PhwVega10_Magic == hw_ps->magic),
87 "Invalid Powerstate Type!",
88 return NULL;);
89
90 return (struct vega10_power_state *)hw_ps;
91}
92
93const struct vega10_power_state *cast_const_phw_vega10_power_state(
94 const struct pp_hw_power_state *hw_ps)
95{
96 PP_ASSERT_WITH_CODE((PhwVega10_Magic == hw_ps->magic),
97 "Invalid Powerstate Type!",
98 return NULL;);
99
100 return (const struct vega10_power_state *)hw_ps;
101}
102
103static void vega10_set_default_registry_data(struct pp_hwmgr *hwmgr)
104{
105 struct vega10_hwmgr *data =
106 (struct vega10_hwmgr *)(hwmgr->backend);
107
108 data->registry_data.sclk_dpm_key_disabled =
109 hwmgr->feature_mask & PP_SCLK_DPM_MASK ? false : true;
110 data->registry_data.socclk_dpm_key_disabled =
111 hwmgr->feature_mask & PP_SOCCLK_DPM_MASK ? false : true;
112 data->registry_data.mclk_dpm_key_disabled =
113 hwmgr->feature_mask & PP_MCLK_DPM_MASK ? false : true;
2d5f5f94
RZ
114 data->registry_data.pcie_dpm_key_disabled =
115 hwmgr->feature_mask & PP_PCIE_DPM_MASK ? false : true;
f83a9991
EH
116
117 data->registry_data.dcefclk_dpm_key_disabled =
118 hwmgr->feature_mask & PP_DCEFCLK_DPM_MASK ? false : true;
119
120 if (hwmgr->feature_mask & PP_POWER_CONTAINMENT_MASK) {
121 data->registry_data.power_containment_support = 1;
122 data->registry_data.enable_pkg_pwr_tracking_feature = 1;
123 data->registry_data.enable_tdc_limit_feature = 1;
124 }
125
afc0255c 126 data->registry_data.clock_stretcher_support =
97782cc9 127 hwmgr->feature_mask & PP_CLOCK_STRETCH_MASK ? false : true;
afc0255c 128
4022e4f2
RZ
129 data->registry_data.ulv_support =
130 hwmgr->feature_mask & PP_ULV_MASK ? true : false;
131
df057e02
RZ
132 data->registry_data.sclk_deep_sleep_support =
133 hwmgr->feature_mask & PP_SCLK_DEEP_SLEEP_MASK ? true : false;
134
f83a9991
EH
135 data->registry_data.disable_water_mark = 0;
136
137 data->registry_data.fan_control_support = 1;
138 data->registry_data.thermal_support = 1;
139 data->registry_data.fw_ctf_enabled = 1;
140
141 data->registry_data.avfs_support = 1;
142 data->registry_data.led_dpm_enabled = 1;
143
144 data->registry_data.vr0hot_enabled = 1;
145 data->registry_data.vr1hot_enabled = 1;
146 data->registry_data.regulator_hot_gpio_support = 1;
147
148 data->display_voltage_mode = PPVEGA10_VEGA10DISPLAYVOLTAGEMODE_DFLT;
149 data->dcef_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
150 data->dcef_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
151 data->dcef_clk_quad_eqn_c = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
152 data->disp_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
153 data->disp_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
154 data->disp_clk_quad_eqn_c = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
155 data->pixel_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
156 data->pixel_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
157 data->pixel_clk_quad_eqn_c = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
158 data->phy_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
159 data->phy_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
160 data->phy_clk_quad_eqn_c = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
161
162 data->gfxclk_average_alpha = PPVEGA10_VEGA10GFXCLKAVERAGEALPHA_DFLT;
163 data->socclk_average_alpha = PPVEGA10_VEGA10SOCCLKAVERAGEALPHA_DFLT;
164 data->uclk_average_alpha = PPVEGA10_VEGA10UCLKCLKAVERAGEALPHA_DFLT;
165 data->gfx_activity_average_alpha = PPVEGA10_VEGA10GFXACTIVITYAVERAGEALPHA_DFLT;
166}
167
/* Set or clear the PHM platform capability bits that apply to Vega10,
 * mirroring the registry_data flags initialized earlier in backend init.
 * Always returns 0.
 */
static int vega10_set_features_platform_caps(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data =
			(struct vega10_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)hwmgr->pptable;
	struct cgs_system_info sys_info = {0};
	int result;

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SclkDeepSleep);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DynamicPatchPowerState);

	/* vddci_control was determined before this is called from backend init */
	if (data->vddci_control == VEGA10_VOLTAGE_CONTROL_NONE)
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_ControlVDDCI);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_TablelessHardwareInterface);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_EnableSMU7ThermalManagement);

	/* query the platform power-gating flags to decide the UVD/VCE
	 * power-gating caps; on query failure both caps stay unset
	 */
	sys_info.size = sizeof(struct cgs_system_info);
	sys_info.info_id = CGS_SYSTEM_INFO_PG_FLAGS;
	result = cgs_query_system_info(hwmgr->device, &sys_info);

	if (!result && (sys_info.value & AMD_PG_SUPPORT_UVD))
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_UVDPowerGating);

	if (!result && (sys_info.value & AMD_PG_SUPPORT_VCE))
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_VCEPowerGating);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_UnTabledHardwareInterface);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_FanSpeedInTableIsRPM);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_ODFuzzyFanControlSupport);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DynamicPowerManagement);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SMC);

	/* power tune caps */
	/* assume disabled */
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PowerContainment);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SQRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DBRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_TDRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_TCPRamping);

	if (data->registry_data.power_containment_support)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_PowerContainment);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_CAC);

	/* NOTE(review): table_info->tdp_table is dereferenced without a NULL
	 * check — assumes pptable parsing always populates it; confirm.
	 */
	if (table_info->tdp_table->usClockStretchAmount &&
			data->registry_data.clock_stretcher_support)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_ClockStretcher);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_RegulatorHot);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_AutomaticDCTransition);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_UVDDPM);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_VCEDPM);

	return 0;
}
256
/* Initialize the smu_features[] table: map each driver-side GNLD_* feature
 * index to its SMC FEATURE_*_BIT id, then mark which features are supported
 * based on registry_data and platform caps. Enablement happens later.
 */
static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
	int i;

	vega10_initialize_power_tune_defaults(hwmgr);

	/* start with every feature unmapped (0xffff), disabled, unsupported;
	 * the bitmap is the driver-local bit, not the SMC bit
	 */
	for (i = 0; i < GNLD_FEATURES_MAX; i++) {
		data->smu_features[i].smu_feature_id = 0xffff;
		data->smu_features[i].smu_feature_bitmap = 1 << i;
		data->smu_features[i].enabled = false;
		data->smu_features[i].supported = false;
	}

	/* GNLD_* index -> SMC FEATURE_*_BIT mapping */
	data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_id =
			FEATURE_DPM_PREFETCHER_BIT;
	data->smu_features[GNLD_DPM_GFXCLK].smu_feature_id =
			FEATURE_DPM_GFXCLK_BIT;
	data->smu_features[GNLD_DPM_UCLK].smu_feature_id =
			FEATURE_DPM_UCLK_BIT;
	data->smu_features[GNLD_DPM_SOCCLK].smu_feature_id =
			FEATURE_DPM_SOCCLK_BIT;
	data->smu_features[GNLD_DPM_UVD].smu_feature_id =
			FEATURE_DPM_UVD_BIT;
	data->smu_features[GNLD_DPM_VCE].smu_feature_id =
			FEATURE_DPM_VCE_BIT;
	data->smu_features[GNLD_DPM_MP0CLK].smu_feature_id =
			FEATURE_DPM_MP0CLK_BIT;
	data->smu_features[GNLD_DPM_LINK].smu_feature_id =
			FEATURE_DPM_LINK_BIT;
	data->smu_features[GNLD_DPM_DCEFCLK].smu_feature_id =
			FEATURE_DPM_DCEFCLK_BIT;
	data->smu_features[GNLD_ULV].smu_feature_id =
			FEATURE_ULV_BIT;
	data->smu_features[GNLD_AVFS].smu_feature_id =
			FEATURE_AVFS_BIT;
	data->smu_features[GNLD_DS_GFXCLK].smu_feature_id =
			FEATURE_DS_GFXCLK_BIT;
	data->smu_features[GNLD_DS_SOCCLK].smu_feature_id =
			FEATURE_DS_SOCCLK_BIT;
	data->smu_features[GNLD_DS_LCLK].smu_feature_id =
			FEATURE_DS_LCLK_BIT;
	data->smu_features[GNLD_PPT].smu_feature_id =
			FEATURE_PPT_BIT;
	data->smu_features[GNLD_TDC].smu_feature_id =
			FEATURE_TDC_BIT;
	data->smu_features[GNLD_THERMAL].smu_feature_id =
			FEATURE_THERMAL_BIT;
	data->smu_features[GNLD_GFX_PER_CU_CG].smu_feature_id =
			FEATURE_GFX_PER_CU_CG_BIT;
	data->smu_features[GNLD_RM].smu_feature_id =
			FEATURE_RM_BIT;
	data->smu_features[GNLD_DS_DCEFCLK].smu_feature_id =
			FEATURE_DS_DCEFCLK_BIT;
	data->smu_features[GNLD_ACDC].smu_feature_id =
			FEATURE_ACDC_BIT;
	data->smu_features[GNLD_VR0HOT].smu_feature_id =
			FEATURE_VR0HOT_BIT;
	data->smu_features[GNLD_VR1HOT].smu_feature_id =
			FEATURE_VR1HOT_BIT;
	data->smu_features[GNLD_FW_CTF].smu_feature_id =
			FEATURE_FW_CTF_BIT;
	data->smu_features[GNLD_LED_DISPLAY].smu_feature_id =
			FEATURE_LED_DISPLAY_BIT;
	data->smu_features[GNLD_FAN_CONTROL].smu_feature_id =
			FEATURE_FAN_CONTROL_BIT;
	data->smu_features[GNLD_VOLTAGE_CONTROLLER].smu_feature_id =
			FEATURE_VOLTAGE_CONTROLLER_BIT;

	/* DPM features follow the registry *_key_disabled flags */
	if (!data->registry_data.prefetcher_dpm_key_disabled)
		data->smu_features[GNLD_DPM_PREFETCHER].supported = true;

	if (!data->registry_data.sclk_dpm_key_disabled)
		data->smu_features[GNLD_DPM_GFXCLK].supported = true;

	if (!data->registry_data.mclk_dpm_key_disabled)
		data->smu_features[GNLD_DPM_UCLK].supported = true;

	if (!data->registry_data.socclk_dpm_key_disabled)
		data->smu_features[GNLD_DPM_SOCCLK].supported = true;

	/* UVD/VCE DPM follow the platform caps set in
	 * vega10_set_features_platform_caps()
	 */
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_UVDDPM))
		data->smu_features[GNLD_DPM_UVD].supported = true;

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_VCEDPM))
		data->smu_features[GNLD_DPM_VCE].supported = true;

	if (!data->registry_data.pcie_dpm_key_disabled)
		data->smu_features[GNLD_DPM_LINK].supported = true;

	if (!data->registry_data.dcefclk_dpm_key_disabled)
		data->smu_features[GNLD_DPM_DCEFCLK].supported = true;

	/* one registry knob gates all four deep-sleep clock features */
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SclkDeepSleep) &&
			data->registry_data.sclk_deep_sleep_support) {
		data->smu_features[GNLD_DS_GFXCLK].supported = true;
		data->smu_features[GNLD_DS_SOCCLK].supported = true;
		data->smu_features[GNLD_DS_LCLK].supported = true;
		data->smu_features[GNLD_DS_DCEFCLK].supported = true;
	}

	if (data->registry_data.enable_pkg_pwr_tracking_feature)
		data->smu_features[GNLD_PPT].supported = true;

	if (data->registry_data.enable_tdc_limit_feature)
		data->smu_features[GNLD_TDC].supported = true;

	if (data->registry_data.thermal_support)
		data->smu_features[GNLD_THERMAL].supported = true;

	if (data->registry_data.fan_control_support)
		data->smu_features[GNLD_FAN_CONTROL].supported = true;

	if (data->registry_data.fw_ctf_enabled)
		data->smu_features[GNLD_FW_CTF].supported = true;

	if (data->registry_data.avfs_support)
		data->smu_features[GNLD_AVFS].supported = true;

	if (data->registry_data.led_dpm_enabled)
		data->smu_features[GNLD_LED_DISPLAY].supported = true;

	if (data->registry_data.vr1hot_enabled)
		data->smu_features[GNLD_VR1HOT].supported = true;

	if (data->registry_data.vr0hot_enabled)
		data->smu_features[GNLD_VR0HOT].supported = true;

}
389
390#ifdef PPLIB_VEGA10_EVV_SUPPORT
391static int vega10_get_socclk_for_voltage_evv(struct pp_hwmgr *hwmgr,
392 phm_ppt_v1_voltage_lookup_table *lookup_table,
393 uint16_t virtual_voltage_id, int32_t *socclk)
394{
395 uint8_t entry_id;
396 uint8_t voltage_id;
397 struct phm_ppt_v2_information *table_info =
398 (struct phm_ppt_v2_information *)(hwmgr->pptable);
399
400 PP_ASSERT_WITH_CODE(lookup_table->count != 0,
401 "Lookup table is empty",
402 return -EINVAL);
403
404 /* search for leakage voltage ID 0xff01 ~ 0xff08 and sclk */
405 for (entry_id = 0; entry_id < table_info->vdd_dep_on_sclk->count; entry_id++) {
406 voltage_id = table_info->vdd_dep_on_socclk->entries[entry_id].vddInd;
407 if (lookup_table->entries[voltage_id].us_vdd == virtual_voltage_id)
408 break;
409 }
410
411 PP_ASSERT_WITH_CODE(entry_id < table_info->vdd_dep_on_socclk->count,
412 "Can't find requested voltage id in vdd_dep_on_socclk table!",
413 return -EINVAL);
414
415 *socclk = table_info->vdd_dep_on_socclk->entries[entry_id].clk;
416
417 return 0;
418}
419
420#define ATOM_VIRTUAL_VOLTAGE_ID0 0xff01
421/**
422* Get Leakage VDDC based on leakage ID.
423*
424* @param hwmgr the address of the powerplay hardware manager.
425* @return always 0.
426*/
427static int vega10_get_evv_voltages(struct pp_hwmgr *hwmgr)
428{
429 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
430 uint16_t vv_id;
431 uint32_t vddc = 0;
432 uint16_t i, j;
433 uint32_t sclk = 0;
434 struct phm_ppt_v2_information *table_info =
435 (struct phm_ppt_v2_information *)hwmgr->pptable;
436 struct phm_ppt_v1_clock_voltage_dependency_table *socclk_table =
437 table_info->vdd_dep_on_socclk;
438 int result;
439
440 for (i = 0; i < VEGA10_MAX_LEAKAGE_COUNT; i++) {
441 vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
442
443 if (!vega10_get_socclk_for_voltage_evv(hwmgr,
444 table_info->vddc_lookup_table, vv_id, &sclk)) {
445 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
446 PHM_PlatformCaps_ClockStretcher)) {
447 for (j = 1; j < socclk_table->count; j++) {
448 if (socclk_table->entries[j].clk == sclk &&
449 socclk_table->entries[j].cks_enable == 0) {
450 sclk += 5000;
451 break;
452 }
453 }
454 }
455
456 PP_ASSERT_WITH_CODE(!atomctrl_get_voltage_evv_on_sclk_ai(hwmgr,
457 VOLTAGE_TYPE_VDDC, sclk, vv_id, &vddc),
458 "Error retrieving EVV voltage value!",
459 continue);
460
461
462 /* need to make sure vddc is less than 2v or else, it could burn the ASIC. */
463 PP_ASSERT_WITH_CODE((vddc < 2000 && vddc != 0),
464 "Invalid VDDC value", result = -EINVAL;);
465
466 /* the voltage should not be zero nor equal to leakage ID */
467 if (vddc != 0 && vddc != vv_id) {
468 data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = (uint16_t)(vddc/100);
469 data->vddc_leakage.leakage_id[data->vddc_leakage.count] = vv_id;
470 data->vddc_leakage.count++;
471 }
472 }
473 }
474
475 return 0;
476}
477
478/**
479 * Change virtual leakage voltage to actual value.
480 *
481 * @param hwmgr the address of the powerplay hardware manager.
482 * @param pointer to changing voltage
483 * @param pointer to leakage table
484 */
485static void vega10_patch_with_vdd_leakage(struct pp_hwmgr *hwmgr,
486 uint16_t *voltage, struct vega10_leakage_voltage *leakage_table)
487{
488 uint32_t index;
489
490 /* search for leakage voltage ID 0xff01 ~ 0xff08 */
491 for (index = 0; index < leakage_table->count; index++) {
492 /* if this voltage matches a leakage voltage ID */
493 /* patch with actual leakage voltage */
494 if (leakage_table->leakage_id[index] == *voltage) {
495 *voltage = leakage_table->actual_voltage[index];
496 break;
497 }
498 }
499
500 if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
501 pr_info("Voltage value looks like a Leakage ID \
502 but it's not patched\n");
503}
504
505/**
506* Patch voltage lookup table by EVV leakages.
507*
508* @param hwmgr the address of the powerplay hardware manager.
509* @param pointer to voltage lookup table
510* @param pointer to leakage table
511* @return always 0
512*/
513static int vega10_patch_lookup_table_with_leakage(struct pp_hwmgr *hwmgr,
514 phm_ppt_v1_voltage_lookup_table *lookup_table,
515 struct vega10_leakage_voltage *leakage_table)
516{
517 uint32_t i;
518
519 for (i = 0; i < lookup_table->count; i++)
520 vega10_patch_with_vdd_leakage(hwmgr,
521 &lookup_table->entries[i].us_vdd, leakage_table);
522
523 return 0;
524}
525
526static int vega10_patch_clock_voltage_limits_with_vddc_leakage(
527 struct pp_hwmgr *hwmgr, struct vega10_leakage_voltage *leakage_table,
528 uint16_t *vddc)
529{
530 vega10_patch_with_vdd_leakage(hwmgr, (uint16_t *)vddc, leakage_table);
531
532 return 0;
533}
534#endif
535
536static int vega10_patch_voltage_dependency_tables_with_lookup_table(
537 struct pp_hwmgr *hwmgr)
538{
539 uint8_t entry_id;
540 uint8_t voltage_id;
541 struct phm_ppt_v2_information *table_info =
542 (struct phm_ppt_v2_information *)(hwmgr->pptable);
543 struct phm_ppt_v1_clock_voltage_dependency_table *socclk_table =
544 table_info->vdd_dep_on_socclk;
545 struct phm_ppt_v1_clock_voltage_dependency_table *gfxclk_table =
546 table_info->vdd_dep_on_sclk;
547 struct phm_ppt_v1_clock_voltage_dependency_table *dcefclk_table =
548 table_info->vdd_dep_on_dcefclk;
549 struct phm_ppt_v1_clock_voltage_dependency_table *pixclk_table =
550 table_info->vdd_dep_on_pixclk;
551 struct phm_ppt_v1_clock_voltage_dependency_table *dspclk_table =
552 table_info->vdd_dep_on_dispclk;
553 struct phm_ppt_v1_clock_voltage_dependency_table *phyclk_table =
554 table_info->vdd_dep_on_phyclk;
555 struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table =
556 table_info->vdd_dep_on_mclk;
557 struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
558 table_info->mm_dep_table;
559
560 for (entry_id = 0; entry_id < socclk_table->count; entry_id++) {
561 voltage_id = socclk_table->entries[entry_id].vddInd;
562 socclk_table->entries[entry_id].vddc =
563 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
564 }
565
566 for (entry_id = 0; entry_id < gfxclk_table->count; entry_id++) {
567 voltage_id = gfxclk_table->entries[entry_id].vddInd;
568 gfxclk_table->entries[entry_id].vddc =
569 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
570 }
571
572 for (entry_id = 0; entry_id < dcefclk_table->count; entry_id++) {
573 voltage_id = dcefclk_table->entries[entry_id].vddInd;
574 dcefclk_table->entries[entry_id].vddc =
575 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
576 }
577
578 for (entry_id = 0; entry_id < pixclk_table->count; entry_id++) {
579 voltage_id = pixclk_table->entries[entry_id].vddInd;
580 pixclk_table->entries[entry_id].vddc =
581 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
582 }
583
584 for (entry_id = 0; entry_id < dspclk_table->count; entry_id++) {
585 voltage_id = dspclk_table->entries[entry_id].vddInd;
586 dspclk_table->entries[entry_id].vddc =
587 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
588 }
589
590 for (entry_id = 0; entry_id < phyclk_table->count; entry_id++) {
591 voltage_id = phyclk_table->entries[entry_id].vddInd;
592 phyclk_table->entries[entry_id].vddc =
593 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
594 }
595
596 for (entry_id = 0; entry_id < mclk_table->count; ++entry_id) {
597 voltage_id = mclk_table->entries[entry_id].vddInd;
598 mclk_table->entries[entry_id].vddc =
599 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
600 voltage_id = mclk_table->entries[entry_id].vddciInd;
601 mclk_table->entries[entry_id].vddci =
602 table_info->vddci_lookup_table->entries[voltage_id].us_vdd;
603 voltage_id = mclk_table->entries[entry_id].mvddInd;
604 mclk_table->entries[entry_id].mvdd =
605 table_info->vddmem_lookup_table->entries[voltage_id].us_vdd;
606 }
607
608 for (entry_id = 0; entry_id < mm_table->count; ++entry_id) {
609 voltage_id = mm_table->entries[entry_id].vddcInd;
610 mm_table->entries[entry_id].vddc =
611 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
612 }
613
614 return 0;
615
616}
617
618static int vega10_sort_lookup_table(struct pp_hwmgr *hwmgr,
619 struct phm_ppt_v1_voltage_lookup_table *lookup_table)
620{
621 uint32_t table_size, i, j;
622 struct phm_ppt_v1_voltage_lookup_record tmp_voltage_lookup_record;
623
624 PP_ASSERT_WITH_CODE(lookup_table && lookup_table->count,
625 "Lookup table is empty", return -EINVAL);
626
627 table_size = lookup_table->count;
628
629 /* Sorting voltages */
630 for (i = 0; i < table_size - 1; i++) {
631 for (j = i + 1; j > 0; j--) {
632 if (lookup_table->entries[j].us_vdd <
633 lookup_table->entries[j - 1].us_vdd) {
634 tmp_voltage_lookup_record = lookup_table->entries[j - 1];
635 lookup_table->entries[j - 1] = lookup_table->entries[j];
636 lookup_table->entries[j] = tmp_voltage_lookup_record;
637 }
638 }
639 }
640
641 return 0;
642}
643
644static int vega10_complete_dependency_tables(struct pp_hwmgr *hwmgr)
645{
646 int result = 0;
647 int tmp_result;
648 struct phm_ppt_v2_information *table_info =
649 (struct phm_ppt_v2_information *)(hwmgr->pptable);
650#ifdef PPLIB_VEGA10_EVV_SUPPORT
651 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
652
653 tmp_result = vega10_patch_lookup_table_with_leakage(hwmgr,
654 table_info->vddc_lookup_table, &(data->vddc_leakage));
655 if (tmp_result)
656 result = tmp_result;
657
658 tmp_result = vega10_patch_clock_voltage_limits_with_vddc_leakage(hwmgr,
659 &(data->vddc_leakage), &table_info->max_clock_voltage_on_dc.vddc);
660 if (tmp_result)
661 result = tmp_result;
662#endif
663
664 tmp_result = vega10_patch_voltage_dependency_tables_with_lookup_table(hwmgr);
665 if (tmp_result)
666 result = tmp_result;
667
668 tmp_result = vega10_sort_lookup_table(hwmgr, table_info->vddc_lookup_table);
669 if (tmp_result)
670 result = tmp_result;
671
672 return result;
673}
674
675static int vega10_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr)
676{
677 struct phm_ppt_v2_information *table_info =
678 (struct phm_ppt_v2_information *)(hwmgr->pptable);
679 struct phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table =
680 table_info->vdd_dep_on_socclk;
681 struct phm_ppt_v1_clock_voltage_dependency_table *allowed_mclk_vdd_table =
682 table_info->vdd_dep_on_mclk;
683
684 PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table,
685 "VDD dependency on SCLK table is missing. \
686 This table is mandatory", return -EINVAL);
687 PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1,
688 "VDD dependency on SCLK table is empty. \
689 This table is mandatory", return -EINVAL);
690
691 PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table,
692 "VDD dependency on MCLK table is missing. \
693 This table is mandatory", return -EINVAL);
694 PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1,
695 "VDD dependency on MCLK table is empty. \
696 This table is mandatory", return -EINVAL);
697
698 table_info->max_clock_voltage_on_ac.sclk =
699 allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].clk;
700 table_info->max_clock_voltage_on_ac.mclk =
701 allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].clk;
702 table_info->max_clock_voltage_on_ac.vddc =
703 allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc;
704 table_info->max_clock_voltage_on_ac.vddci =
705 allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].vddci;
706
707 hwmgr->dyn_state.max_clock_voltage_on_ac.sclk =
708 table_info->max_clock_voltage_on_ac.sclk;
709 hwmgr->dyn_state.max_clock_voltage_on_ac.mclk =
710 table_info->max_clock_voltage_on_ac.mclk;
711 hwmgr->dyn_state.max_clock_voltage_on_ac.vddc =
712 table_info->max_clock_voltage_on_ac.vddc;
713 hwmgr->dyn_state.max_clock_voltage_on_ac.vddci =
714 table_info->max_clock_voltage_on_ac.vddci;
715
716 return 0;
717}
718
719static int vega10_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
720{
721 kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
722 hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL;
723
724 kfree(hwmgr->backend);
725 hwmgr->backend = NULL;
726
727 return 0;
728}
729
/* Allocate and initialize the vega10 private backend: registry defaults,
 * voltage-control discovery and SVID2 telemetry configuration, platform
 * caps, DPM defaults, dependency-table patching and overdrive fan defaults.
 * Returns 0 on success, -ENOMEM on allocation failure, -1 when VDDCR_SOC
 * is not SVID2-controlled.
 */
static int vega10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
{
	int result = 0;
	struct vega10_hwmgr *data;
	uint32_t config_telemetry = 0;
	struct pp_atomfwctrl_voltage_table vol_table;
	struct cgs_system_info sys_info = {0};

	data = kzalloc(sizeof(struct vega10_hwmgr), GFP_KERNEL);
	if (data == NULL)
		return -ENOMEM;

	hwmgr->backend = data;

	vega10_set_default_registry_data(hwmgr);

	data->disable_dpm_mask = 0xff;
	data->workload_mask = 0xff;

	/* need to set voltage control types before EVV patching */
	data->vddc_control = VEGA10_VOLTAGE_CONTROL_NONE;
	data->mvdd_control = VEGA10_VOLTAGE_CONTROL_NONE;
	data->vddci_control = VEGA10_VOLTAGE_CONTROL_NONE;

	/* VDDCR_SOC: must be SVID2-controlled; pack its telemetry slope and
	 * offset into the low 16 bits of config_telemetry
	 */
	if (pp_atomfwctrl_is_voltage_controlled_by_gpio_v4(hwmgr,
			VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2)) {
		if (!pp_atomfwctrl_get_voltage_table_v4(hwmgr,
				VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2,
				&vol_table)) {
			config_telemetry = ((vol_table.telemetry_slope << 8) & 0xff00) |
					(vol_table.telemetry_offset & 0xff);
			data->vddc_control = VEGA10_VOLTAGE_CONTROL_BY_SVID2;
		}
	} else {
		/* not SVID2: abort init and release the backend */
		kfree(hwmgr->backend);
		hwmgr->backend = NULL;
		PP_ASSERT_WITH_CODE(false,
				"VDDCR_SOC is not SVID2!",
				return -1);
	}

	/* MVDDC: optional; its telemetry goes in the high 16 bits */
	if (pp_atomfwctrl_is_voltage_controlled_by_gpio_v4(hwmgr,
			VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2)) {
		if (!pp_atomfwctrl_get_voltage_table_v4(hwmgr,
				VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2,
				&vol_table)) {
			config_telemetry |=
					((vol_table.telemetry_slope << 24) & 0xff000000) |
					((vol_table.telemetry_offset << 16) & 0xff0000);
			data->mvdd_control = VEGA10_VOLTAGE_CONTROL_BY_SVID2;
		}
	}

	/* VDDCI_MEM: optional, GPIO-controlled when available */
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_ControlVDDCI)) {
		if (pp_atomfwctrl_is_voltage_controlled_by_gpio_v4(hwmgr,
				VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
			data->vddci_control = VEGA10_VOLTAGE_CONTROL_BY_GPIO;
	}

	data->config_telemetry = config_telemetry;

	vega10_set_features_platform_caps(hwmgr);

	vega10_init_dpm_defaults(hwmgr);

#ifdef PPLIB_VEGA10_EVV_SUPPORT
	/* Get leakage voltage based on leakage ID. */
	PP_ASSERT_WITH_CODE(!vega10_get_evv_voltages(hwmgr),
			"Get EVV Voltage Failed.  Abort Driver loading!",
			return -1);
#endif

	/* Patch our voltage dependency table with actual leakage voltage
	 * We need to perform leakage translation before it's used by other functions
	 */
	vega10_complete_dependency_tables(hwmgr);

	/* Parse pptable data read from VBIOS */
	vega10_set_private_data_based_on_pptable(hwmgr);

	data->is_tlu_enabled = false;

	hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
			VEGA10_MAX_HARDWARE_POWERLEVELS;
	hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
	hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;

	hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */
	/* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. */
	hwmgr->platform_descriptor.clockStep.engineClock = 500;
	hwmgr->platform_descriptor.clockStep.memoryClock = 500;

	/* NOTE(review): the status of this query becomes the function's
	 * return value, so a CU-info query failure fails backend init —
	 * confirm that is intended.
	 */
	sys_info.size = sizeof(struct cgs_system_info);
	sys_info.info_id = CGS_SYSTEM_INFO_GFX_CU_INFO;
	result = cgs_query_system_info(hwmgr->device, &sys_info);
	data->total_active_cus = sys_info.value;
	/* Setup default Overdrive Fan control settings */
	data->odn_fan_table.target_fan_speed =
			hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM;
	data->odn_fan_table.target_temperature =
			hwmgr->thermal_controller.
			advanceFanControlParameters.ucTargetTemperature;
	data->odn_fan_table.min_performance_clock =
			hwmgr->thermal_controller.advanceFanControlParameters.
			ulMinFanSCLKAcousticLimit;
	data->odn_fan_table.min_fan_limit =
			hwmgr->thermal_controller.
			advanceFanControlParameters.usFanPWMMinLimit *
			hwmgr->thermal_controller.fanInfo.ulMaxRPM / 100;

	return result;
}
846
847static int vega10_init_sclk_threshold(struct pp_hwmgr *hwmgr)
848{
849 struct vega10_hwmgr *data =
850 (struct vega10_hwmgr *)(hwmgr->backend);
851
852 data->low_sclk_interrupt_threshold = 0;
853
854 return 0;
855}
856
857static int vega10_setup_dpm_led_config(struct pp_hwmgr *hwmgr)
858{
859 struct vega10_hwmgr *data =
860 (struct vega10_hwmgr *)(hwmgr->backend);
861 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
862
863 struct pp_atomfwctrl_voltage_table table;
864 uint8_t i, j;
865 uint32_t mask = 0;
866 uint32_t tmp;
867 int32_t ret = 0;
868
869 ret = pp_atomfwctrl_get_voltage_table_v4(hwmgr, VOLTAGE_TYPE_LEDDPM,
870 VOLTAGE_OBJ_GPIO_LUT, &table);
871
872 if (!ret) {
873 tmp = table.mask_low;
874 for (i = 0, j = 0; i < 32; i++) {
875 if (tmp & 1) {
876 mask |= (uint32_t)(i << (8 * j));
877 if (++j >= 3)
878 break;
879 }
880 tmp >>= 1;
881 }
882 }
883
884 pp_table->LedPin0 = (uint8_t)(mask & 0xff);
885 pp_table->LedPin1 = (uint8_t)((mask >> 8) & 0xff);
886 pp_table->LedPin2 = (uint8_t)((mask >> 16) & 0xff);
887 return 0;
888}
889
890static int vega10_setup_asic_task(struct pp_hwmgr *hwmgr)
891{
892 PP_ASSERT_WITH_CODE(!vega10_init_sclk_threshold(hwmgr),
893 "Failed to init sclk threshold!",
894 return -EINVAL);
895
896 PP_ASSERT_WITH_CODE(!vega10_setup_dpm_led_config(hwmgr),
897 "Failed to set up led dpm config!",
898 return -EINVAL);
899
900 return 0;
901}
902
903static bool vega10_is_dpm_running(struct pp_hwmgr *hwmgr)
904{
905 uint32_t features_enabled;
906
907 if (!vega10_get_smc_features(hwmgr->smumgr, &features_enabled)) {
908 if (features_enabled & SMC_DPM_FEATURES)
909 return true;
910 }
911 return false;
912}
913
914/**
915* Remove repeated voltage values and create table with unique values.
916*
917* @param hwmgr the address of the powerplay hardware manager.
918* @param vol_table the pointer to changing voltage table
919* @return 0 in success
920*/
921
922static int vega10_trim_voltage_table(struct pp_hwmgr *hwmgr,
923 struct pp_atomfwctrl_voltage_table *vol_table)
924{
925 uint32_t i, j;
926 uint16_t vvalue;
927 bool found = false;
928 struct pp_atomfwctrl_voltage_table *table;
929
930 PP_ASSERT_WITH_CODE(vol_table,
931 "Voltage Table empty.", return -EINVAL);
932 table = kzalloc(sizeof(struct pp_atomfwctrl_voltage_table),
933 GFP_KERNEL);
934
935 if (!table)
936 return -ENOMEM;
937
938 table->mask_low = vol_table->mask_low;
939 table->phase_delay = vol_table->phase_delay;
940
941 for (i = 0; i < vol_table->count; i++) {
942 vvalue = vol_table->entries[i].value;
943 found = false;
944
945 for (j = 0; j < table->count; j++) {
946 if (vvalue == table->entries[j].value) {
947 found = true;
948 break;
949 }
950 }
951
952 if (!found) {
953 table->entries[table->count].value = vvalue;
954 table->entries[table->count].smio_low =
955 vol_table->entries[i].smio_low;
956 table->count++;
957 }
958 }
959
960 memcpy(vol_table, table, sizeof(struct pp_atomfwctrl_voltage_table));
961 kfree(table);
962
963 return 0;
964}
965
966static int vega10_get_mvdd_voltage_table(struct pp_hwmgr *hwmgr,
967 phm_ppt_v1_clock_voltage_dependency_table *dep_table,
968 struct pp_atomfwctrl_voltage_table *vol_table)
969{
970 int i;
971
972 PP_ASSERT_WITH_CODE(dep_table->count,
973 "Voltage Dependency Table empty.",
974 return -EINVAL);
975
976 vol_table->mask_low = 0;
977 vol_table->phase_delay = 0;
978 vol_table->count = dep_table->count;
979
980 for (i = 0; i < vol_table->count; i++) {
981 vol_table->entries[i].value = dep_table->entries[i].mvdd;
982 vol_table->entries[i].smio_low = 0;
983 }
984
985 PP_ASSERT_WITH_CODE(!vega10_trim_voltage_table(hwmgr,
986 vol_table),
987 "Failed to trim MVDD Table!",
988 return -1);
989
990 return 0;
991}
992
993static int vega10_get_vddci_voltage_table(struct pp_hwmgr *hwmgr,
994 phm_ppt_v1_clock_voltage_dependency_table *dep_table,
995 struct pp_atomfwctrl_voltage_table *vol_table)
996{
997 uint32_t i;
998
999 PP_ASSERT_WITH_CODE(dep_table->count,
1000 "Voltage Dependency Table empty.",
1001 return -EINVAL);
1002
1003 vol_table->mask_low = 0;
1004 vol_table->phase_delay = 0;
1005 vol_table->count = dep_table->count;
1006
1007 for (i = 0; i < dep_table->count; i++) {
1008 vol_table->entries[i].value = dep_table->entries[i].vddci;
1009 vol_table->entries[i].smio_low = 0;
1010 }
1011
1012 PP_ASSERT_WITH_CODE(!vega10_trim_voltage_table(hwmgr, vol_table),
1013 "Failed to trim VDDCI table.",
1014 return -1);
1015
1016 return 0;
1017}
1018
1019static int vega10_get_vdd_voltage_table(struct pp_hwmgr *hwmgr,
1020 phm_ppt_v1_clock_voltage_dependency_table *dep_table,
1021 struct pp_atomfwctrl_voltage_table *vol_table)
1022{
1023 int i;
1024
1025 PP_ASSERT_WITH_CODE(dep_table->count,
1026 "Voltage Dependency Table empty.",
1027 return -EINVAL);
1028
1029 vol_table->mask_low = 0;
1030 vol_table->phase_delay = 0;
1031 vol_table->count = dep_table->count;
1032
1033 for (i = 0; i < vol_table->count; i++) {
1034 vol_table->entries[i].value = dep_table->entries[i].vddc;
1035 vol_table->entries[i].smio_low = 0;
1036 }
1037
1038 return 0;
1039}
1040
1041/* ---- Voltage Tables ----
1042 * If the voltage table would be bigger than
1043 * what will fit into the state table on
1044 * the SMC keep only the higher entries.
1045 */
1046static void vega10_trim_voltage_table_to_fit_state_table(
1047 struct pp_hwmgr *hwmgr,
1048 uint32_t max_vol_steps,
1049 struct pp_atomfwctrl_voltage_table *vol_table)
1050{
1051 unsigned int i, diff;
1052
1053 if (vol_table->count <= max_vol_steps)
1054 return;
1055
1056 diff = vol_table->count - max_vol_steps;
1057
1058 for (i = 0; i < max_vol_steps; i++)
1059 vol_table->entries[i] = vol_table->entries[i + diff];
1060
1061 vol_table->count = max_vol_steps;
1062}
1063
1064/**
1065* Create Voltage Tables.
1066*
1067* @param hwmgr the address of the powerplay hardware manager.
1068* @return always 0
1069*/
1070static int vega10_construct_voltage_tables(struct pp_hwmgr *hwmgr)
1071{
1072 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
1073 struct phm_ppt_v2_information *table_info =
1074 (struct phm_ppt_v2_information *)hwmgr->pptable;
1075 int result;
1076
1077 if (data->mvdd_control == VEGA10_VOLTAGE_CONTROL_BY_SVID2 ||
1078 data->mvdd_control == VEGA10_VOLTAGE_CONTROL_NONE) {
1079 result = vega10_get_mvdd_voltage_table(hwmgr,
1080 table_info->vdd_dep_on_mclk,
1081 &(data->mvdd_voltage_table));
1082 PP_ASSERT_WITH_CODE(!result,
1083 "Failed to retrieve MVDDC table!",
1084 return result);
1085 }
1086
1087 if (data->vddci_control == VEGA10_VOLTAGE_CONTROL_NONE) {
1088 result = vega10_get_vddci_voltage_table(hwmgr,
1089 table_info->vdd_dep_on_mclk,
1090 &(data->vddci_voltage_table));
1091 PP_ASSERT_WITH_CODE(!result,
1092 "Failed to retrieve VDDCI_MEM table!",
1093 return result);
1094 }
1095
1096 if (data->vddc_control == VEGA10_VOLTAGE_CONTROL_BY_SVID2 ||
1097 data->vddc_control == VEGA10_VOLTAGE_CONTROL_NONE) {
1098 result = vega10_get_vdd_voltage_table(hwmgr,
1099 table_info->vdd_dep_on_sclk,
1100 &(data->vddc_voltage_table));
1101 PP_ASSERT_WITH_CODE(!result,
1102 "Failed to retrieve VDDCR_SOC table!",
1103 return result);
1104 }
1105
1106 PP_ASSERT_WITH_CODE(data->vddc_voltage_table.count <= 16,
1107 "Too many voltage values for VDDC. Trimming to fit state table.",
1108 vega10_trim_voltage_table_to_fit_state_table(hwmgr,
1109 16, &(data->vddc_voltage_table)));
1110
1111 PP_ASSERT_WITH_CODE(data->vddci_voltage_table.count <= 16,
1112 "Too many voltage values for VDDCI. Trimming to fit state table.",
1113 vega10_trim_voltage_table_to_fit_state_table(hwmgr,
1114 16, &(data->vddci_voltage_table)));
1115
1116 PP_ASSERT_WITH_CODE(data->mvdd_voltage_table.count <= 16,
1117 "Too many voltage values for MVDD. Trimming to fit state table.",
1118 vega10_trim_voltage_table_to_fit_state_table(hwmgr,
1119 16, &(data->mvdd_voltage_table)));
1120
1121
1122 return 0;
1123}
1124
1125/*
1126 * @fn vega10_init_dpm_state
1127 * @brief Function to initialize all Soft Min/Max and Hard Min/Max to 0xff.
1128 *
1129 * @param dpm_state - the address of the DPM Table to initiailize.
1130 * @return None.
1131 */
1132static void vega10_init_dpm_state(struct vega10_dpm_state *dpm_state)
1133{
1134 dpm_state->soft_min_level = 0xff;
1135 dpm_state->soft_max_level = 0xff;
1136 dpm_state->hard_min_level = 0xff;
1137 dpm_state->hard_max_level = 0xff;
1138}
1139
1140static void vega10_setup_default_single_dpm_table(struct pp_hwmgr *hwmgr,
1141 struct vega10_single_dpm_table *dpm_table,
1142 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table)
1143{
1144 int i;
1145
1146 for (i = 0; i < dep_table->count; i++) {
b7a1f0e3 1147 if (i == 0 || dpm_table->dpm_levels[dpm_table->count - 1].value <=
f83a9991
EH
1148 dep_table->entries[i].clk) {
1149 dpm_table->dpm_levels[dpm_table->count].value =
1150 dep_table->entries[i].clk;
1151 dpm_table->dpm_levels[dpm_table->count].enabled = true;
1152 dpm_table->count++;
1153 }
1154 }
1155}
1156static int vega10_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
1157{
1158 struct vega10_hwmgr *data =
1159 (struct vega10_hwmgr *)(hwmgr->backend);
1160 struct vega10_pcie_table *pcie_table = &(data->dpm_table.pcie_table);
1161 struct phm_ppt_v2_information *table_info =
1162 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1163 struct phm_ppt_v1_pcie_table *bios_pcie_table =
1164 table_info->pcie_table;
1165 uint32_t i;
1166
1167 PP_ASSERT_WITH_CODE(bios_pcie_table->count,
1168 "Incorrect number of PCIE States from VBIOS!",
1169 return -1);
1170
b6dc60cf 1171 for (i = 0; i < NUM_LINK_LEVELS; i++) {
f83a9991
EH
1172 if (data->registry_data.pcieSpeedOverride)
1173 pcie_table->pcie_gen[i] =
1174 data->registry_data.pcieSpeedOverride;
1175 else
1176 pcie_table->pcie_gen[i] =
1177 bios_pcie_table->entries[i].gen_speed;
1178
1179 if (data->registry_data.pcieLaneOverride)
676b4087
RZ
1180 pcie_table->pcie_lane[i] = (uint8_t)encode_pcie_lane_width(
1181 data->registry_data.pcieLaneOverride);
f83a9991 1182 else
676b4087
RZ
1183 pcie_table->pcie_lane[i] = (uint8_t)encode_pcie_lane_width(
1184 bios_pcie_table->entries[i].lane_width);
f83a9991
EH
1185 if (data->registry_data.pcieClockOverride)
1186 pcie_table->lclk[i] =
1187 data->registry_data.pcieClockOverride;
1188 else
1189 pcie_table->lclk[i] =
1190 bios_pcie_table->entries[i].pcie_sclk;
f83a9991
EH
1191 }
1192
00c4855e 1193 pcie_table->count = NUM_LINK_LEVELS;
f83a9991
EH
1194
1195 return 0;
1196}
1197
1198/*
1199 * This function is to initialize all DPM state tables
1200 * for SMU based on the dependency table.
1201 * Dynamic state patching function will then trim these
1202 * state tables to the allowed range based
1203 * on the power policy or external client requests,
1204 * such as UVD request, etc.
1205 */
1206static int vega10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
1207{
1208 struct vega10_hwmgr *data =
1209 (struct vega10_hwmgr *)(hwmgr->backend);
1210 struct phm_ppt_v2_information *table_info =
1211 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1212 struct vega10_single_dpm_table *dpm_table;
1213 uint32_t i;
1214
1215 struct phm_ppt_v1_clock_voltage_dependency_table *dep_soc_table =
1216 table_info->vdd_dep_on_socclk;
1217 struct phm_ppt_v1_clock_voltage_dependency_table *dep_gfx_table =
1218 table_info->vdd_dep_on_sclk;
1219 struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
1220 table_info->vdd_dep_on_mclk;
1221 struct phm_ppt_v1_mm_clock_voltage_dependency_table *dep_mm_table =
1222 table_info->mm_dep_table;
1223 struct phm_ppt_v1_clock_voltage_dependency_table *dep_dcef_table =
1224 table_info->vdd_dep_on_dcefclk;
1225 struct phm_ppt_v1_clock_voltage_dependency_table *dep_pix_table =
1226 table_info->vdd_dep_on_pixclk;
1227 struct phm_ppt_v1_clock_voltage_dependency_table *dep_disp_table =
1228 table_info->vdd_dep_on_dispclk;
1229 struct phm_ppt_v1_clock_voltage_dependency_table *dep_phy_table =
1230 table_info->vdd_dep_on_phyclk;
1231
1232 PP_ASSERT_WITH_CODE(dep_soc_table,
1233 "SOCCLK dependency table is missing. This table is mandatory",
1234 return -EINVAL);
1235 PP_ASSERT_WITH_CODE(dep_soc_table->count >= 1,
1236 "SOCCLK dependency table is empty. This table is mandatory",
1237 return -EINVAL);
1238
1239 PP_ASSERT_WITH_CODE(dep_gfx_table,
1240 "GFXCLK dependency table is missing. This table is mandatory",
1241 return -EINVAL);
1242 PP_ASSERT_WITH_CODE(dep_gfx_table->count >= 1,
1243 "GFXCLK dependency table is empty. This table is mandatory",
1244 return -EINVAL);
1245
1246 PP_ASSERT_WITH_CODE(dep_mclk_table,
1247 "MCLK dependency table is missing. This table is mandatory",
1248 return -EINVAL);
1249 PP_ASSERT_WITH_CODE(dep_mclk_table->count >= 1,
1250 "MCLK dependency table has to have is missing. This table is mandatory",
1251 return -EINVAL);
1252
1253 /* Initialize Sclk DPM table based on allow Sclk values */
1254 data->dpm_table.soc_table.count = 0;
1255 data->dpm_table.gfx_table.count = 0;
1256 data->dpm_table.dcef_table.count = 0;
1257
1258 dpm_table = &(data->dpm_table.soc_table);
1259 vega10_setup_default_single_dpm_table(hwmgr,
1260 dpm_table,
1261 dep_soc_table);
1262
1263 vega10_init_dpm_state(&(dpm_table->dpm_state));
1264
1265 dpm_table = &(data->dpm_table.gfx_table);
1266 vega10_setup_default_single_dpm_table(hwmgr,
1267 dpm_table,
1268 dep_gfx_table);
1269 vega10_init_dpm_state(&(dpm_table->dpm_state));
1270
1271 /* Initialize Mclk DPM table based on allow Mclk values */
1272 data->dpm_table.mem_table.count = 0;
1273 dpm_table = &(data->dpm_table.mem_table);
1274 vega10_setup_default_single_dpm_table(hwmgr,
1275 dpm_table,
1276 dep_mclk_table);
1277 vega10_init_dpm_state(&(dpm_table->dpm_state));
1278
1279 data->dpm_table.eclk_table.count = 0;
1280 dpm_table = &(data->dpm_table.eclk_table);
1281 for (i = 0; i < dep_mm_table->count; i++) {
1282 if (i == 0 || dpm_table->dpm_levels
b7a1f0e3 1283 [dpm_table->count - 1].value <=
f83a9991
EH
1284 dep_mm_table->entries[i].eclk) {
1285 dpm_table->dpm_levels[dpm_table->count].value =
1286 dep_mm_table->entries[i].eclk;
1287 dpm_table->dpm_levels[dpm_table->count].enabled =
1288 (i == 0) ? true : false;
1289 dpm_table->count++;
1290 }
1291 }
1292 vega10_init_dpm_state(&(dpm_table->dpm_state));
1293
1294 data->dpm_table.vclk_table.count = 0;
1295 data->dpm_table.dclk_table.count = 0;
1296 dpm_table = &(data->dpm_table.vclk_table);
1297 for (i = 0; i < dep_mm_table->count; i++) {
1298 if (i == 0 || dpm_table->dpm_levels
b7a1f0e3 1299 [dpm_table->count - 1].value <=
f83a9991
EH
1300 dep_mm_table->entries[i].vclk) {
1301 dpm_table->dpm_levels[dpm_table->count].value =
1302 dep_mm_table->entries[i].vclk;
1303 dpm_table->dpm_levels[dpm_table->count].enabled =
1304 (i == 0) ? true : false;
1305 dpm_table->count++;
1306 }
1307 }
1308 vega10_init_dpm_state(&(dpm_table->dpm_state));
1309
1310 dpm_table = &(data->dpm_table.dclk_table);
1311 for (i = 0; i < dep_mm_table->count; i++) {
1312 if (i == 0 || dpm_table->dpm_levels
b7a1f0e3 1313 [dpm_table->count - 1].value <=
f83a9991
EH
1314 dep_mm_table->entries[i].dclk) {
1315 dpm_table->dpm_levels[dpm_table->count].value =
1316 dep_mm_table->entries[i].dclk;
1317 dpm_table->dpm_levels[dpm_table->count].enabled =
1318 (i == 0) ? true : false;
1319 dpm_table->count++;
1320 }
1321 }
1322 vega10_init_dpm_state(&(dpm_table->dpm_state));
1323
1324 /* Assume there is no headless Vega10 for now */
1325 dpm_table = &(data->dpm_table.dcef_table);
1326 vega10_setup_default_single_dpm_table(hwmgr,
1327 dpm_table,
1328 dep_dcef_table);
1329
1330 vega10_init_dpm_state(&(dpm_table->dpm_state));
1331
1332 dpm_table = &(data->dpm_table.pixel_table);
1333 vega10_setup_default_single_dpm_table(hwmgr,
1334 dpm_table,
1335 dep_pix_table);
1336
1337 vega10_init_dpm_state(&(dpm_table->dpm_state));
1338
1339 dpm_table = &(data->dpm_table.display_table);
1340 vega10_setup_default_single_dpm_table(hwmgr,
1341 dpm_table,
1342 dep_disp_table);
1343
1344 vega10_init_dpm_state(&(dpm_table->dpm_state));
1345
1346 dpm_table = &(data->dpm_table.phy_table);
1347 vega10_setup_default_single_dpm_table(hwmgr,
1348 dpm_table,
1349 dep_phy_table);
1350
1351 vega10_init_dpm_state(&(dpm_table->dpm_state));
1352
1353 vega10_setup_default_pcie_table(hwmgr);
1354
1355 /* save a copy of the default DPM table */
1356 memcpy(&(data->golden_dpm_table), &(data->dpm_table),
1357 sizeof(struct vega10_dpm_table));
1358
1359 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1360 PHM_PlatformCaps_ODNinACSupport) ||
1361 phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1362 PHM_PlatformCaps_ODNinDCSupport)) {
1363 data->odn_dpm_table.odn_core_clock_dpm_levels.
1364 number_of_performance_levels = data->dpm_table.gfx_table.count;
1365 for (i = 0; i < data->dpm_table.gfx_table.count; i++) {
1366 data->odn_dpm_table.odn_core_clock_dpm_levels.
1367 performance_level_entries[i].clock =
1368 data->dpm_table.gfx_table.dpm_levels[i].value;
1369 data->odn_dpm_table.odn_core_clock_dpm_levels.
1370 performance_level_entries[i].enabled = true;
1371 }
1372
1373 data->odn_dpm_table.vdd_dependency_on_sclk.count =
1374 dep_gfx_table->count;
1375 for (i = 0; i < dep_gfx_table->count; i++) {
1376 data->odn_dpm_table.vdd_dependency_on_sclk.entries[i].clk =
1377 dep_gfx_table->entries[i].clk;
1378 data->odn_dpm_table.vdd_dependency_on_sclk.entries[i].vddInd =
1379 dep_gfx_table->entries[i].vddInd;
1380 data->odn_dpm_table.vdd_dependency_on_sclk.entries[i].cks_enable =
1381 dep_gfx_table->entries[i].cks_enable;
1382 data->odn_dpm_table.vdd_dependency_on_sclk.entries[i].cks_voffset =
1383 dep_gfx_table->entries[i].cks_voffset;
1384 }
1385
1386 data->odn_dpm_table.odn_memory_clock_dpm_levels.
1387 number_of_performance_levels = data->dpm_table.mem_table.count;
1388 for (i = 0; i < data->dpm_table.mem_table.count; i++) {
1389 data->odn_dpm_table.odn_memory_clock_dpm_levels.
1390 performance_level_entries[i].clock =
1391 data->dpm_table.mem_table.dpm_levels[i].value;
1392 data->odn_dpm_table.odn_memory_clock_dpm_levels.
1393 performance_level_entries[i].enabled = true;
1394 }
1395
1396 data->odn_dpm_table.vdd_dependency_on_mclk.count = dep_mclk_table->count;
1397 for (i = 0; i < dep_mclk_table->count; i++) {
1398 data->odn_dpm_table.vdd_dependency_on_mclk.entries[i].clk =
1399 dep_mclk_table->entries[i].clk;
1400 data->odn_dpm_table.vdd_dependency_on_mclk.entries[i].vddInd =
1401 dep_mclk_table->entries[i].vddInd;
1402 data->odn_dpm_table.vdd_dependency_on_mclk.entries[i].vddci =
1403 dep_mclk_table->entries[i].vddci;
1404 }
1405 }
1406
1407 return 0;
1408}
1409
1410/*
1411 * @fn vega10_populate_ulv_state
1412 * @brief Function to provide parameters for Utral Low Voltage state to SMC.
1413 *
1414 * @param hwmgr - the address of the hardware manager.
1415 * @return Always 0.
1416 */
1417static int vega10_populate_ulv_state(struct pp_hwmgr *hwmgr)
1418{
1419 struct vega10_hwmgr *data =
1420 (struct vega10_hwmgr *)(hwmgr->backend);
1421 struct phm_ppt_v2_information *table_info =
1422 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1423
1424 data->smc_state_table.pp_table.UlvOffsetVid =
effa290c 1425 (uint8_t)table_info->us_ulv_voltage_offset;
f83a9991
EH
1426
1427 data->smc_state_table.pp_table.UlvSmnclkDid =
1428 (uint8_t)(table_info->us_ulv_smnclk_did);
1429 data->smc_state_table.pp_table.UlvMp1clkDid =
1430 (uint8_t)(table_info->us_ulv_mp1clk_did);
1431 data->smc_state_table.pp_table.UlvGfxclkBypass =
1432 (uint8_t)(table_info->us_ulv_gfxclk_bypass);
1433 data->smc_state_table.pp_table.UlvPhaseSheddingPsi0 =
1434 (uint8_t)(data->vddc_voltage_table.psi0_enable);
1435 data->smc_state_table.pp_table.UlvPhaseSheddingPsi1 =
1436 (uint8_t)(data->vddc_voltage_table.psi1_enable);
1437
1438 return 0;
1439}
1440
1441static int vega10_populate_single_lclk_level(struct pp_hwmgr *hwmgr,
1442 uint32_t lclock, uint8_t *curr_lclk_did)
1443{
1444 struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1445
1446 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(
1447 hwmgr,
1448 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
1449 lclock, &dividers),
1450 "Failed to get LCLK clock settings from VBIOS!",
1451 return -1);
1452
1453 *curr_lclk_did = dividers.ulDid;
1454
1455 return 0;
1456}
1457
1458static int vega10_populate_smc_link_levels(struct pp_hwmgr *hwmgr)
1459{
1460 int result = -1;
1461 struct vega10_hwmgr *data =
1462 (struct vega10_hwmgr *)(hwmgr->backend);
1463 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
1464 struct vega10_pcie_table *pcie_table =
1465 &(data->dpm_table.pcie_table);
1466 uint32_t i, j;
1467
1468 for (i = 0; i < pcie_table->count; i++) {
1469 pp_table->PcieGenSpeed[i] = pcie_table->pcie_gen[i];
1470 pp_table->PcieLaneCount[i] = pcie_table->pcie_lane[i];
1471
1472 result = vega10_populate_single_lclk_level(hwmgr,
1473 pcie_table->lclk[i], &(pp_table->LclkDid[i]));
1474 if (result) {
1475 pr_info("Populate LClock Level %d Failed!\n", i);
1476 return result;
1477 }
1478 }
1479
1480 j = i - 1;
1481 while (i < NUM_LINK_LEVELS) {
1482 pp_table->PcieGenSpeed[i] = pcie_table->pcie_gen[j];
1483 pp_table->PcieLaneCount[i] = pcie_table->pcie_lane[j];
1484
1485 result = vega10_populate_single_lclk_level(hwmgr,
1486 pcie_table->lclk[j], &(pp_table->LclkDid[i]));
1487 if (result) {
1488 pr_info("Populate LClock Level %d Failed!\n", i);
1489 return result;
1490 }
1491 i++;
1492 }
1493
1494 return result;
1495}
1496
1497/**
1498* Populates single SMC GFXSCLK structure using the provided engine clock
1499*
1500* @param hwmgr the address of the hardware manager
1501* @param gfx_clock the GFX clock to use to populate the structure.
1502* @param current_gfxclk_level location in PPTable for the SMC GFXCLK structure.
1503*/
1504
1505static int vega10_populate_single_gfx_level(struct pp_hwmgr *hwmgr,
1506 uint32_t gfx_clock, PllSetting_t *current_gfxclk_level)
1507{
1508 struct phm_ppt_v2_information *table_info =
1509 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1510 struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_sclk =
1511 table_info->vdd_dep_on_sclk;
1512 struct vega10_hwmgr *data =
1513 (struct vega10_hwmgr *)(hwmgr->backend);
1514 struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1515 uint32_t i;
1516
1517 if (data->apply_overdrive_next_settings_mask &
1518 DPMTABLE_OD_UPDATE_VDDC)
1519 dep_on_sclk = (struct phm_ppt_v1_clock_voltage_dependency_table *)
1520 &(data->odn_dpm_table.vdd_dependency_on_sclk);
1521
1522 PP_ASSERT_WITH_CODE(dep_on_sclk,
1523 "Invalid SOC_VDD-GFX_CLK Dependency Table!",
1524 return -EINVAL);
1525
1526 for (i = 0; i < dep_on_sclk->count; i++) {
1527 if (dep_on_sclk->entries[i].clk == gfx_clock)
1528 break;
1529 }
1530
1531 PP_ASSERT_WITH_CODE(dep_on_sclk->count > i,
1532 "Cannot find gfx_clk in SOC_VDD-GFX_CLK!",
1533 return -EINVAL);
1534 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
1535 COMPUTE_GPUCLK_INPUT_FLAG_GFXCLK,
1536 gfx_clock, &dividers),
1537 "Failed to get GFX Clock settings from VBIOS!",
1538 return -EINVAL);
1539
1540 /* Feedback Multiplier: bit 0:8 int, bit 15:12 post_div, bit 31:16 frac */
1541 current_gfxclk_level->FbMult =
1542 cpu_to_le32(dividers.ulPll_fb_mult);
1543 /* Spread FB Multiplier bit: bit 0:8 int, bit 31:16 frac */
93480f89 1544 current_gfxclk_level->SsOn = dividers.ucPll_ss_enable;
f83a9991
EH
1545 current_gfxclk_level->SsFbMult =
1546 cpu_to_le32(dividers.ulPll_ss_fbsmult);
1547 current_gfxclk_level->SsSlewFrac =
1548 cpu_to_le16(dividers.usPll_ss_slew_frac);
1549 current_gfxclk_level->Did = (uint8_t)(dividers.ulDid);
1550
1551 return 0;
1552}
1553
1554/**
1555 * @brief Populates single SMC SOCCLK structure using the provided clock.
1556 *
1557 * @param hwmgr - the address of the hardware manager.
1558 * @param soc_clock - the SOC clock to use to populate the structure.
1559 * @param current_socclk_level - location in PPTable for the SMC SOCCLK structure.
1560 * @return 0 on success..
1561 */
1562static int vega10_populate_single_soc_level(struct pp_hwmgr *hwmgr,
1563 uint32_t soc_clock, uint8_t *current_soc_did,
1564 uint8_t *current_vol_index)
1565{
1566 struct phm_ppt_v2_information *table_info =
1567 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1568 struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_soc =
1569 table_info->vdd_dep_on_socclk;
1570 struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1571 uint32_t i;
1572
1573 PP_ASSERT_WITH_CODE(dep_on_soc,
1574 "Invalid SOC_VDD-SOC_CLK Dependency Table!",
1575 return -EINVAL);
1576 for (i = 0; i < dep_on_soc->count; i++) {
1577 if (dep_on_soc->entries[i].clk == soc_clock)
1578 break;
1579 }
1580 PP_ASSERT_WITH_CODE(dep_on_soc->count > i,
1581 "Cannot find SOC_CLK in SOC_VDD-SOC_CLK Dependency Table",
1582 return -EINVAL);
1583 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
1584 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
1585 soc_clock, &dividers),
1586 "Failed to get SOC Clock settings from VBIOS!",
1587 return -EINVAL);
1588
1589 *current_soc_did = (uint8_t)dividers.ulDid;
1590 *current_vol_index = (uint8_t)(dep_on_soc->entries[i].vddInd);
1591
1592 return 0;
1593}
1594
1595uint16_t vega10_locate_vddc_given_clock(struct pp_hwmgr *hwmgr,
1596 uint32_t clk,
1597 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table)
1598{
1599 uint16_t i;
1600
1601 for (i = 0; i < dep_table->count; i++) {
1602 if (dep_table->entries[i].clk == clk)
1603 return dep_table->entries[i].vddc;
1604 }
1605
1606 pr_info("[LocateVddcGivenClock] Cannot locate SOC Vddc for this clock!");
1607 return 0;
1608}
1609
1610/**
1611* Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states
1612*
1613* @param hwmgr the address of the hardware manager
1614*/
1615static int vega10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
1616{
1617 struct vega10_hwmgr *data =
1618 (struct vega10_hwmgr *)(hwmgr->backend);
1619 struct phm_ppt_v2_information *table_info =
1620 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1621 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
1622 table_info->vdd_dep_on_socclk;
1623 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
1624 struct vega10_single_dpm_table *dpm_table = &(data->dpm_table.gfx_table);
1625 int result = 0;
1626 uint32_t i, j;
1627
1628 for (i = 0; i < dpm_table->count; i++) {
1629 result = vega10_populate_single_gfx_level(hwmgr,
1630 dpm_table->dpm_levels[i].value,
1631 &(pp_table->GfxclkLevel[i]));
1632 if (result)
1633 return result;
1634 }
1635
1636 j = i - 1;
1637 while (i < NUM_GFXCLK_DPM_LEVELS) {
1638 result = vega10_populate_single_gfx_level(hwmgr,
1639 dpm_table->dpm_levels[j].value,
1640 &(pp_table->GfxclkLevel[i]));
1641 if (result)
1642 return result;
1643 i++;
1644 }
1645
1646 pp_table->GfxclkSlewRate =
1647 cpu_to_le16(table_info->us_gfxclk_slew_rate);
1648
1649 dpm_table = &(data->dpm_table.soc_table);
1650 for (i = 0; i < dpm_table->count; i++) {
1651 pp_table->SocVid[i] =
1652 (uint8_t)convert_to_vid(
1653 vega10_locate_vddc_given_clock(hwmgr,
1654 dpm_table->dpm_levels[i].value,
1655 dep_table));
1656 result = vega10_populate_single_soc_level(hwmgr,
1657 dpm_table->dpm_levels[i].value,
1658 &(pp_table->SocclkDid[i]),
1659 &(pp_table->SocDpmVoltageIndex[i]));
1660 if (result)
1661 return result;
1662 }
1663
1664 j = i - 1;
1665 while (i < NUM_SOCCLK_DPM_LEVELS) {
1666 pp_table->SocVid[i] = pp_table->SocVid[j];
1667 result = vega10_populate_single_soc_level(hwmgr,
1668 dpm_table->dpm_levels[j].value,
1669 &(pp_table->SocclkDid[i]),
1670 &(pp_table->SocDpmVoltageIndex[i]));
1671 if (result)
1672 return result;
1673 i++;
1674 }
1675
1676 return result;
1677}
1678
1679/**
1680 * @brief Populates single SMC GFXCLK structure using the provided clock.
1681 *
1682 * @param hwmgr - the address of the hardware manager.
1683 * @param mem_clock - the memory clock to use to populate the structure.
1684 * @return 0 on success..
1685 */
1686static int vega10_populate_single_memory_level(struct pp_hwmgr *hwmgr,
1687 uint32_t mem_clock, uint8_t *current_mem_vid,
1688 PllSetting_t *current_memclk_level, uint8_t *current_mem_soc_vind)
1689{
1690 struct vega10_hwmgr *data =
1691 (struct vega10_hwmgr *)(hwmgr->backend);
1692 struct phm_ppt_v2_information *table_info =
1693 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1694 struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_mclk =
1695 table_info->vdd_dep_on_mclk;
1696 struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1697 uint32_t i;
1698
1699 if (data->apply_overdrive_next_settings_mask &
1700 DPMTABLE_OD_UPDATE_VDDC)
1701 dep_on_mclk = (struct phm_ppt_v1_clock_voltage_dependency_table *)
1702 &data->odn_dpm_table.vdd_dependency_on_mclk;
1703
1704 PP_ASSERT_WITH_CODE(dep_on_mclk,
1705 "Invalid SOC_VDD-UCLK Dependency Table!",
1706 return -EINVAL);
1707
1708 for (i = 0; i < dep_on_mclk->count; i++) {
1709 if (dep_on_mclk->entries[i].clk == mem_clock)
1710 break;
1711 }
1712
1713 PP_ASSERT_WITH_CODE(dep_on_mclk->count > i,
1714 "Cannot find UCLK in SOC_VDD-UCLK Dependency Table!",
1715 return -EINVAL);
1716
1717 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(
1718 hwmgr, COMPUTE_GPUCLK_INPUT_FLAG_UCLK, mem_clock, &dividers),
1719 "Failed to get UCLK settings from VBIOS!",
1720 return -1);
1721
1722 *current_mem_vid =
1723 (uint8_t)(convert_to_vid(dep_on_mclk->entries[i].mvdd));
1724 *current_mem_soc_vind =
1725 (uint8_t)(dep_on_mclk->entries[i].vddInd);
1726 current_memclk_level->FbMult = cpu_to_le32(dividers.ulPll_fb_mult);
1727 current_memclk_level->Did = (uint8_t)(dividers.ulDid);
1728
1729 PP_ASSERT_WITH_CODE(current_memclk_level->Did >= 1,
1730 "Invalid Divider ID!",
1731 return -EINVAL);
1732
1733 return 0;
1734}
1735
1736/**
1737 * @brief Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states.
1738 *
1739 * @param pHwMgr - the address of the hardware manager.
1740 * @return PP_Result_OK on success.
1741 */
1742static int vega10_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
1743{
1744 struct vega10_hwmgr *data =
1745 (struct vega10_hwmgr *)(hwmgr->backend);
1746 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
1747 struct vega10_single_dpm_table *dpm_table =
1748 &(data->dpm_table.mem_table);
1749 int result = 0;
1750 uint32_t i, j, reg, mem_channels;
1751
1752 for (i = 0; i < dpm_table->count; i++) {
1753 result = vega10_populate_single_memory_level(hwmgr,
1754 dpm_table->dpm_levels[i].value,
1755 &(pp_table->MemVid[i]),
1756 &(pp_table->UclkLevel[i]),
1757 &(pp_table->MemSocVoltageIndex[i]));
1758 if (result)
1759 return result;
1760 }
1761
1762 j = i - 1;
1763 while (i < NUM_UCLK_DPM_LEVELS) {
1764 result = vega10_populate_single_memory_level(hwmgr,
1765 dpm_table->dpm_levels[j].value,
1766 &(pp_table->MemVid[i]),
1767 &(pp_table->UclkLevel[i]),
1768 &(pp_table->MemSocVoltageIndex[i]));
1769 if (result)
1770 return result;
1771 i++;
1772 }
1773
1774 reg = soc15_get_register_offset(DF_HWID, 0,
1775 mmDF_CS_AON0_DramBaseAddress0_BASE_IDX,
1776 mmDF_CS_AON0_DramBaseAddress0);
1777 mem_channels = (cgs_read_register(hwmgr->device, reg) &
1778 DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK) >>
1779 DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;
1780 pp_table->NumMemoryChannels = cpu_to_le16(mem_channels);
1781 pp_table->MemoryChannelWidth =
1782 cpu_to_le16(HBM_MEMORY_CHANNEL_WIDTH *
1783 channel_number[mem_channels]);
1784
1785 pp_table->LowestUclkReservedForUlv =
1786 (uint8_t)(data->lowest_uclk_reserved_for_ulv);
1787
1788 return result;
1789}
1790
/*
 * Fill one row-set of the SMC DisplayClockTable for the given display
 * clock domain (DCEF/DISP/PIX/PHY clock).
 *
 * Each populated entry carries the frequency (dependency-table clock
 * divided by 100 -- presumably a 10 kHz -> MHz conversion, TODO confirm
 * units against the pptable parser) and the VID derived from the VDDC
 * lookup table. Unused trailing slots are padded by repeating the last
 * populated (clk, vid) pair, or zeros if the dependency table is empty.
 *
 * Returns 0 on success, -1 on an unknown domain or an oversized table.
 */
static int vega10_populate_single_display_type(struct pp_hwmgr *hwmgr,
		DSPCLK_e disp_clock)
{
	struct vega10_hwmgr *data =
			(struct vega10_hwmgr *)(hwmgr->backend);
	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)
			(hwmgr->pptable);
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
	uint32_t i;
	uint16_t clk = 0, vddc = 0;
	uint8_t vid = 0;

	/* Pick the voltage-dependency table matching the requested domain. */
	switch (disp_clock) {
	case DSPCLK_DCEFCLK:
		dep_table = table_info->vdd_dep_on_dcefclk;
		break;
	case DSPCLK_DISPCLK:
		dep_table = table_info->vdd_dep_on_dispclk;
		break;
	case DSPCLK_PIXCLK:
		dep_table = table_info->vdd_dep_on_pixclk;
		break;
	case DSPCLK_PHYCLK:
		dep_table = table_info->vdd_dep_on_phyclk;
		break;
	default:
		return -1;
	}

	PP_ASSERT_WITH_CODE(dep_table->count <= NUM_DSPCLK_LEVELS,
			"Number Of Entries Exceeded maximum!",
			return -1);

	for (i = 0; i < dep_table->count; i++) {
		clk = (uint16_t)(dep_table->entries[i].clk / 100);
		vddc = table_info->vddc_lookup_table->
				entries[dep_table->entries[i].vddInd].us_vdd;
		vid = (uint8_t)convert_to_vid(vddc);
		pp_table->DisplayClockTable[disp_clock][i].Freq =
				cpu_to_le16(clk);
		/* NOTE(review): Vid appears to be a single byte in the SMC
		 * table; cpu_to_le16() is a no-op on little-endian but would
		 * truncate to 0 on big-endian -- confirm against the
		 * PPTable_t declaration in smu9_driver_if.h. */
		pp_table->DisplayClockTable[disp_clock][i].Vid =
				cpu_to_le16(vid);
	}

	/* Pad remaining slots with the last real level (or zeros). */
	while (i < NUM_DSPCLK_LEVELS) {
		pp_table->DisplayClockTable[disp_clock][i].Freq =
				cpu_to_le16(clk);
		pp_table->DisplayClockTable[disp_clock][i].Vid =
				cpu_to_le16(vid);
		i++;
	}

	return 0;
}
1847
1848static int vega10_populate_all_display_clock_levels(struct pp_hwmgr *hwmgr)
1849{
1850 uint32_t i;
1851
1852 for (i = 0; i < DSPCLK_COUNT; i++) {
1853 PP_ASSERT_WITH_CODE(!vega10_populate_single_display_type(hwmgr, i),
1854 "Failed to populate Clock in DisplayClockTable!",
1855 return -1);
1856 }
1857
1858 return 0;
1859}
1860
1861static int vega10_populate_single_eclock_level(struct pp_hwmgr *hwmgr,
1862 uint32_t eclock, uint8_t *current_eclk_did,
1863 uint8_t *current_soc_vol)
1864{
1865 struct phm_ppt_v2_information *table_info =
1866 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1867 struct phm_ppt_v1_mm_clock_voltage_dependency_table *dep_table =
1868 table_info->mm_dep_table;
1869 struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1870 uint32_t i;
1871
1872 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
1873 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
1874 eclock, &dividers),
1875 "Failed to get ECLK clock settings from VBIOS!",
1876 return -1);
1877
1878 *current_eclk_did = (uint8_t)dividers.ulDid;
1879
1880 for (i = 0; i < dep_table->count; i++) {
1881 if (dep_table->entries[i].eclk == eclock)
1882 *current_soc_vol = dep_table->entries[i].vddcInd;
1883 }
1884
1885 return 0;
1886}
1887
/*
 * Program the SMC VCE (ECLK) DPM levels from the driver's eclk DPM table.
 * Slots beyond dpm_table->count are padded by re-populating the highest
 * real level so all NUM_VCE_DPM_LEVELS entries are valid.
 *
 * Returns 0 on success, or the first error from the per-level helper.
 */
static int vega10_populate_smc_vce_levels(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data =
			(struct vega10_hwmgr *)(hwmgr->backend);
	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
	struct vega10_single_dpm_table *dpm_table = &(data->dpm_table.eclk_table);
	int result = -EINVAL;
	uint32_t i, j;

	/* One SMC entry per real DPM level. */
	for (i = 0; i < dpm_table->count; i++) {
		result = vega10_populate_single_eclock_level(hwmgr,
				dpm_table->dpm_levels[i].value,
				&(pp_table->EclkDid[i]),
				&(pp_table->VceDpmVoltageIndex[i]));
		if (result)
			return result;
	}

	/* Pad the remaining slots with the last populated level.
	 * NOTE(review): if dpm_table->count is 0, j wraps to 0xffffffff and
	 * dpm_levels[j] indexes out of bounds -- assumes the eclk table is
	 * never empty; confirm against vega10_setup_default_dpm_tables(). */
	j = i - 1;
	while (i < NUM_VCE_DPM_LEVELS) {
		result = vega10_populate_single_eclock_level(hwmgr,
				dpm_table->dpm_levels[j].value,
				&(pp_table->EclkDid[i]),
				&(pp_table->VceDpmVoltageIndex[i]));
		if (result)
			return result;
		i++;
	}

	return result;
}
1919
1920static int vega10_populate_single_vclock_level(struct pp_hwmgr *hwmgr,
1921 uint32_t vclock, uint8_t *current_vclk_did)
1922{
1923 struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1924
1925 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
1926 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
1927 vclock, &dividers),
1928 "Failed to get VCLK clock settings from VBIOS!",
1929 return -EINVAL);
1930
1931 *current_vclk_did = (uint8_t)dividers.ulDid;
1932
1933 return 0;
1934}
1935
1936static int vega10_populate_single_dclock_level(struct pp_hwmgr *hwmgr,
1937 uint32_t dclock, uint8_t *current_dclk_did)
1938{
1939 struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1940
1941 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
1942 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
1943 dclock, &dividers),
1944 "Failed to get DCLK clock settings from VBIOS!",
1945 return -EINVAL);
1946
1947 *current_dclk_did = (uint8_t)dividers.ulDid;
1948
1949 return 0;
1950}
1951
/*
 * Program the SMC UVD DPM levels: VCLK and DCLK divider IDs plus the SOC
 * voltage index for each level. SMC slots beyond the real table sizes are
 * padded with the highest populated level.
 *
 * Returns 0 on success, a helper error code on VBIOS failure, or -1 if
 * the multimedia dependency table does not mirror the DPM tables.
 */
static int vega10_populate_smc_uvd_levels(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data =
			(struct vega10_hwmgr *)(hwmgr->backend);
	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
	struct vega10_single_dpm_table *vclk_dpm_table =
			&(data->dpm_table.vclk_table);
	struct vega10_single_dpm_table *dclk_dpm_table =
			&(data->dpm_table.dclk_table);
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)(hwmgr->pptable);
	struct phm_ppt_v1_mm_clock_voltage_dependency_table *dep_table =
			table_info->mm_dep_table;
	int result = -EINVAL;
	uint32_t i, j;

	/* VCLK divider IDs for the real levels. */
	for (i = 0; i < vclk_dpm_table->count; i++) {
		result = vega10_populate_single_vclock_level(hwmgr,
				vclk_dpm_table->dpm_levels[i].value,
				&(pp_table->VclkDid[i]));
		if (result)
			return result;
	}

	/* Pad remaining VCLK slots with the last real level.
	 * NOTE(review): j underflows if the vclk table is empty -- assumes a
	 * non-empty table, as the sibling VCE/memory populate paths do. */
	j = i - 1;
	while (i < NUM_UVD_DPM_LEVELS) {
		result = vega10_populate_single_vclock_level(hwmgr,
				vclk_dpm_table->dpm_levels[j].value,
				&(pp_table->VclkDid[i]));
		if (result)
			return result;
		i++;
	}

	/* DCLK divider IDs, same scheme. */
	for (i = 0; i < dclk_dpm_table->count; i++) {
		result = vega10_populate_single_dclock_level(hwmgr,
				dclk_dpm_table->dpm_levels[i].value,
				&(pp_table->DclkDid[i]));
		if (result)
			return result;
	}

	j = i - 1;
	while (i < NUM_UVD_DPM_LEVELS) {
		result = vega10_populate_single_dclock_level(hwmgr,
				dclk_dpm_table->dpm_levels[j].value,
				&(pp_table->DclkDid[i]));
		if (result)
			return result;
		i++;
	}

	/* Voltage index: the MM dependency table is expected to mirror the
	 * vclk/dclk DPM tables entry-for-entry; any index mismatch aborts
	 * the whole populate with -1. */
	for (i = 0; i < dep_table->count; i++) {
		if (dep_table->entries[i].vclk ==
				vclk_dpm_table->dpm_levels[i].value &&
			dep_table->entries[i].dclk ==
				dclk_dpm_table->dpm_levels[i].value)
			pp_table->UvdDpmVoltageIndex[i] =
					dep_table->entries[i].vddcInd;
		else
			return -1;
	}

	/* Pad remaining voltage indices with the last real entry. */
	j = i - 1;
	while (i < NUM_UVD_DPM_LEVELS) {
		pp_table->UvdDpmVoltageIndex[i] = dep_table->entries[j].vddcInd;
		i++;
	}

	return 0;
}
2023
2024static int vega10_populate_clock_stretcher_table(struct pp_hwmgr *hwmgr)
2025{
2026 struct vega10_hwmgr *data =
2027 (struct vega10_hwmgr *)(hwmgr->backend);
2028 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
2029 struct phm_ppt_v2_information *table_info =
2030 (struct phm_ppt_v2_information *)(hwmgr->pptable);
2031 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
2032 table_info->vdd_dep_on_sclk;
2033 uint32_t i;
2034
afc0255c 2035 for (i = 0; i < dep_table->count; i++) {
f83a9991 2036 pp_table->CksEnable[i] = dep_table->entries[i].cks_enable;
afc0255c
RZ
2037 pp_table->CksVidOffset[i] = (uint8_t)(dep_table->entries[i].cks_voffset
2038 * VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
f83a9991
EH
2039 }
2040
2041 return 0;
2042}
2043
/*
 * Fill the AVFS (adaptive voltage/frequency scaling) section of the SMC
 * pptable from the VBIOS AVFS parameters, the SCLK dependency table, and
 * any registry-supplied quadratic-equation overrides for the display
 * clock -> gfxclk conversion curves.
 *
 * If the VBIOS query fails, AVFS support is switched off in the feature
 * table and 0 is still returned (AVFS is best-effort here).
 */
static int vega10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data =
			(struct vega10_hwmgr *)(hwmgr->backend);
	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)(hwmgr->pptable);
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
			table_info->vdd_dep_on_sclk;
	struct pp_atomfwctrl_avfs_parameters avfs_params = {0};
	int result = 0;
	uint32_t i;

	/* Defaults used when AVFS is unsupported or the VBIOS query fails. */
	pp_table->MinVoltageVid = (uint8_t)0xff;
	pp_table->MaxVoltageVid = (uint8_t)0;

	if (data->smu_features[GNLD_AVFS].supported) {
		result = pp_atomfwctrl_get_avfs_information(hwmgr, &avfs_params);
		if (!result) {
			pp_table->MinVoltageVid = (uint8_t)
					convert_to_vid((uint16_t)(avfs_params.ulMinVddc));
			pp_table->MaxVoltageVid = (uint8_t)
					convert_to_vid((uint16_t)(avfs_params.ulMaxVddc));

			/* Mean/N-sigma model constants. */
			pp_table->AConstant[0] = cpu_to_le32(avfs_params.ulMeanNsigmaAcontant0);
			pp_table->AConstant[1] = cpu_to_le32(avfs_params.ulMeanNsigmaAcontant1);
			pp_table->AConstant[2] = cpu_to_le32(avfs_params.ulMeanNsigmaAcontant2);
			pp_table->DC_tol_sigma = cpu_to_le16(avfs_params.usMeanNsigmaDcTolSigma);
			pp_table->Platform_mean = cpu_to_le16(avfs_params.usMeanNsigmaPlatformMean);
			/* NOTE(review): Platform_sigma reuses the DC-tolerance
			 * sigma source field rather than a platform-sigma
			 * field -- looks like a copy/paste; confirm against
			 * the atomfirmware AVFS structure before changing. */
			pp_table->Platform_sigma = cpu_to_le16(avfs_params.usMeanNsigmaDcTolSigma);
			pp_table->PSM_Age_CompFactor = cpu_to_le16(avfs_params.usPsmAgeComfactor);

			/* BTC Vdroop curves, clock-stretcher off and on.
			 * All coefficients use a fixed-point shift of 20. */
			pp_table->BtcGbVdroopTableCksOff.a0 =
					cpu_to_le32(avfs_params.ulGbVdroopTableCksoffA0);
			pp_table->BtcGbVdroopTableCksOff.a0_shift = 20;
			pp_table->BtcGbVdroopTableCksOff.a1 =
					cpu_to_le32(avfs_params.ulGbVdroopTableCksoffA1);
			pp_table->BtcGbVdroopTableCksOff.a1_shift = 20;
			pp_table->BtcGbVdroopTableCksOff.a2 =
					cpu_to_le32(avfs_params.ulGbVdroopTableCksoffA2);
			pp_table->BtcGbVdroopTableCksOff.a2_shift = 20;

			pp_table->OverrideBtcGbCksOn = avfs_params.ucEnableGbVdroopTableCkson;
			pp_table->BtcGbVdroopTableCksOn.a0 =
					cpu_to_le32(avfs_params.ulGbVdroopTableCksonA0);
			pp_table->BtcGbVdroopTableCksOn.a0_shift = 20;
			pp_table->BtcGbVdroopTableCksOn.a1 =
					cpu_to_le32(avfs_params.ulGbVdroopTableCksonA1);
			pp_table->BtcGbVdroopTableCksOn.a1_shift = 20;
			pp_table->BtcGbVdroopTableCksOn.a2 =
					cpu_to_le32(avfs_params.ulGbVdroopTableCksonA2);
			pp_table->BtcGbVdroopTableCksOn.a2_shift = 20;

			/* AVFS fuse curves (CKS on/off).
			 * NOTE(review): m2 is converted with cpu_to_le16()
			 * although the source field is a ul* (32-bit) value --
			 * presumably the SMC field is 16-bit; confirm against
			 * smu9_driver_if.h before touching. */
			pp_table->AvfsGbCksOn.m1 =
					cpu_to_le32(avfs_params.ulGbFuseTableCksonM1);
			pp_table->AvfsGbCksOn.m2 =
					cpu_to_le16(avfs_params.ulGbFuseTableCksonM2);
			pp_table->AvfsGbCksOn.b =
					cpu_to_le32(avfs_params.ulGbFuseTableCksonB);
			pp_table->AvfsGbCksOn.m1_shift = 24;
			pp_table->AvfsGbCksOn.m2_shift = 12;
			pp_table->AvfsGbCksOn.b_shift = 0;

			pp_table->OverrideAvfsGbCksOn =
					avfs_params.ucEnableGbFuseTableCkson;
			pp_table->AvfsGbCksOff.m1 =
					cpu_to_le32(avfs_params.ulGbFuseTableCksoffM1);
			pp_table->AvfsGbCksOff.m2 =
					cpu_to_le16(avfs_params.ulGbFuseTableCksoffM2);
			pp_table->AvfsGbCksOff.b =
					cpu_to_le32(avfs_params.ulGbFuseTableCksoffB);
			pp_table->AvfsGbCksOff.m1_shift = 24;
			pp_table->AvfsGbCksOff.m2_shift = 12;
			pp_table->AvfsGbCksOff.b_shift = 0;

			/* Static per-level voltage offsets; 248 marks "no
			 * offset" for entries with a zero sclk_offset. */
			for (i = 0; i < dep_table->count; i++) {
				if (dep_table->entries[i].sclk_offset == 0)
					pp_table->StaticVoltageOffsetVid[i] = 248;
				else
					pp_table->StaticVoltageOffsetVid[i] =
						(uint8_t)(dep_table->entries[i].sclk_offset *
							VOLTAGE_VID_OFFSET_SCALE2 /
							VOLTAGE_VID_OFFSET_SCALE1);
			}

			/* DISPCLK -> GFXCLK curve: registry override wins
			 * when both a and b differ from the default key. */
			if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
					data->disp_clk_quad_eqn_a) &&
				(PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
					data->disp_clk_quad_eqn_b)) {
				pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m1 =
						(int32_t)data->disp_clk_quad_eqn_a;
				pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m2 =
						(int32_t)data->disp_clk_quad_eqn_b;
				pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].b =
						(int32_t)data->disp_clk_quad_eqn_c;
			} else {
				pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m1 =
						(int32_t)avfs_params.ulDispclk2GfxclkM1;
				pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m2 =
						(int32_t)avfs_params.ulDispclk2GfxclkM2;
				pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].b =
						(int32_t)avfs_params.ulDispclk2GfxclkB;
			}

			pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m1_shift = 24;
			pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m2_shift = 12;
			pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].b_shift = 12;

			/* DCEFCLK -> GFXCLK curve, same override scheme. */
			if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
					data->dcef_clk_quad_eqn_a) &&
				(PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
					data->dcef_clk_quad_eqn_b)) {
				pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m1 =
						(int32_t)data->dcef_clk_quad_eqn_a;
				pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m2 =
						(int32_t)data->dcef_clk_quad_eqn_b;
				pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].b =
						(int32_t)data->dcef_clk_quad_eqn_c;
			} else {
				pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m1 =
						(int32_t)avfs_params.ulDcefclk2GfxclkM1;
				pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m2 =
						(int32_t)avfs_params.ulDcefclk2GfxclkM2;
				pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].b =
						(int32_t)avfs_params.ulDcefclk2GfxclkB;
			}

			pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m1_shift = 24;
			pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m2_shift = 12;
			pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].b_shift = 12;

			/* PIXCLK -> GFXCLK curve, same override scheme. */
			if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
					data->pixel_clk_quad_eqn_a) &&
				(PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
					data->pixel_clk_quad_eqn_b)) {
				pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m1 =
						(int32_t)data->pixel_clk_quad_eqn_a;
				pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m2 =
						(int32_t)data->pixel_clk_quad_eqn_b;
				pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].b =
						(int32_t)data->pixel_clk_quad_eqn_c;
			} else {
				pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m1 =
						(int32_t)avfs_params.ulPixelclk2GfxclkM1;
				pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m2 =
						(int32_t)avfs_params.ulPixelclk2GfxclkM2;
				pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].b =
						(int32_t)avfs_params.ulPixelclk2GfxclkB;
			}

			pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m1_shift = 24;
			pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m2_shift = 12;
			pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].b_shift = 12;

			/* PHYCLK -> GFXCLK curve, same override scheme. */
			if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
					data->phy_clk_quad_eqn_a) &&
				(PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
					data->phy_clk_quad_eqn_b)) {
				pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m1 =
						(int32_t)data->phy_clk_quad_eqn_a;
				pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m2 =
						(int32_t)data->phy_clk_quad_eqn_b;
				pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].b =
						(int32_t)data->phy_clk_quad_eqn_c;
			} else {
				pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m1 =
						(int32_t)avfs_params.ulPhyclk2GfxclkM1;
				pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m2 =
						(int32_t)avfs_params.ulPhyclk2GfxclkM2;
				pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].b =
						(int32_t)avfs_params.ulPhyclk2GfxclkB;
			}

			pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m1_shift = 24;
			pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m2_shift = 12;
			pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].b_shift = 12;
		} else {
			/* VBIOS has no AVFS data; drop the feature. */
			data->smu_features[GNLD_AVFS].supported = false;
		}
	}

	return 0;
}
2226
2227static int vega10_populate_gpio_parameters(struct pp_hwmgr *hwmgr)
2228{
2229 struct vega10_hwmgr *data =
2230 (struct vega10_hwmgr *)(hwmgr->backend);
2231 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
2232 struct pp_atomfwctrl_gpio_parameters gpio_params = {0};
2233 int result;
2234
2235 result = pp_atomfwctrl_get_gpio_information(hwmgr, &gpio_params);
2236 if (!result) {
2237 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2238 PHM_PlatformCaps_RegulatorHot) &&
2239 (data->registry_data.regulator_hot_gpio_support)) {
2240 pp_table->VR0HotGpio = gpio_params.ucVR0HotGpio;
2241 pp_table->VR0HotPolarity = gpio_params.ucVR0HotPolarity;
2242 pp_table->VR1HotGpio = gpio_params.ucVR1HotGpio;
2243 pp_table->VR1HotPolarity = gpio_params.ucVR1HotPolarity;
2244 } else {
2245 pp_table->VR0HotGpio = 0;
2246 pp_table->VR0HotPolarity = 0;
2247 pp_table->VR1HotGpio = 0;
2248 pp_table->VR1HotPolarity = 0;
2249 }
2250
2251 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2252 PHM_PlatformCaps_AutomaticDCTransition) &&
2253 (data->registry_data.ac_dc_switch_gpio_support)) {
2254 pp_table->AcDcGpio = gpio_params.ucAcDcGpio;
2255 pp_table->AcDcPolarity = gpio_params.ucAcDcPolarity;
2256 } else {
2257 pp_table->AcDcGpio = 0;
2258 pp_table->AcDcPolarity = 0;
2259 }
2260 }
2261
2262 return result;
2263}
2264
2265static int vega10_avfs_enable(struct pp_hwmgr *hwmgr, bool enable)
2266{
2267 struct vega10_hwmgr *data =
2268 (struct vega10_hwmgr *)(hwmgr->backend);
2269
2270 if (data->smu_features[GNLD_AVFS].supported) {
2271 if (enable) {
2272 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
2273 true,
2274 data->smu_features[GNLD_AVFS].smu_feature_bitmap),
2275 "[avfs_control] Attempt to Enable AVFS feature Failed!",
2276 return -1);
2277 data->smu_features[GNLD_AVFS].enabled = true;
2278 } else {
2279 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
2280 false,
2281 data->smu_features[GNLD_AVFS].smu_feature_id),
2282 "[avfs_control] Attempt to Disable AVFS feature Failed!",
2283 return -1);
2284 data->smu_features[GNLD_AVFS].enabled = false;
2285 }
2286 }
2287
2288 return 0;
2289}
2290
ab5cf3a5
RZ
/*
 * Look up a per-ASIC AVFS fuse override, keyed by the chip serial number
 * read from the SMC, and if one exists in vega10_fuses_default, copy its
 * VFT coefficients into the override table and upload it to the SMC.
 *
 * Returns 0 when there is no override or the upload succeeds, otherwise
 * the upload error code.
 */
static int vega10_populate_and_upload_avfs_fuse_override(struct pp_hwmgr *hwmgr)
{
	int result = 0;

	uint64_t serial_number = 0;
	uint32_t top32, bottom32;
	struct phm_fuses_default fuse;

	struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
	AvfsFuseOverride_t *avfs_fuse_table = &(data->smc_state_table.avfs_fuse_override_table);

	/* Read both halves of the chip serial number from the SMC.
	 * NOTE(review): the message/read return codes are ignored and
	 * top32/bottom32 stay uninitialized on failure -- assumes the SMC is
	 * reachable at this point; confirm with the init sequence. */
	smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_ReadSerialNumTop32);
	vega10_read_arg_from_smc(hwmgr->smumgr, &top32);

	smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_ReadSerialNumBottom32);
	vega10_read_arg_from_smc(hwmgr->smumgr, &bottom32);

	/* NOTE(review): bottom32 lands in the HIGH word and top32 in the low
	 * word. This looks inverted, but the key only has to match the layout
	 * used by the vega10_fuses_default table -- do not "fix" one side
	 * without checking the other. */
	serial_number = ((uint64_t)bottom32 << 32) | top32;

	if (pp_override_get_default_fuse_value(serial_number, vega10_fuses_default, &fuse) == 0) {
		/* Copy the per-chip VFT curve coefficients and push them. */
		avfs_fuse_table->VFT0_b = fuse.VFT0_b;
		avfs_fuse_table->VFT0_m1 = fuse.VFT0_m1;
		avfs_fuse_table->VFT0_m2 = fuse.VFT0_m2;
		avfs_fuse_table->VFT1_b = fuse.VFT1_b;
		avfs_fuse_table->VFT1_m1 = fuse.VFT1_m1;
		avfs_fuse_table->VFT1_m2 = fuse.VFT1_m2;
		avfs_fuse_table->VFT2_b = fuse.VFT2_b;
		avfs_fuse_table->VFT2_m1 = fuse.VFT2_m1;
		avfs_fuse_table->VFT2_m2 = fuse.VFT2_m2;
		result = vega10_copy_table_to_smc(hwmgr->smumgr,
			(uint8_t *)avfs_fuse_table, AVFSFUSETABLE);
		PP_ASSERT_WITH_CODE(!result,
			"Failed to upload FuseOVerride!",
			);
	}

	return result;
}
2329
d6c025d2
EH
2330static int vega10_save_default_power_profile(struct pp_hwmgr *hwmgr)
2331{
2332 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
2333 struct vega10_single_dpm_table *dpm_table = &(data->dpm_table.gfx_table);
2334 uint32_t min_level;
2335
2336 hwmgr->default_gfx_power_profile.type = AMD_PP_GFX_PROFILE;
2337 hwmgr->default_compute_power_profile.type = AMD_PP_COMPUTE_PROFILE;
2338
2339 /* Optimize compute power profile: Use only highest
2340 * 2 power levels (if more than 2 are available)
2341 */
2342 if (dpm_table->count > 2)
2343 min_level = dpm_table->count - 2;
2344 else if (dpm_table->count == 2)
2345 min_level = 1;
2346 else
2347 min_level = 0;
2348
2349 hwmgr->default_compute_power_profile.min_sclk =
2350 dpm_table->dpm_levels[min_level].value;
2351
2352 hwmgr->gfx_power_profile = hwmgr->default_gfx_power_profile;
2353 hwmgr->compute_power_profile = hwmgr->default_compute_power_profile;
2354
2355 return 0;
2356}
2357
f83a9991
EH
/**
* Initializes the SMC pptable and uploads it to the SMU.
*
* Builds every section of the table (DPM levels, link levels, display
* clocks, VCE/UVD, clock stretcher, AVFS, GPIOs), caches the VBIOS
* boot-up state, uploads the table, enables AVFS and saves the default
* power profiles.
*
* @param hwmgr the address of the powerplay hardware manager.
* @return 0 on success, otherwise the first error code encountered.
*/
static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
{
	int result;
	struct vega10_hwmgr *data =
			(struct vega10_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)(hwmgr->pptable);
	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
	struct pp_atomfwctrl_voltage_table voltage_table;
	struct pp_atomfwctrl_bios_boot_up_values boot_up_values;

	result = vega10_setup_default_dpm_tables(hwmgr);
	PP_ASSERT_WITH_CODE(!result,
			"Failed to setup default DPM tables!",
			return result);

	/* Max VID step comes from the SVID2 VDDC voltage object. */
	pp_atomfwctrl_get_voltage_table_v4(hwmgr, VOLTAGE_TYPE_VDDC,
			VOLTAGE_OBJ_SVID2, &voltage_table);
	pp_table->MaxVidStep = voltage_table.max_vid_step;

	/* Per-domain voltage-mode bytes straight from the pptable. */
	pp_table->GfxDpmVoltageMode =
			(uint8_t)(table_info->uc_gfx_dpm_voltage_mode);
	pp_table->SocDpmVoltageMode =
			(uint8_t)(table_info->uc_soc_dpm_voltage_mode);
	pp_table->UclkDpmVoltageMode =
			(uint8_t)(table_info->uc_uclk_dpm_voltage_mode);
	pp_table->UvdDpmVoltageMode =
			(uint8_t)(table_info->uc_uvd_dpm_voltage_mode);
	pp_table->VceDpmVoltageMode =
			(uint8_t)(table_info->uc_vce_dpm_voltage_mode);
	pp_table->Mp0DpmVoltageMode =
			(uint8_t)(table_info->uc_mp0_dpm_voltage_mode);

	pp_table->DisplayDpmVoltageMode =
			(uint8_t)(table_info->uc_dcef_dpm_voltage_mode);

	/* ULV is populated only when supported AND the pptable carries a
	 * non-zero voltage offset for it. */
	if (data->registry_data.ulv_support &&
			table_info->us_ulv_voltage_offset) {
		result = vega10_populate_ulv_state(hwmgr);
		PP_ASSERT_WITH_CODE(!result,
				"Failed to initialize ULV state!",
				return result);
	}

	result = vega10_populate_smc_link_levels(hwmgr);
	PP_ASSERT_WITH_CODE(!result,
			"Failed to initialize Link Level!",
			return result);

	result = vega10_populate_all_graphic_levels(hwmgr);
	PP_ASSERT_WITH_CODE(!result,
			"Failed to initialize Graphics Level!",
			return result);

	result = vega10_populate_all_memory_levels(hwmgr);
	PP_ASSERT_WITH_CODE(!result,
			"Failed to initialize Memory Level!",
			return result);

	result = vega10_populate_all_display_clock_levels(hwmgr);
	PP_ASSERT_WITH_CODE(!result,
			"Failed to initialize Display Level!",
			return result);

	result = vega10_populate_smc_vce_levels(hwmgr);
	PP_ASSERT_WITH_CODE(!result,
			"Failed to initialize VCE Level!",
			return result);

	result = vega10_populate_smc_uvd_levels(hwmgr);
	PP_ASSERT_WITH_CODE(!result,
			"Failed to initialize UVD Level!",
			return result);

	if (data->registry_data.clock_stretcher_support) {
		result = vega10_populate_clock_stretcher_table(hwmgr);
		PP_ASSERT_WITH_CODE(!result,
				"Failed to populate Clock Stretcher Table!",
				return result);
	}

	/* Cache the VBIOS boot-up state; when a boot VDDC exists, lock the
	 * SOC voltage floor to it until DPM is started (see
	 * vega10_start_dpm, which releases bsoc_vddc_lock). */
	result = pp_atomfwctrl_get_vbios_bootup_values(hwmgr, &boot_up_values);
	if (!result) {
		data->vbios_boot_state.vddc     = boot_up_values.usVddc;
		data->vbios_boot_state.vddci    = boot_up_values.usVddci;
		data->vbios_boot_state.mvddc    = boot_up_values.usMvddc;
		data->vbios_boot_state.gfx_clock = boot_up_values.ulGfxClk;
		data->vbios_boot_state.mem_clock = boot_up_values.ulUClk;
		data->vbios_boot_state.soc_clock = boot_up_values.ulSocClk;
		if (0 != boot_up_values.usVddc) {
			/* Floor voltage message takes the value * 4 --
			 * presumably a unit conversion; TODO confirm against
			 * the SMC message spec. */
			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
					PPSMC_MSG_SetFloorSocVoltage,
					(boot_up_values.usVddc * 4));
			data->vbios_boot_state.bsoc_vddc_lock = true;
		} else {
			data->vbios_boot_state.bsoc_vddc_lock = false;
		}
	}

	result = vega10_populate_avfs_parameters(hwmgr);
	PP_ASSERT_WITH_CODE(!result,
			"Failed to initialize AVFS Parameters!",
			return result);

	result = vega10_populate_gpio_parameters(hwmgr);
	PP_ASSERT_WITH_CODE(!result,
			"Failed to initialize GPIO Parameters!",
			return result);

	/* Averaging-window alpha coefficients for telemetry. */
	pp_table->GfxclkAverageAlpha = (uint8_t)
			(data->gfxclk_average_alpha);
	pp_table->SocclkAverageAlpha = (uint8_t)
			(data->socclk_average_alpha);
	pp_table->UclkAverageAlpha = (uint8_t)
			(data->uclk_average_alpha);
	pp_table->GfxActivityAverageAlpha = (uint8_t)
			(data->gfx_activity_average_alpha);

	/* Best-effort: failure to upload a fuse override is not fatal. */
	vega10_populate_and_upload_avfs_fuse_override(hwmgr);

	result = vega10_copy_table_to_smc(hwmgr->smumgr,
			(uint8_t *)pp_table, PPTABLE);
	PP_ASSERT_WITH_CODE(!result,
			"Failed to upload PPtable!", return result);

	result = vega10_avfs_enable(hwmgr, true);
	PP_ASSERT_WITH_CODE(!result, "Attempt to enable AVFS feature Failed!",
					return result);

	vega10_save_default_power_profile(hwmgr);

	return 0;
}
2498
2499static int vega10_enable_thermal_protection(struct pp_hwmgr *hwmgr)
2500{
2501 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
2502
2503 if (data->smu_features[GNLD_THERMAL].supported) {
2504 if (data->smu_features[GNLD_THERMAL].enabled)
2505 pr_info("THERMAL Feature Already enabled!");
2506
2507 PP_ASSERT_WITH_CODE(
2508 !vega10_enable_smc_features(hwmgr->smumgr,
2509 true,
2510 data->smu_features[GNLD_THERMAL].smu_feature_bitmap),
2511 "Enable THERMAL Feature Failed!",
2512 return -1);
2513 data->smu_features[GNLD_THERMAL].enabled = true;
2514 }
2515
2516 return 0;
2517}
2518
8b9242ed
RZ
2519static int vega10_disable_thermal_protection(struct pp_hwmgr *hwmgr)
2520{
2521 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
2522
2523 if (data->smu_features[GNLD_THERMAL].supported) {
2524 if (!data->smu_features[GNLD_THERMAL].enabled)
2525 pr_info("THERMAL Feature Already disabled!");
2526
2527 PP_ASSERT_WITH_CODE(
2528 !vega10_enable_smc_features(hwmgr->smumgr,
2529 false,
2530 data->smu_features[GNLD_THERMAL].smu_feature_bitmap),
2531 "disable THERMAL Feature Failed!",
2532 return -1);
2533 data->smu_features[GNLD_THERMAL].enabled = false;
2534 }
2535
2536 return 0;
2537}
2538
f83a9991
EH
2539static int vega10_enable_vrhot_feature(struct pp_hwmgr *hwmgr)
2540{
2541 struct vega10_hwmgr *data =
2542 (struct vega10_hwmgr *)(hwmgr->backend);
2543
2544 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2545 PHM_PlatformCaps_RegulatorHot)) {
2546 if (data->smu_features[GNLD_VR0HOT].supported) {
2547 PP_ASSERT_WITH_CODE(
2548 !vega10_enable_smc_features(hwmgr->smumgr,
2549 true,
2550 data->smu_features[GNLD_VR0HOT].smu_feature_bitmap),
2551 "Attempt to Enable VR0 Hot feature Failed!",
2552 return -1);
2553 data->smu_features[GNLD_VR0HOT].enabled = true;
2554 } else {
2555 if (data->smu_features[GNLD_VR1HOT].supported) {
2556 PP_ASSERT_WITH_CODE(
2557 !vega10_enable_smc_features(hwmgr->smumgr,
2558 true,
2559 data->smu_features[GNLD_VR1HOT].smu_feature_bitmap),
2560 "Attempt to Enable VR0 Hot feature Failed!",
2561 return -1);
2562 data->smu_features[GNLD_VR1HOT].enabled = true;
2563 }
2564 }
2565 }
2566 return 0;
2567}
2568
2569static int vega10_enable_ulv(struct pp_hwmgr *hwmgr)
2570{
2571 struct vega10_hwmgr *data =
2572 (struct vega10_hwmgr *)(hwmgr->backend);
2573
2574 if (data->registry_data.ulv_support) {
2575 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
2576 true, data->smu_features[GNLD_ULV].smu_feature_bitmap),
2577 "Enable ULV Feature Failed!",
2578 return -1);
2579 data->smu_features[GNLD_ULV].enabled = true;
2580 }
2581
2582 return 0;
2583}
2584
4022e4f2
RZ
2585static int vega10_disable_ulv(struct pp_hwmgr *hwmgr)
2586{
2587 struct vega10_hwmgr *data =
2588 (struct vega10_hwmgr *)(hwmgr->backend);
2589
2590 if (data->registry_data.ulv_support) {
2591 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
2592 false, data->smu_features[GNLD_ULV].smu_feature_bitmap),
2593 "disable ULV Feature Failed!",
2594 return -EINVAL);
2595 data->smu_features[GNLD_ULV].enabled = false;
2596 }
2597
2598 return 0;
2599}
2600
f83a9991
EH
/*
 * Enable every supported deep-sleep clock feature (GFXCLK, SOCCLK, LCLK,
 * DCEFCLK) on the SMU, in that order; unsupported features are skipped.
 * Returns 0 on success, -EINVAL as soon as any enable request fails.
 */
static int vega10_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data =
			(struct vega10_hwmgr *)(hwmgr->backend);

	if (data->smu_features[GNLD_DS_GFXCLK].supported) {
		PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
				true, data->smu_features[GNLD_DS_GFXCLK].smu_feature_bitmap),
				"Attempt to Enable DS_GFXCLK Feature Failed!",
				return -EINVAL);
		data->smu_features[GNLD_DS_GFXCLK].enabled = true;
	}

	if (data->smu_features[GNLD_DS_SOCCLK].supported) {
		PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
				true, data->smu_features[GNLD_DS_SOCCLK].smu_feature_bitmap),
				"Attempt to Enable DS_SOCCLK Feature Failed!",
				return -EINVAL);
		data->smu_features[GNLD_DS_SOCCLK].enabled = true;
	}

	if (data->smu_features[GNLD_DS_LCLK].supported) {
		PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
				true, data->smu_features[GNLD_DS_LCLK].smu_feature_bitmap),
				"Attempt to Enable DS_LCLK Feature Failed!",
				return -EINVAL);
		data->smu_features[GNLD_DS_LCLK].enabled = true;
	}

	if (data->smu_features[GNLD_DS_DCEFCLK].supported) {
		PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
				true, data->smu_features[GNLD_DS_DCEFCLK].smu_feature_bitmap),
				"Attempt to Enable DS_DCEFCLK Feature Failed!",
				return -EINVAL);
		data->smu_features[GNLD_DS_DCEFCLK].enabled = true;
	}

	return 0;
}
2640
2641static int vega10_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
2642{
2643 struct vega10_hwmgr *data =
2644 (struct vega10_hwmgr *)(hwmgr->backend);
2645
2646 if (data->smu_features[GNLD_DS_GFXCLK].supported) {
2647 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
2648 false, data->smu_features[GNLD_DS_GFXCLK].smu_feature_bitmap),
2649 "Attempt to disable DS_GFXCLK Feature Failed!",
2650 return -EINVAL);
2651 data->smu_features[GNLD_DS_GFXCLK].enabled = false;
2652 }
2653
2654 if (data->smu_features[GNLD_DS_SOCCLK].supported) {
2655 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
2656 false, data->smu_features[GNLD_DS_SOCCLK].smu_feature_bitmap),
2657 "Attempt to disable DS_ Feature Failed!",
2658 return -EINVAL);
2659 data->smu_features[GNLD_DS_SOCCLK].enabled = false;
2660 }
2661
2662 if (data->smu_features[GNLD_DS_LCLK].supported) {
2663 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
2664 false, data->smu_features[GNLD_DS_LCLK].smu_feature_bitmap),
2665 "Attempt to disable DS_LCLK Feature Failed!",
2666 return -EINVAL);
2667 data->smu_features[GNLD_DS_LCLK].enabled = false;
2668 }
2669
2670 if (data->smu_features[GNLD_DS_DCEFCLK].supported) {
2671 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
2672 false, data->smu_features[GNLD_DS_DCEFCLK].smu_feature_bitmap),
2673 "Attempt to disable DS_DCEFCLK Feature Failed!",
2674 return -EINVAL);
2675 data->smu_features[GNLD_DS_DCEFCLK].enabled = false;
2676 }
2677
f83a9991
EH
2678 return 0;
2679}
2680
8b9242ed
RZ
2681static int vega10_stop_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap)
2682{
2683 struct vega10_hwmgr *data =
2684 (struct vega10_hwmgr *)(hwmgr->backend);
2685 uint32_t i, feature_mask = 0;
2686
2687
2688 if(data->smu_features[GNLD_LED_DISPLAY].supported == true){
2689 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
2690 true, data->smu_features[GNLD_LED_DISPLAY].smu_feature_bitmap),
2691 "Attempt to Enable LED DPM feature Failed!", return -EINVAL);
2692 data->smu_features[GNLD_LED_DISPLAY].enabled = true;
2693 }
2694
2695 for (i = 0; i < GNLD_DPM_MAX; i++) {
2696 if (data->smu_features[i].smu_feature_bitmap & bitmap) {
2697 if (data->smu_features[i].supported) {
2698 if (data->smu_features[i].enabled) {
2699 feature_mask |= data->smu_features[i].
2700 smu_feature_bitmap;
2701 data->smu_features[i].enabled = false;
2702 }
2703 }
2704 }
2705 }
2706
2707 vega10_enable_smc_features(hwmgr->smumgr, false, feature_mask);
2708
2709 return 0;
2710}
2711
f83a9991
EH
2712/**
2713 * @brief Tell SMC to enabled the supported DPMs.
2714 *
2715 * @param hwmgr - the address of the powerplay hardware manager.
2716 * @Param bitmap - bitmap for the features to enabled.
2717 * @return 0 on at least one DPM is successfully enabled.
2718 */
2719static int vega10_start_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap)
2720{
2721 struct vega10_hwmgr *data =
2722 (struct vega10_hwmgr *)(hwmgr->backend);
2723 uint32_t i, feature_mask = 0;
2724
2725 for (i = 0; i < GNLD_DPM_MAX; i++) {
2726 if (data->smu_features[i].smu_feature_bitmap & bitmap) {
2727 if (data->smu_features[i].supported) {
2728 if (!data->smu_features[i].enabled) {
2729 feature_mask |= data->smu_features[i].
2730 smu_feature_bitmap;
2731 data->smu_features[i].enabled = true;
2732 }
2733 }
2734 }
2735 }
2736
2737 if (vega10_enable_smc_features(hwmgr->smumgr,
2738 true, feature_mask)) {
2739 for (i = 0; i < GNLD_DPM_MAX; i++) {
2740 if (data->smu_features[i].smu_feature_bitmap &
2741 feature_mask)
2742 data->smu_features[i].enabled = false;
2743 }
2744 }
2745
2746 if(data->smu_features[GNLD_LED_DISPLAY].supported == true){
2747 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
2748 true, data->smu_features[GNLD_LED_DISPLAY].smu_feature_bitmap),
2749 "Attempt to Enable LED DPM feature Failed!", return -EINVAL);
2750 data->smu_features[GNLD_LED_DISPLAY].enabled = true;
2751 }
2752
05ee3215
RZ
2753 if (data->vbios_boot_state.bsoc_vddc_lock) {
2754 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
2755 PPSMC_MSG_SetFloorSocVoltage, 0);
2756 data->vbios_boot_state.bsoc_vddc_lock = false;
2757 }
2758
f83a9991
EH
2759 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2760 PHM_PlatformCaps_Falcon_QuickTransition)) {
2761 if (data->smu_features[GNLD_ACDC].supported) {
2762 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
2763 true, data->smu_features[GNLD_ACDC].smu_feature_bitmap),
2764 "Attempt to Enable DS_GFXCLK Feature Failed!",
2765 return -1);
2766 data->smu_features[GNLD_ACDC].enabled = true;
2767 }
2768 }
2769
2770 return 0;
2771}
2772
2773static int vega10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
2774{
2775 struct vega10_hwmgr *data =
2776 (struct vega10_hwmgr *)(hwmgr->backend);
2777 int tmp_result, result = 0;
2778
2779 tmp_result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
2780 PPSMC_MSG_ConfigureTelemetry, data->config_telemetry);
2781 PP_ASSERT_WITH_CODE(!tmp_result,
2782 "Failed to configure telemetry!",
2783 return tmp_result);
2784
f83a9991
EH
2785 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
2786 PPSMC_MSG_NumOfDisplays, 0);
2787
2788 tmp_result = (!vega10_is_dpm_running(hwmgr)) ? 0 : -1;
2789 PP_ASSERT_WITH_CODE(!tmp_result,
2790 "DPM is already running right , skipping re-enablement!",
2791 return 0);
2792
2793 tmp_result = vega10_construct_voltage_tables(hwmgr);
2794 PP_ASSERT_WITH_CODE(!tmp_result,
2795 "Failed to contruct voltage tables!",
2796 result = tmp_result);
2797
2798 tmp_result = vega10_init_smc_table(hwmgr);
2799 PP_ASSERT_WITH_CODE(!tmp_result,
2800 "Failed to initialize SMC table!",
2801 result = tmp_result);
2802
2803 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2804 PHM_PlatformCaps_ThermalController)) {
2805 tmp_result = vega10_enable_thermal_protection(hwmgr);
2806 PP_ASSERT_WITH_CODE(!tmp_result,
2807 "Failed to enable thermal protection!",
2808 result = tmp_result);
2809 }
2810
2811 tmp_result = vega10_enable_vrhot_feature(hwmgr);
2812 PP_ASSERT_WITH_CODE(!tmp_result,
2813 "Failed to enable VR hot feature!",
2814 result = tmp_result);
2815
f83a9991
EH
2816 tmp_result = vega10_enable_deep_sleep_master_switch(hwmgr);
2817 PP_ASSERT_WITH_CODE(!tmp_result,
2818 "Failed to enable deep sleep master switch!",
2819 result = tmp_result);
2820
2821 tmp_result = vega10_start_dpm(hwmgr, SMC_DPM_FEATURES);
2822 PP_ASSERT_WITH_CODE(!tmp_result,
2823 "Failed to start DPM!", result = tmp_result);
2824
2825 tmp_result = vega10_enable_power_containment(hwmgr);
2826 PP_ASSERT_WITH_CODE(!tmp_result,
2827 "Failed to enable power containment!",
2828 result = tmp_result);
2829
2830 tmp_result = vega10_power_control_set_level(hwmgr);
2831 PP_ASSERT_WITH_CODE(!tmp_result,
2832 "Failed to power control set level!",
2833 result = tmp_result);
2834
4022e4f2
RZ
2835 tmp_result = vega10_enable_ulv(hwmgr);
2836 PP_ASSERT_WITH_CODE(!tmp_result,
2837 "Failed to enable ULV!",
2838 result = tmp_result);
2839
f83a9991
EH
2840 return result;
2841}
2842
2843static int vega10_get_power_state_size(struct pp_hwmgr *hwmgr)
2844{
2845 return sizeof(struct vega10_power_state);
2846}
2847
/* Callback invoked once per powerplay-table state entry: translates the
 * ATOM-format state into the generic pp_power_state fields and appends two
 * Vega10 performance levels (a "low" and a "high" level) built from the
 * SOCCLK/GFXCLK/MCLK dependency tables. */
static int vega10_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr,
		void *state, struct pp_power_state *power_state,
		void *pp_table, uint32_t classification_flag)
{
	struct vega10_power_state *vega10_power_state =
			cast_phw_vega10_power_state(&(power_state->hardware));
	struct vega10_performance_level *performance_level;
	ATOM_Vega10_State *state_entry = (ATOM_Vega10_State *)state;
	ATOM_Vega10_POWERPLAYTABLE *powerplay_table =
			(ATOM_Vega10_POWERPLAYTABLE *)pp_table;
	/* The dependency tables live at byte offsets inside the powerplay
	 * table; offsets are stored little-endian in the VBIOS image. */
	ATOM_Vega10_SOCCLK_Dependency_Table *socclk_dep_table =
			(ATOM_Vega10_SOCCLK_Dependency_Table *)
			(((unsigned long)powerplay_table) +
			le16_to_cpu(powerplay_table->usSocclkDependencyTableOffset));
	ATOM_Vega10_GFXCLK_Dependency_Table *gfxclk_dep_table =
			(ATOM_Vega10_GFXCLK_Dependency_Table *)
			(((unsigned long)powerplay_table) +
			le16_to_cpu(powerplay_table->usGfxclkDependencyTableOffset));
	ATOM_Vega10_MCLK_Dependency_Table *mclk_dep_table =
			(ATOM_Vega10_MCLK_Dependency_Table *)
			(((unsigned long)powerplay_table) +
			le16_to_cpu(powerplay_table->usMclkDependencyTableOffset));


	/* The following fields are not initialized here:
	 * id orderedList allStatesList
	 */
	power_state->classification.ui_label =
			(le16_to_cpu(state_entry->usClassification) &
			ATOM_PPLIB_CLASSIFICATION_UI_MASK) >>
			ATOM_PPLIB_CLASSIFICATION_UI_SHIFT;
	power_state->classification.flags = classification_flag;
	/* NOTE: There is a classification2 flag in BIOS
	 * that is not being used right now
	 */
	power_state->classification.temporary_state = false;
	power_state->classification.to_be_deleted = false;

	power_state->validation.disallowOnDC =
			((le32_to_cpu(state_entry->ulCapsAndSettings) &
			ATOM_Vega10_DISALLOW_ON_DC) != 0);

	power_state->display.disableFrameModulation = false;
	power_state->display.limitRefreshrate = false;
	power_state->display.enableVariBright =
			((le32_to_cpu(state_entry->ulCapsAndSettings) &
			ATOM_Vega10_ENABLE_VARIBRIGHT) != 0);

	power_state->validation.supportedPowerLevels = 0;
	power_state->uvd_clocks.VCLK = 0;
	power_state->uvd_clocks.DCLK = 0;
	power_state->temperatures.min = 0;
	power_state->temperatures.max = 0;

	/* Low level.  Note the post-increment: the bounds checks below
	 * validate the count *after* this level has been claimed. */
	performance_level = &(vega10_power_state->performance_levels
			[vega10_power_state->performance_level_count++]);

	PP_ASSERT_WITH_CODE(
			(vega10_power_state->performance_level_count <
					NUM_GFXCLK_DPM_LEVELS),
			"Performance levels exceeds SMC limit!",
			return -1);

	PP_ASSERT_WITH_CODE(
			(vega10_power_state->performance_level_count <=
					hwmgr->platform_descriptor.
					hardwareActivityPerformanceLevels),
			"Performance levels exceeds Driver limit!",
			return -1);

	/* Performance levels are arranged from low to high. */
	performance_level->soc_clock = socclk_dep_table->entries
			[state_entry->ucSocClockIndexLow].ulClk;
	performance_level->gfx_clock = gfxclk_dep_table->entries
			[state_entry->ucGfxClockIndexLow].ulClk;
	performance_level->mem_clock = mclk_dep_table->entries
			[state_entry->ucMemClockIndexLow].ulMemClk;

	/* High level, indexed by the state's "high" clock indices.
	 * NOTE(review): no bounds re-check before this second increment --
	 * relies on the asserts above leaving room for one more level. */
	performance_level = &(vega10_power_state->performance_levels
			[vega10_power_state->performance_level_count++]);

	performance_level->soc_clock = socclk_dep_table->entries
			[state_entry->ucSocClockIndexHigh].ulClk;
	performance_level->gfx_clock = gfxclk_dep_table->entries
			[state_entry->ucGfxClockIndexHigh].ulClk;
	performance_level->mem_clock = mclk_dep_table->entries
			[state_entry->ucMemClockIndexHigh].ulMemClk;
	return 0;
}
2937
2938static int vega10_get_pp_table_entry(struct pp_hwmgr *hwmgr,
2939 unsigned long entry_index, struct pp_power_state *state)
2940{
2941 int result;
2942 struct vega10_power_state *ps;
2943
2944 state->hardware.magic = PhwVega10_Magic;
2945
2946 ps = cast_phw_vega10_power_state(&state->hardware);
2947
2948 result = vega10_get_powerplay_table_entry(hwmgr, entry_index, state,
2949 vega10_get_pp_table_entry_callback_func);
2950
2951 /*
2952 * This is the earliest time we have all the dependency table
2953 * and the VBIOS boot state
2954 */
2955 /* set DC compatible flag if this state supports DC */
2956 if (!state->validation.disallowOnDC)
2957 ps->dc_compatible = true;
2958
2959 ps->uvd_clks.vclk = state->uvd_clocks.VCLK;
2960 ps->uvd_clks.dclk = state->uvd_clocks.DCLK;
2961
2962 return 0;
2963}
2964
/* Boot-state patch hook required by the hwmgr interface; Vega10 needs no
 * adjustment of the boot power state, so this is a deliberate no-op. */
static int vega10_patch_boot_state(struct pp_hwmgr *hwmgr,
		struct pp_hw_power_state *hw_ps)
{
	return 0;
}
2970
2971static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
2972 struct pp_power_state *request_ps,
2973 const struct pp_power_state *current_ps)
2974{
2975 struct vega10_power_state *vega10_ps =
2976 cast_phw_vega10_power_state(&request_ps->hardware);
2977 uint32_t sclk;
2978 uint32_t mclk;
2979 struct PP_Clocks minimum_clocks = {0};
2980 bool disable_mclk_switching;
2981 bool disable_mclk_switching_for_frame_lock;
2982 bool disable_mclk_switching_for_vr;
2983 bool force_mclk_high;
2984 struct cgs_display_info info = {0};
2985 const struct phm_clock_and_voltage_limits *max_limits;
2986 uint32_t i;
2987 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
2988 struct phm_ppt_v2_information *table_info =
2989 (struct phm_ppt_v2_information *)(hwmgr->pptable);
2990 int32_t count;
2991 uint32_t stable_pstate_sclk_dpm_percentage;
2992 uint32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0;
2993 uint32_t latency;
2994
2995 data->battery_state = (PP_StateUILabel_Battery ==
2996 request_ps->classification.ui_label);
2997
2998 if (vega10_ps->performance_level_count != 2)
2999 pr_info("VI should always have 2 performance levels");
3000
3001 max_limits = (PP_PowerSource_AC == hwmgr->power_source) ?
3002 &(hwmgr->dyn_state.max_clock_voltage_on_ac) :
3003 &(hwmgr->dyn_state.max_clock_voltage_on_dc);
3004
3005 /* Cap clock DPM tables at DC MAX if it is in DC. */
3006 if (PP_PowerSource_DC == hwmgr->power_source) {
3007 for (i = 0; i < vega10_ps->performance_level_count; i++) {
3008 if (vega10_ps->performance_levels[i].mem_clock >
3009 max_limits->mclk)
3010 vega10_ps->performance_levels[i].mem_clock =
3011 max_limits->mclk;
3012 if (vega10_ps->performance_levels[i].gfx_clock >
3013 max_limits->sclk)
3014 vega10_ps->performance_levels[i].gfx_clock =
3015 max_limits->sclk;
3016 }
3017 }
3018
3019 vega10_ps->vce_clks.evclk = hwmgr->vce_arbiter.evclk;
3020 vega10_ps->vce_clks.ecclk = hwmgr->vce_arbiter.ecclk;
3021
3022 cgs_get_active_displays_info(hwmgr->device, &info);
3023
3024 /* result = PHM_CheckVBlankTime(hwmgr, &vblankTooShort);*/
3025 minimum_clocks.engineClock = hwmgr->display_config.min_core_set_clock;
75f0e32b 3026 minimum_clocks.memoryClock = hwmgr->display_config.min_mem_set_clock;
f83a9991
EH
3027
3028 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3029 PHM_PlatformCaps_StablePState)) {
3030 PP_ASSERT_WITH_CODE(
3031 data->registry_data.stable_pstate_sclk_dpm_percentage >= 1 &&
3032 data->registry_data.stable_pstate_sclk_dpm_percentage <= 100,
3033 "percent sclk value must range from 1% to 100%, setting default value",
3034 stable_pstate_sclk_dpm_percentage = 75);
3035
3036 max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac);
3037 stable_pstate_sclk = (max_limits->sclk *
3038 stable_pstate_sclk_dpm_percentage) / 100;
3039
3040 for (count = table_info->vdd_dep_on_sclk->count - 1;
3041 count >= 0; count--) {
3042 if (stable_pstate_sclk >=
3043 table_info->vdd_dep_on_sclk->entries[count].clk) {
3044 stable_pstate_sclk =
3045 table_info->vdd_dep_on_sclk->entries[count].clk;
3046 break;
3047 }
3048 }
3049
3050 if (count < 0)
3051 stable_pstate_sclk = table_info->vdd_dep_on_sclk->entries[0].clk;
3052
3053 stable_pstate_mclk = max_limits->mclk;
3054
3055 minimum_clocks.engineClock = stable_pstate_sclk;
3056 minimum_clocks.memoryClock = stable_pstate_mclk;
3057 }
3058
3059 if (minimum_clocks.engineClock < hwmgr->gfx_arbiter.sclk)
3060 minimum_clocks.engineClock = hwmgr->gfx_arbiter.sclk;
3061
3062 if (minimum_clocks.memoryClock < hwmgr->gfx_arbiter.mclk)
3063 minimum_clocks.memoryClock = hwmgr->gfx_arbiter.mclk;
3064
3065 vega10_ps->sclk_threshold = hwmgr->gfx_arbiter.sclk_threshold;
3066
3067 if (hwmgr->gfx_arbiter.sclk_over_drive) {
3068 PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.sclk_over_drive <=
3069 hwmgr->platform_descriptor.overdriveLimit.engineClock),
3070 "Overdrive sclk exceeds limit",
3071 hwmgr->gfx_arbiter.sclk_over_drive =
3072 hwmgr->platform_descriptor.overdriveLimit.engineClock);
3073
3074 if (hwmgr->gfx_arbiter.sclk_over_drive >= hwmgr->gfx_arbiter.sclk)
3075 vega10_ps->performance_levels[1].gfx_clock =
3076 hwmgr->gfx_arbiter.sclk_over_drive;
3077 }
3078
3079 if (hwmgr->gfx_arbiter.mclk_over_drive) {
3080 PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.mclk_over_drive <=
3081 hwmgr->platform_descriptor.overdriveLimit.memoryClock),
3082 "Overdrive mclk exceeds limit",
3083 hwmgr->gfx_arbiter.mclk_over_drive =
3084 hwmgr->platform_descriptor.overdriveLimit.memoryClock);
3085
3086 if (hwmgr->gfx_arbiter.mclk_over_drive >= hwmgr->gfx_arbiter.mclk)
3087 vega10_ps->performance_levels[1].mem_clock =
3088 hwmgr->gfx_arbiter.mclk_over_drive;
3089 }
3090
3091 disable_mclk_switching_for_frame_lock = phm_cap_enabled(
3092 hwmgr->platform_descriptor.platformCaps,
3093 PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
3094 disable_mclk_switching_for_vr = phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3095 PHM_PlatformCaps_DisableMclkSwitchForVR);
3096 force_mclk_high = phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3097 PHM_PlatformCaps_ForceMclkHigh);
3098
3099 disable_mclk_switching = (info.display_count > 1) ||
3100 disable_mclk_switching_for_frame_lock ||
3101 disable_mclk_switching_for_vr ||
3102 force_mclk_high;
3103
3104 sclk = vega10_ps->performance_levels[0].gfx_clock;
3105 mclk = vega10_ps->performance_levels[0].mem_clock;
3106
3107 if (sclk < minimum_clocks.engineClock)
3108 sclk = (minimum_clocks.engineClock > max_limits->sclk) ?
3109 max_limits->sclk : minimum_clocks.engineClock;
3110
3111 if (mclk < minimum_clocks.memoryClock)
3112 mclk = (minimum_clocks.memoryClock > max_limits->mclk) ?
3113 max_limits->mclk : minimum_clocks.memoryClock;
3114
3115 vega10_ps->performance_levels[0].gfx_clock = sclk;
3116 vega10_ps->performance_levels[0].mem_clock = mclk;
3117
3118 vega10_ps->performance_levels[1].gfx_clock =
3119 (vega10_ps->performance_levels[1].gfx_clock >=
3120 vega10_ps->performance_levels[0].gfx_clock) ?
3121 vega10_ps->performance_levels[1].gfx_clock :
3122 vega10_ps->performance_levels[0].gfx_clock;
3123
3124 if (disable_mclk_switching) {
3125 /* Set Mclk the max of level 0 and level 1 */
3126 if (mclk < vega10_ps->performance_levels[1].mem_clock)
3127 mclk = vega10_ps->performance_levels[1].mem_clock;
3128
3129 /* Find the lowest MCLK frequency that is within
3130 * the tolerable latency defined in DAL
3131 */
3132 latency = 0;
3133 for (i = 0; i < data->mclk_latency_table.count; i++) {
3134 if ((data->mclk_latency_table.entries[i].latency <= latency) &&
3135 (data->mclk_latency_table.entries[i].frequency >=
3136 vega10_ps->performance_levels[0].mem_clock) &&
3137 (data->mclk_latency_table.entries[i].frequency <=
3138 vega10_ps->performance_levels[1].mem_clock))
3139 mclk = data->mclk_latency_table.entries[i].frequency;
3140 }
3141 vega10_ps->performance_levels[0].mem_clock = mclk;
3142 } else {
3143 if (vega10_ps->performance_levels[1].mem_clock <
3144 vega10_ps->performance_levels[0].mem_clock)
3145 vega10_ps->performance_levels[1].mem_clock =
3146 vega10_ps->performance_levels[0].mem_clock;
3147 }
3148
3149 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3150 PHM_PlatformCaps_StablePState)) {
3151 for (i = 0; i < vega10_ps->performance_level_count; i++) {
3152 vega10_ps->performance_levels[i].gfx_clock = stable_pstate_sclk;
3153 vega10_ps->performance_levels[i].mem_clock = stable_pstate_mclk;
3154 }
3155 }
3156
3157 return 0;
3158}
3159
/* Inspect the new power state against the current SCLK/MCLK DPM tables and
 * the display configuration, and record in data->need_update_dpm_table
 * which tables must be re-populated (DPMTABLE_* flags) before the state is
 * applied. */
static int vega10_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input)
{
	const struct phm_set_power_state_input *states =
			(const struct phm_set_power_state_input *)input;
	const struct vega10_power_state *vega10_ps =
			cast_const_phw_vega10_power_state(states->pnew_state);
	struct vega10_hwmgr *data =
			(struct vega10_hwmgr *)(hwmgr->backend);
	struct vega10_single_dpm_table *sclk_table =
			&(data->dpm_table.gfx_table);
	/* Target clocks come from the highest performance level. */
	uint32_t sclk = vega10_ps->performance_levels
			[vega10_ps->performance_level_count - 1].gfx_clock;
	struct vega10_single_dpm_table *mclk_table =
			&(data->dpm_table.mem_table);
	uint32_t mclk = vega10_ps->performance_levels
			[vega10_ps->performance_level_count - 1].mem_clock;
	/* NOTE(review): min_clocks is zero-initialized and never populated,
	 * so the DeepSleep comparisons below effectively test against 0 --
	 * presumably it should be filled from the DAL display config;
	 * confirm before relying on the SCLK deep-sleep update path. */
	struct PP_Clocks min_clocks = {0};
	uint32_t i;
	struct cgs_display_info info = {0};

	data->need_update_dpm_table = 0;

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_ODNinACSupport) ||
		phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_ODNinDCSupport)) {
		/* ODN path: overdrive-next owns OD flags; only flag plain
		 * SCLK/MCLK updates here. */
		for (i = 0; i < sclk_table->count; i++) {
			if (sclk == sclk_table->dpm_levels[i].value)
				break;
		}

		if (!(data->apply_overdrive_next_settings_mask &
				DPMTABLE_OD_UPDATE_SCLK) && i >= sclk_table->count) {
			/* Check SCLK in DAL's minimum clocks
			 * in case DeepSleep divider update is required.
			 */
			if (data->display_timing.min_clock_in_sr !=
					min_clocks.engineClockInSR &&
				(min_clocks.engineClockInSR >=
						VEGA10_MINIMUM_ENGINE_CLOCK ||
					data->display_timing.min_clock_in_sr >=
						VEGA10_MINIMUM_ENGINE_CLOCK))
				data->need_update_dpm_table |= DPMTABLE_UPDATE_SCLK;
		}

		cgs_get_active_displays_info(hwmgr->device, &info);

		/* A change in active display count requires an MCLK update. */
		if (data->display_timing.num_existing_displays !=
				info.display_count)
			data->need_update_dpm_table |= DPMTABLE_UPDATE_MCLK;
	} else {
		/* Non-ODN path: a target clock absent from the table means
		 * the table itself must be rebuilt (OD_UPDATE flags). */
		for (i = 0; i < sclk_table->count; i++) {
			if (sclk == sclk_table->dpm_levels[i].value)
				break;
		}

		if (i >= sclk_table->count)
			data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
		else {
			/* Check SCLK in DAL's minimum clocks
			 * in case DeepSleep divider update is required.
			 */
			if (data->display_timing.min_clock_in_sr !=
					min_clocks.engineClockInSR &&
				(min_clocks.engineClockInSR >=
						VEGA10_MINIMUM_ENGINE_CLOCK ||
					data->display_timing.min_clock_in_sr >=
						VEGA10_MINIMUM_ENGINE_CLOCK))
				data->need_update_dpm_table |= DPMTABLE_UPDATE_SCLK;
		}

		for (i = 0; i < mclk_table->count; i++) {
			if (mclk == mclk_table->dpm_levels[i].value)
				break;
		}

		cgs_get_active_displays_info(hwmgr->device, &info);

		if (i >= mclk_table->count)
			data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;

		if (data->display_timing.num_existing_displays !=
				info.display_count ||
				i >= mclk_table->count)
			data->need_update_dpm_table |= DPMTABLE_UPDATE_MCLK;
	}
	return 0;
}
3248
3249static int vega10_populate_and_upload_sclk_mclk_dpm_levels(
3250 struct pp_hwmgr *hwmgr, const void *input)
3251{
3252 int result = 0;
3253 const struct phm_set_power_state_input *states =
3254 (const struct phm_set_power_state_input *)input;
3255 const struct vega10_power_state *vega10_ps =
3256 cast_const_phw_vega10_power_state(states->pnew_state);
3257 struct vega10_hwmgr *data =
3258 (struct vega10_hwmgr *)(hwmgr->backend);
3259 uint32_t sclk = vega10_ps->performance_levels
3260 [vega10_ps->performance_level_count - 1].gfx_clock;
3261 uint32_t mclk = vega10_ps->performance_levels
3262 [vega10_ps->performance_level_count - 1].mem_clock;
3263 struct vega10_dpm_table *dpm_table = &data->dpm_table;
3264 struct vega10_dpm_table *golden_dpm_table =
3265 &data->golden_dpm_table;
3266 uint32_t dpm_count, clock_percent;
3267 uint32_t i;
3268
3269 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3270 PHM_PlatformCaps_ODNinACSupport) ||
3271 phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3272 PHM_PlatformCaps_ODNinDCSupport)) {
3273
3274 if (!data->need_update_dpm_table &&
3275 !data->apply_optimized_settings &&
3276 !data->apply_overdrive_next_settings_mask)
3277 return 0;
3278
3279 if (data->apply_overdrive_next_settings_mask &
3280 DPMTABLE_OD_UPDATE_SCLK) {
3281 for (dpm_count = 0;
3282 dpm_count < dpm_table->gfx_table.count;
3283 dpm_count++) {
3284 dpm_table->gfx_table.dpm_levels[dpm_count].enabled =
3285 data->odn_dpm_table.odn_core_clock_dpm_levels.
3286 performance_level_entries[dpm_count].enabled;
3287 dpm_table->gfx_table.dpm_levels[dpm_count].value =
3288 data->odn_dpm_table.odn_core_clock_dpm_levels.
3289 performance_level_entries[dpm_count].clock;
3290 }
3291 }
3292
3293 if (data->apply_overdrive_next_settings_mask &
3294 DPMTABLE_OD_UPDATE_MCLK) {
3295 for (dpm_count = 0;
3296 dpm_count < dpm_table->mem_table.count;
3297 dpm_count++) {
3298 dpm_table->mem_table.dpm_levels[dpm_count].enabled =
3299 data->odn_dpm_table.odn_memory_clock_dpm_levels.
3300 performance_level_entries[dpm_count].enabled;
3301 dpm_table->mem_table.dpm_levels[dpm_count].value =
3302 data->odn_dpm_table.odn_memory_clock_dpm_levels.
3303 performance_level_entries[dpm_count].clock;
3304 }
3305 }
3306
3307 if ((data->need_update_dpm_table & DPMTABLE_UPDATE_SCLK) ||
3308 data->apply_optimized_settings ||
3309 (data->apply_overdrive_next_settings_mask &
3310 DPMTABLE_OD_UPDATE_SCLK)) {
3311 result = vega10_populate_all_graphic_levels(hwmgr);
3312 PP_ASSERT_WITH_CODE(!result,
3313 "Failed to populate SCLK during \
3314 PopulateNewDPMClocksStates Function!",
3315 return result);
3316 }
3317
3318 if ((data->need_update_dpm_table & DPMTABLE_UPDATE_MCLK) ||
3319 (data->apply_overdrive_next_settings_mask &
3320 DPMTABLE_OD_UPDATE_MCLK)){
3321 result = vega10_populate_all_memory_levels(hwmgr);
3322 PP_ASSERT_WITH_CODE(!result,
3323 "Failed to populate MCLK during \
3324 PopulateNewDPMClocksStates Function!",
3325 return result);
3326 }
3327 } else {
3328 if (!data->need_update_dpm_table &&
3329 !data->apply_optimized_settings)
3330 return 0;
3331
3332 if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_SCLK &&
3333 data->smu_features[GNLD_DPM_GFXCLK].supported) {
3334 dpm_table->
3335 gfx_table.dpm_levels[dpm_table->gfx_table.count - 1].
3336 value = sclk;
3337
3338 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3339 PHM_PlatformCaps_OD6PlusinACSupport) ||
3340 phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3341 PHM_PlatformCaps_OD6PlusinDCSupport)) {
3342 /* Need to do calculation based on the golden DPM table
3343 * as the Heatmap GPU Clock axis is also based on
3344 * the default values
3345 */
3346 PP_ASSERT_WITH_CODE(
3347 golden_dpm_table->gfx_table.dpm_levels
3348 [golden_dpm_table->gfx_table.count - 1].value,
3349 "Divide by 0!",
3350 return -1);
3351
3352 dpm_count = dpm_table->gfx_table.count < 2 ?
3353 0 : dpm_table->gfx_table.count - 2;
3354 for (i = dpm_count; i > 1; i--) {
3355 if (sclk > golden_dpm_table->gfx_table.dpm_levels
3356 [golden_dpm_table->gfx_table.count - 1].value) {
3357 clock_percent =
3358 ((sclk - golden_dpm_table->gfx_table.dpm_levels
3359 [golden_dpm_table->gfx_table.count - 1].value) *
3360 100) /
3361 golden_dpm_table->gfx_table.dpm_levels
3362 [golden_dpm_table->gfx_table.count - 1].value;
3363
3364 dpm_table->gfx_table.dpm_levels[i].value =
3365 golden_dpm_table->gfx_table.dpm_levels[i].value +
3366 (golden_dpm_table->gfx_table.dpm_levels[i].value *
3367 clock_percent) / 100;
3368 } else if (golden_dpm_table->
3369 gfx_table.dpm_levels[dpm_table->gfx_table.count-1].value >
3370 sclk) {
3371 clock_percent =
3372 ((golden_dpm_table->gfx_table.dpm_levels
3373 [golden_dpm_table->gfx_table.count - 1].value -
3374 sclk) * 100) /
3375 golden_dpm_table->gfx_table.dpm_levels
3376 [golden_dpm_table->gfx_table.count-1].value;
3377
3378 dpm_table->gfx_table.dpm_levels[i].value =
3379 golden_dpm_table->gfx_table.dpm_levels[i].value -
3380 (golden_dpm_table->gfx_table.dpm_levels[i].value *
3381 clock_percent) / 100;
3382 } else
3383 dpm_table->gfx_table.dpm_levels[i].value =
3384 golden_dpm_table->gfx_table.dpm_levels[i].value;
3385 }
3386 }
3387 }
3388
3389 if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_MCLK &&
3390 data->smu_features[GNLD_DPM_UCLK].supported) {
3391 dpm_table->
3392 mem_table.dpm_levels[dpm_table->mem_table.count - 1].
3393 value = mclk;
3394
3395 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3396 PHM_PlatformCaps_OD6PlusinACSupport) ||
3397 phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3398 PHM_PlatformCaps_OD6PlusinDCSupport)) {
3399
3400 PP_ASSERT_WITH_CODE(
3401 golden_dpm_table->mem_table.dpm_levels
3402 [golden_dpm_table->mem_table.count - 1].value,
3403 "Divide by 0!",
3404 return -1);
3405
3406 dpm_count = dpm_table->mem_table.count < 2 ?
3407 0 : dpm_table->mem_table.count - 2;
3408 for (i = dpm_count; i > 1; i--) {
3409 if (mclk > golden_dpm_table->mem_table.dpm_levels
3410 [golden_dpm_table->mem_table.count-1].value) {
3411 clock_percent = ((mclk -
3412 golden_dpm_table->mem_table.dpm_levels
3413 [golden_dpm_table->mem_table.count-1].value) *
3414 100) /
3415 golden_dpm_table->mem_table.dpm_levels
3416 [golden_dpm_table->mem_table.count-1].value;
3417
3418 dpm_table->mem_table.dpm_levels[i].value =
3419 golden_dpm_table->mem_table.dpm_levels[i].value +
3420 (golden_dpm_table->mem_table.dpm_levels[i].value *
3421 clock_percent) / 100;
3422 } else if (golden_dpm_table->mem_table.dpm_levels
3423 [dpm_table->mem_table.count-1].value > mclk) {
3424 clock_percent = ((golden_dpm_table->mem_table.dpm_levels
3425 [golden_dpm_table->mem_table.count-1].value - mclk) *
3426 100) /
3427 golden_dpm_table->mem_table.dpm_levels
3428 [golden_dpm_table->mem_table.count-1].value;
3429
3430 dpm_table->mem_table.dpm_levels[i].value =
3431 golden_dpm_table->mem_table.dpm_levels[i].value -
3432 (golden_dpm_table->mem_table.dpm_levels[i].value *
3433 clock_percent) / 100;
3434 } else
3435 dpm_table->mem_table.dpm_levels[i].value =
3436 golden_dpm_table->mem_table.dpm_levels[i].value;
3437 }
3438 }
3439 }
3440
3441 if ((data->need_update_dpm_table &
3442 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) ||
3443 data->apply_optimized_settings) {
3444 result = vega10_populate_all_graphic_levels(hwmgr);
3445 PP_ASSERT_WITH_CODE(!result,
3446 "Failed to populate SCLK during \
3447 PopulateNewDPMClocksStates Function!",
3448 return result);
3449 }
3450
3451 if (data->need_update_dpm_table &
3452 (DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) {
3453 result = vega10_populate_all_memory_levels(hwmgr);
3454 PP_ASSERT_WITH_CODE(!result,
3455 "Failed to populate MCLK during \
3456 PopulateNewDPMClocksStates Function!",
3457 return result);
3458 }
3459 }
3460
3461 return result;
3462}
3463
3464static int vega10_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
3465 struct vega10_single_dpm_table *dpm_table,
3466 uint32_t low_limit, uint32_t high_limit)
3467{
3468 uint32_t i;
3469
3470 for (i = 0; i < dpm_table->count; i++) {
3471 if ((dpm_table->dpm_levels[i].value < low_limit) ||
3472 (dpm_table->dpm_levels[i].value > high_limit))
3473 dpm_table->dpm_levels[i].enabled = false;
3474 else
3475 dpm_table->dpm_levels[i].enabled = true;
3476 }
3477 return 0;
3478}
3479
3480static int vega10_trim_single_dpm_states_with_mask(struct pp_hwmgr *hwmgr,
3481 struct vega10_single_dpm_table *dpm_table,
3482 uint32_t low_limit, uint32_t high_limit,
3483 uint32_t disable_dpm_mask)
3484{
3485 uint32_t i;
3486
3487 for (i = 0; i < dpm_table->count; i++) {
3488 if ((dpm_table->dpm_levels[i].value < low_limit) ||
3489 (dpm_table->dpm_levels[i].value > high_limit))
3490 dpm_table->dpm_levels[i].enabled = false;
3491 else if (!((1 << i) & disable_dpm_mask))
3492 dpm_table->dpm_levels[i].enabled = false;
3493 else
3494 dpm_table->dpm_levels[i].enabled = true;
3495 }
3496 return 0;
3497}
3498
3499static int vega10_trim_dpm_states(struct pp_hwmgr *hwmgr,
3500 const struct vega10_power_state *vega10_ps)
3501{
3502 struct vega10_hwmgr *data =
3503 (struct vega10_hwmgr *)(hwmgr->backend);
3504 uint32_t high_limit_count;
3505
3506 PP_ASSERT_WITH_CODE((vega10_ps->performance_level_count >= 1),
3507 "power state did not have any performance level",
3508 return -1);
3509
3510 high_limit_count = (vega10_ps->performance_level_count == 1) ? 0 : 1;
3511
3512 vega10_trim_single_dpm_states(hwmgr,
3513 &(data->dpm_table.soc_table),
3514 vega10_ps->performance_levels[0].soc_clock,
3515 vega10_ps->performance_levels[high_limit_count].soc_clock);
3516
3517 vega10_trim_single_dpm_states_with_mask(hwmgr,
3518 &(data->dpm_table.gfx_table),
3519 vega10_ps->performance_levels[0].gfx_clock,
3520 vega10_ps->performance_levels[high_limit_count].gfx_clock,
3521 data->disable_dpm_mask);
3522
3523 vega10_trim_single_dpm_states(hwmgr,
3524 &(data->dpm_table.mem_table),
3525 vega10_ps->performance_levels[0].mem_clock,
3526 vega10_ps->performance_levels[high_limit_count].mem_clock);
3527
3528 return 0;
3529}
3530
3531static uint32_t vega10_find_lowest_dpm_level(
3532 struct vega10_single_dpm_table *table)
3533{
3534 uint32_t i;
3535
3536 for (i = 0; i < table->count; i++) {
3537 if (table->dpm_levels[i].enabled)
3538 break;
3539 }
3540
3541 return i;
3542}
3543
3544static uint32_t vega10_find_highest_dpm_level(
3545 struct vega10_single_dpm_table *table)
3546{
3547 uint32_t i = 0;
3548
3549 if (table->count <= MAX_REGULAR_DPM_NUMBER) {
3550 for (i = table->count; i > 0; i--) {
3551 if (table->dpm_levels[i - 1].enabled)
3552 return i - 1;
3553 }
3554 } else {
3555 pr_info("DPM Table Has Too Many Entries!");
3556 return MAX_REGULAR_DPM_NUMBER - 1;
3557 }
3558
3559 return i;
3560}
3561
static void vega10_apply_dal_minimum_voltage_request(
		struct pp_hwmgr *hwmgr)
{
	/* Intentionally empty: no DAL minimum-voltage handling on Vega10. */
}
3567
3568static int vega10_upload_dpm_bootup_level(struct pp_hwmgr *hwmgr)
3569{
3570 struct vega10_hwmgr *data =
3571 (struct vega10_hwmgr *)(hwmgr->backend);
3572
3573 vega10_apply_dal_minimum_voltage_request(hwmgr);
3574
3575 if (!data->registry_data.sclk_dpm_key_disabled) {
3576 if (data->smc_state_table.gfx_boot_level !=
3577 data->dpm_table.gfx_table.dpm_state.soft_min_level) {
3578 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
3579 hwmgr->smumgr,
3580 PPSMC_MSG_SetSoftMinGfxclkByIndex,
3581 data->smc_state_table.gfx_boot_level),
3582 "Failed to set soft min sclk index!",
3583 return -EINVAL);
3584 data->dpm_table.gfx_table.dpm_state.soft_min_level =
3585 data->smc_state_table.gfx_boot_level;
3586 }
3587 }
3588
3589 if (!data->registry_data.mclk_dpm_key_disabled) {
3590 if (data->smc_state_table.mem_boot_level !=
3591 data->dpm_table.mem_table.dpm_state.soft_min_level) {
3592 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
3593 hwmgr->smumgr,
3594 PPSMC_MSG_SetSoftMinUclkByIndex,
3595 data->smc_state_table.mem_boot_level),
3596 "Failed to set soft min mclk index!",
3597 return -EINVAL);
3598
3599 data->dpm_table.mem_table.dpm_state.soft_min_level =
3600 data->smc_state_table.mem_boot_level;
3601 }
3602 }
3603
3604 return 0;
3605}
3606
/*
 * Push the cached maximum (soft-max) GFX and memory DPM levels to the SMC.
 * Mirrors vega10_upload_dpm_bootup_level(): the SMC is only messaged when
 * the cached max level differs from the recorded soft-max, and each domain
 * honors its registry "dpm key disabled" flag.
 * Returns 0 on success, -EINVAL if an SMC message fails.
 */
static int vega10_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data =
			(struct vega10_hwmgr *)(hwmgr->backend);

	vega10_apply_dal_minimum_voltage_request(hwmgr);

	if (!data->registry_data.sclk_dpm_key_disabled) {
		/* Dirty-check: skip the SMC round-trip when unchanged. */
		if (data->smc_state_table.gfx_max_level !=
				data->dpm_table.gfx_table.dpm_state.soft_max_level) {
			PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
				hwmgr->smumgr,
				PPSMC_MSG_SetSoftMaxGfxclkByIndex,
				data->smc_state_table.gfx_max_level),
				"Failed to set soft max sclk index!",
				return -EINVAL);
			data->dpm_table.gfx_table.dpm_state.soft_max_level =
					data->smc_state_table.gfx_max_level;
		}
	}

	if (!data->registry_data.mclk_dpm_key_disabled) {
		if (data->smc_state_table.mem_max_level !=
				data->dpm_table.mem_table.dpm_state.soft_max_level) {
			PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
				hwmgr->smumgr,
				PPSMC_MSG_SetSoftMaxUclkByIndex,
				data->smc_state_table.mem_max_level),
				"Failed to set soft max mclk index!",
				return -EINVAL);
			data->dpm_table.mem_table.dpm_state.soft_max_level =
					data->smc_state_table.mem_max_level;
		}
	}

	return 0;
}
3644
/*
 * Regenerate the DPM enable mask for the new power state: trim the DPM
 * tables to the state's clocks, recompute boot/max levels for GFX and
 * memory, upload both to the SMC, then mark the in-between levels enabled.
 * Returns 0 on success, -1 on any intermediate failure.
 */
static int vega10_generate_dpm_level_enable_mask(
		struct pp_hwmgr *hwmgr, const void *input)
{
	struct vega10_hwmgr *data =
			(struct vega10_hwmgr *)(hwmgr->backend);
	const struct phm_set_power_state_input *states =
			(const struct phm_set_power_state_input *)input;
	const struct vega10_power_state *vega10_ps =
			cast_const_phw_vega10_power_state(states->pnew_state);
	int i;

	PP_ASSERT_WITH_CODE(!vega10_trim_dpm_states(hwmgr, vega10_ps),
			"Attempt to Trim DPM States Failed!",
			return -1);

	/* Recompute the boot (lowest enabled) and max (highest enabled)
	 * level indices after trimming. */
	data->smc_state_table.gfx_boot_level =
			vega10_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
	data->smc_state_table.gfx_max_level =
			vega10_find_highest_dpm_level(&(data->dpm_table.gfx_table));
	data->smc_state_table.mem_boot_level =
			vega10_find_lowest_dpm_level(&(data->dpm_table.mem_table));
	data->smc_state_table.mem_max_level =
			vega10_find_highest_dpm_level(&(data->dpm_table.mem_table));

	PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
			"Attempt to upload DPM Bootup Levels Failed!",
			return -1);
	PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
			"Attempt to upload DPM Max Levels Failed!",
			return -1);
	/* NOTE(review): the '<' bound leaves the max level itself untouched;
	 * it is already enabled per find_highest_dpm_level, but confirm this
	 * exclusive bound is intentional. */
	for(i = data->smc_state_table.gfx_boot_level; i < data->smc_state_table.gfx_max_level; i++)
		data->dpm_table.gfx_table.dpm_levels[i].enabled = true;


	for(i = data->smc_state_table.mem_boot_level; i < data->smc_state_table.mem_max_level; i++)
		data->dpm_table.mem_table.dpm_levels[i].enabled = true;

	return 0;
}
3684
3685int vega10_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
3686{
3687 struct vega10_hwmgr *data =
3688 (struct vega10_hwmgr *)(hwmgr->backend);
3689
3690 if (data->smu_features[GNLD_DPM_VCE].supported) {
3691 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
3692 enable,
3693 data->smu_features[GNLD_DPM_VCE].smu_feature_bitmap),
3694 "Attempt to Enable/Disable DPM VCE Failed!",
3695 return -1);
3696 data->smu_features[GNLD_DPM_VCE].enabled = enable;
3697 }
3698
3699 return 0;
3700}
3701
3702static int vega10_update_sclk_threshold(struct pp_hwmgr *hwmgr)
3703{
3704 struct vega10_hwmgr *data =
3705 (struct vega10_hwmgr *)(hwmgr->backend);
3706 int result = 0;
3707 uint32_t low_sclk_interrupt_threshold = 0;
3708
3709 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3710 PHM_PlatformCaps_SclkThrottleLowNotification)
3711 && (hwmgr->gfx_arbiter.sclk_threshold !=
3712 data->low_sclk_interrupt_threshold)) {
3713 data->low_sclk_interrupt_threshold =
3714 hwmgr->gfx_arbiter.sclk_threshold;
3715 low_sclk_interrupt_threshold =
3716 data->low_sclk_interrupt_threshold;
3717
3718 data->smc_state_table.pp_table.LowGfxclkInterruptThreshold =
3719 cpu_to_le32(low_sclk_interrupt_threshold);
3720
3721 /* This message will also enable SmcToHost Interrupt */
3722 result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
3723 PPSMC_MSG_SetLowGfxclkInterruptThreshold,
3724 (uint32_t)low_sclk_interrupt_threshold);
3725 }
3726
3727 return result;
3728}
3729
/*
 * Apply a new power state: refresh the DPM tables for the requested
 * clocks, regenerate the level enable mask, update the sclk threshold,
 * and push the resulting PPTable to the SMC.
 * Intermediate failures are latched into 'result' and the sequence keeps
 * going; only a failed PPTable upload returns immediately.
 */
static int vega10_set_power_state_tasks(struct pp_hwmgr *hwmgr,
		const void *input)
{
	int tmp_result, result = 0;
	struct vega10_hwmgr *data =
			(struct vega10_hwmgr *)(hwmgr->backend);
	PPTable_t *pp_table = &(data->smc_state_table.pp_table);

	tmp_result = vega10_find_dpm_states_clocks_in_dpm_table(hwmgr, input);
	PP_ASSERT_WITH_CODE(!tmp_result,
			"Failed to find DPM states clocks in DPM table!",
			result = tmp_result);

	tmp_result = vega10_populate_and_upload_sclk_mclk_dpm_levels(hwmgr, input);
	PP_ASSERT_WITH_CODE(!tmp_result,
			"Failed to populate and upload SCLK MCLK DPM levels!",
			result = tmp_result);

	tmp_result = vega10_generate_dpm_level_enable_mask(hwmgr, input);
	PP_ASSERT_WITH_CODE(!tmp_result,
			"Failed to generate DPM level enabled mask!",
			result = tmp_result);

	tmp_result = vega10_update_sclk_threshold(hwmgr);
	PP_ASSERT_WITH_CODE(!tmp_result,
			"Failed to update SCLK threshold!",
			result = tmp_result);

	/* Push the fully rebuilt PPTable to the SMC in one shot. */
	result = vega10_copy_table_to_smc(hwmgr->smumgr,
			(uint8_t *)pp_table, PPTABLE);
	PP_ASSERT_WITH_CODE(!result,
			"Failed to upload PPtable!", return result);

	/* Pending optimized/overdrive settings are now applied. */
	data->apply_optimized_settings = false;
	data->apply_overdrive_next_settings_mask = 0;

	return 0;
}
3768
3769static int vega10_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
3770{
3771 struct pp_power_state *ps;
3772 struct vega10_power_state *vega10_ps;
3773
3774 if (hwmgr == NULL)
3775 return -EINVAL;
3776
3777 ps = hwmgr->request_ps;
3778
3779 if (ps == NULL)
3780 return -EINVAL;
3781
3782 vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
3783
3784 if (low)
3785 return vega10_ps->performance_levels[0].gfx_clock;
3786 else
3787 return vega10_ps->performance_levels
3788 [vega10_ps->performance_level_count - 1].gfx_clock;
3789}
3790
3791static int vega10_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
3792{
3793 struct pp_power_state *ps;
3794 struct vega10_power_state *vega10_ps;
3795
3796 if (hwmgr == NULL)
3797 return -EINVAL;
3798
3799 ps = hwmgr->request_ps;
3800
3801 if (ps == NULL)
3802 return -EINVAL;
3803
3804 vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
3805
3806 if (low)
3807 return vega10_ps->performance_levels[0].mem_clock;
3808 else
3809 return vega10_ps->performance_levels
3810 [vega10_ps->performance_level_count-1].mem_clock;
3811}
3812
/*
 * Read one power-management sensor into *value (*size set to the byte
 * width written, always 4 here).  Clock sensors query the SMC for the
 * current DPM level index and translate it through the DPM tables;
 * UVD/VCE power report 0 when gated, 1 otherwise.
 * Returns 0 on success, -EINVAL for an unknown sensor, or the SMC
 * message error.
 */
static int vega10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
			      void *value, int *size)
{
	uint32_t sclk_idx, mclk_idx, activity_percent = 0;
	struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
	struct vega10_dpm_table *dpm_table = &data->dpm_table;
	int ret = 0;

	switch (idx) {
	case AMDGPU_PP_SENSOR_GFX_SCLK:
		ret = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetCurrentGfxclkIndex);
		if (!ret) {
			/* NOTE(review): the read-arg return value is ignored;
			 * sclk_idx would be stale if it failed — confirm. */
			vega10_read_arg_from_smc(hwmgr->smumgr, &sclk_idx);
			*((uint32_t *)value) = dpm_table->gfx_table.dpm_levels[sclk_idx].value;
			*size = 4;
		}
		break;
	case AMDGPU_PP_SENSOR_GFX_MCLK:
		ret = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetCurrentUclkIndex);
		if (!ret) {
			vega10_read_arg_from_smc(hwmgr->smumgr, &mclk_idx);
			*((uint32_t *)value) = dpm_table->mem_table.dpm_levels[mclk_idx].value;
			*size = 4;
		}
		break;
	case AMDGPU_PP_SENSOR_GPU_LOAD:
		ret = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_GetAverageGfxActivity, 0);
		if (!ret) {
			vega10_read_arg_from_smc(hwmgr->smumgr, &activity_percent);
			/* Clamp activity to 100%. */
			*((uint32_t *)value) = activity_percent > 100 ? 100 : activity_percent;
			*size = 4;
		}
		break;
	case AMDGPU_PP_SENSOR_GPU_TEMP:
		*((uint32_t *)value) = vega10_thermal_get_temperature(hwmgr);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_UVD_POWER:
		*((uint32_t *)value) = data->uvd_power_gated ? 0 : 1;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VCE_POWER:
		*((uint32_t *)value) = data->vce_power_gated ? 0 : 1;
		*size = 4;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}
3864
3865static int vega10_notify_smc_display_change(struct pp_hwmgr *hwmgr,
3866 bool has_disp)
3867{
3868 return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
3869 PPSMC_MSG_SetUclkFastSwitch,
3870 has_disp ? 0 : 1);
3871}
3872
3873int vega10_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
3874 struct pp_display_clock_request *clock_req)
3875{
3876 int result = 0;
3877 enum amd_pp_clock_type clk_type = clock_req->clock_type;
75f0e32b 3878 uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;
f83a9991
EH
3879 DSPCLK_e clk_select = 0;
3880 uint32_t clk_request = 0;
3881
3882 switch (clk_type) {
3883 case amd_pp_dcef_clock:
3884 clk_select = DSPCLK_DCEFCLK;
3885 break;
3886 case amd_pp_disp_clock:
3887 clk_select = DSPCLK_DISPCLK;
3888 break;
3889 case amd_pp_pixel_clock:
3890 clk_select = DSPCLK_PIXCLK;
3891 break;
3892 case amd_pp_phy_clock:
3893 clk_select = DSPCLK_PHYCLK;
3894 break;
3895 default:
3896 pr_info("[DisplayClockVoltageRequest]Invalid Clock Type!");
3897 result = -1;
3898 break;
3899 }
3900
3901 if (!result) {
3902 clk_request = (clk_freq << 16) | clk_select;
3903 result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
3904 PPSMC_MSG_RequestDisplayClockByFreq,
3905 clk_request);
3906 }
3907
3908 return result;
3909}
3910
75f0e32b
RZ
3911static uint8_t vega10_get_uclk_index(struct pp_hwmgr *hwmgr,
3912 struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table,
3913 uint32_t frequency)
3914{
3915 uint8_t count;
3916 uint8_t i;
3917
3918 if (mclk_table == NULL || mclk_table->count == 0)
3919 return 0;
3920
3921 count = (uint8_t)(mclk_table->count);
3922
3923 for(i = 0; i < count; i++) {
3924 if(mclk_table->entries[i].clk >= frequency)
3925 return i;
3926 }
3927
3928 return i-1;
3929}
3930
f83a9991
EH
/*
 * After a power-state adjustment, reconcile the SMC with the current
 * display configuration: set the UCLK fast-switch policy, request a hard
 * minimum for DCEFCLK plus its deep-sleep divider, and pin the soft-min
 * UCLK index to the display's minimum memory clock.
 * Always returns 0; individual SMC failures are only logged.
 */
static int vega10_notify_smc_display_config_after_ps_adjustment(
		struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data =
			(struct vega10_hwmgr *)(hwmgr->backend);
	struct vega10_single_dpm_table *dpm_table =
			&data->dpm_table.dcef_table;
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)hwmgr->pptable;
	struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table = table_info->vdd_dep_on_mclk;
	uint32_t idx;
	uint32_t num_active_disps = 0;
	struct cgs_display_info info = {0};
	struct PP_Clocks min_clocks = {0};
	uint32_t i;
	struct pp_display_clock_request clock_req;

	info.mode_info = NULL;

	cgs_get_active_displays_info(hwmgr->device, &info);

	num_active_disps = info.display_count;

	/* Fast UCLK switching is only allowed with at most one display;
	 * the helper inverts the flag into the SMC message argument. */
	if (num_active_disps > 1)
		vega10_notify_smc_display_change(hwmgr, false);
	else
		vega10_notify_smc_display_change(hwmgr, true);

	min_clocks.dcefClock = hwmgr->display_config.min_dcef_set_clk;
	min_clocks.dcefClockInSR = hwmgr->display_config.min_dcef_deep_sleep_set_clk;
	min_clocks.memoryClock = hwmgr->display_config.min_mem_set_clock;

	/* Find the DCEF DPM level that exactly matches the display's
	 * minimum DCEF clock. */
	for (i = 0; i < dpm_table->count; i++) {
		if (dpm_table->dpm_levels[i].value == min_clocks.dcefClock)
			break;
	}

	if (i < dpm_table->count) {
		clock_req.clock_type = amd_pp_dcef_clock;
		clock_req.clock_freq_in_khz = dpm_table->dpm_levels[i].value;
		if (!vega10_display_clock_voltage_request(hwmgr, &clock_req)) {
			/* Hard min accepted; also program the deep-sleep
			 * DCEF clock (value scaled down by 100). */
			PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
					hwmgr->smumgr, PPSMC_MSG_SetMinDeepSleepDcefclk,
					min_clocks.dcefClockInSR /100),
					"Attempt to set divider for DCEFCLK Failed!",);
		} else {
			pr_info("Attempt to set Hard Min for DCEFCLK Failed!");
		}
	} else {
		pr_info("Cannot find requested DCEFCLK!");
	}

	/* Pin the memory soft-min to the display's minimum memory clock. */
	if (min_clocks.memoryClock != 0) {
		idx = vega10_get_uclk_index(hwmgr, mclk_table, min_clocks.memoryClock);
		smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetSoftMinUclkByIndex, idx);
		data->dpm_table.mem_table.dpm_state.soft_min_level= idx;
	}

	return 0;
}
3991
/*
 * Force both GFX and memory DPM to their highest enabled level by setting
 * boot (soft-min) and max (soft-max) to the same top index, then pushing
 * both to the SMC.  Returns 0 on success, -1 on upload failure.
 */
static int vega10_force_dpm_highest(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data =
			(struct vega10_hwmgr *)(hwmgr->backend);

	data->smc_state_table.gfx_boot_level =
		data->smc_state_table.gfx_max_level =
			vega10_find_highest_dpm_level(&(data->dpm_table.gfx_table));
	data->smc_state_table.mem_boot_level =
		data->smc_state_table.mem_max_level =
			vega10_find_highest_dpm_level(&(data->dpm_table.mem_table));

	PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
			"Failed to upload boot level to highest!",
			return -1);

	PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
			"Failed to upload dpm max level to highest!",
			return -1);

	return 0;
}
4014
4015static int vega10_force_dpm_lowest(struct pp_hwmgr *hwmgr)
4016{
4017 struct vega10_hwmgr *data =
4018 (struct vega10_hwmgr *)(hwmgr->backend);
4019
4020 data->smc_state_table.gfx_boot_level =
4021 data->smc_state_table.gfx_max_level =
4022 vega10_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
4023 data->smc_state_table.mem_boot_level =
4024 data->smc_state_table.mem_max_level =
4025 vega10_find_lowest_dpm_level(&(data->dpm_table.mem_table));
4026
4027 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
4028 "Failed to upload boot level to highest!",
4029 return -1);
4030
4031 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
4032 "Failed to upload dpm max level to highest!",
4033 return -1);
4034
4035 return 0;
4036
4037}
4038
/*
 * Restore automatic DPM: widen the soft range back to
 * [lowest enabled, highest enabled] for both GFX and memory, then push
 * the levels to the SMC.  Returns 0 on success, -1 on upload failure.
 */
static int vega10_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);

	data->smc_state_table.gfx_boot_level =
			vega10_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
	data->smc_state_table.gfx_max_level =
			vega10_find_highest_dpm_level(&(data->dpm_table.gfx_table));
	data->smc_state_table.mem_boot_level =
			vega10_find_lowest_dpm_level(&(data->dpm_table.mem_table));
	data->smc_state_table.mem_max_level =
			vega10_find_highest_dpm_level(&(data->dpm_table.mem_table));

	PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
			"Failed to upload DPM Bootup Levels!",
			return -1);

	PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
			"Failed to upload DPM Max Levels!",
			return -1);
	return 0;
}
4061
4062static int vega10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
4063 enum amd_dpm_forced_level level)
4064{
4065 int ret = 0;
4066
4067 switch (level) {
4068 case AMD_DPM_FORCED_LEVEL_HIGH:
4069 ret = vega10_force_dpm_highest(hwmgr);
4070 if (ret)
4071 return ret;
4072 break;
4073 case AMD_DPM_FORCED_LEVEL_LOW:
4074 ret = vega10_force_dpm_lowest(hwmgr);
4075 if (ret)
4076 return ret;
4077 break;
4078 case AMD_DPM_FORCED_LEVEL_AUTO:
4079 ret = vega10_unforce_dpm_levels(hwmgr);
4080 if (ret)
4081 return ret;
4082 break;
4083 default:
4084 break;
4085 }
4086
4087 hwmgr->dpm_level = level;
4088
4089 return ret;
4090}
4091
4092static int vega10_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
4093{
7522ffc4 4094 int result = 0;
f83a9991 4095
7522ffc4
RZ
4096 switch (mode) {
4097 case AMD_FAN_CTRL_NONE:
4098 result = vega10_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
4099 break;
4100 case AMD_FAN_CTRL_MANUAL:
4101 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
4102 PHM_PlatformCaps_MicrocodeFanControl))
4103 result = vega10_fan_ctrl_stop_smc_fan_control(hwmgr);
4104 break;
4105 case AMD_FAN_CTRL_AUTO:
4106 result = vega10_fan_ctrl_set_static_mode(hwmgr, mode);
4107 if (!result)
4108 result = vega10_fan_ctrl_start_smc_fan_control(hwmgr);
4109 break;
4110 default:
4111 break;
4112 }
4113 return result;
f83a9991
EH
4114}
4115
4116static int vega10_get_fan_control_mode(struct pp_hwmgr *hwmgr)
4117{
7522ffc4 4118 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
f83a9991 4119
7522ffc4
RZ
4120 if (data->smu_features[GNLD_FAN_CONTROL].enabled == false)
4121 return AMD_FAN_CTRL_MANUAL;
4122 else
4123 return AMD_FAN_CTRL_AUTO;
f83a9991
EH
4124}
4125
4126static int vega10_get_dal_power_level(struct pp_hwmgr *hwmgr,
4127 struct amd_pp_simple_clock_info *info)
4128{
4129 struct phm_ppt_v2_information *table_info =
4130 (struct phm_ppt_v2_information *)hwmgr->pptable;
4131 struct phm_clock_and_voltage_limits *max_limits =
4132 &table_info->max_clock_voltage_on_ac;
4133
4134 info->engine_max_clock = max_limits->sclk;
4135 info->memory_max_clock = max_limits->mclk;
4136
4137 return 0;
4138}
4139
4140static void vega10_get_sclks(struct pp_hwmgr *hwmgr,
4141 struct pp_clock_levels_with_latency *clocks)
4142{
4143 struct phm_ppt_v2_information *table_info =
4144 (struct phm_ppt_v2_information *)hwmgr->pptable;
4145 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
4146 table_info->vdd_dep_on_sclk;
4147 uint32_t i;
4148
4149 for (i = 0; i < dep_table->count; i++) {
4150 if (dep_table->entries[i].clk) {
4151 clocks->data[clocks->num_levels].clocks_in_khz =
4152 dep_table->entries[i].clk;
4153 clocks->num_levels++;
4154 }
4155 }
4156
4157}
4158
4159static uint32_t vega10_get_mem_latency(struct pp_hwmgr *hwmgr,
4160 uint32_t clock)
4161{
4162 if (clock >= MEM_FREQ_LOW_LATENCY &&
4163 clock < MEM_FREQ_HIGH_LATENCY)
4164 return MEM_LATENCY_HIGH;
4165 else if (clock >= MEM_FREQ_HIGH_LATENCY)
4166 return MEM_LATENCY_LOW;
4167 else
4168 return MEM_LATENCY_ERR;
4169}
4170
4171static void vega10_get_memclocks(struct pp_hwmgr *hwmgr,
4172 struct pp_clock_levels_with_latency *clocks)
4173{
4174 struct phm_ppt_v2_information *table_info =
4175 (struct phm_ppt_v2_information *)hwmgr->pptable;
4176 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
4177 table_info->vdd_dep_on_mclk;
4178 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4179 uint32_t i;
4180
4181 clocks->num_levels = 0;
4182 data->mclk_latency_table.count = 0;
4183
4184 for (i = 0; i < dep_table->count; i++) {
4185 if (dep_table->entries[i].clk) {
4186 clocks->data[clocks->num_levels].clocks_in_khz =
4187 data->mclk_latency_table.entries
4188 [data->mclk_latency_table.count].frequency =
4189 dep_table->entries[i].clk;
4190 clocks->data[clocks->num_levels].latency_in_us =
4191 data->mclk_latency_table.entries
4192 [data->mclk_latency_table.count].latency =
4193 vega10_get_mem_latency(hwmgr,
4194 dep_table->entries[i].clk);
4195 clocks->num_levels++;
4196 data->mclk_latency_table.count++;
4197 }
4198 }
4199}
4200
4201static void vega10_get_dcefclocks(struct pp_hwmgr *hwmgr,
4202 struct pp_clock_levels_with_latency *clocks)
4203{
4204 struct phm_ppt_v2_information *table_info =
4205 (struct phm_ppt_v2_information *)hwmgr->pptable;
4206 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
4207 table_info->vdd_dep_on_dcefclk;
4208 uint32_t i;
4209
4210 for (i = 0; i < dep_table->count; i++) {
4211 clocks->data[i].clocks_in_khz = dep_table->entries[i].clk;
4212 clocks->data[i].latency_in_us = 0;
4213 clocks->num_levels++;
4214 }
4215}
4216
4217static void vega10_get_socclocks(struct pp_hwmgr *hwmgr,
4218 struct pp_clock_levels_with_latency *clocks)
4219{
4220 struct phm_ppt_v2_information *table_info =
4221 (struct phm_ppt_v2_information *)hwmgr->pptable;
4222 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
4223 table_info->vdd_dep_on_socclk;
4224 uint32_t i;
4225
4226 for (i = 0; i < dep_table->count; i++) {
4227 clocks->data[i].clocks_in_khz = dep_table->entries[i].clk;
4228 clocks->data[i].latency_in_us = 0;
4229 clocks->num_levels++;
4230 }
4231}
4232
4233static int vega10_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
4234 enum amd_pp_clock_type type,
4235 struct pp_clock_levels_with_latency *clocks)
4236{
4237 switch (type) {
4238 case amd_pp_sys_clock:
4239 vega10_get_sclks(hwmgr, clocks);
4240 break;
4241 case amd_pp_mem_clock:
4242 vega10_get_memclocks(hwmgr, clocks);
4243 break;
4244 case amd_pp_dcef_clock:
4245 vega10_get_dcefclocks(hwmgr, clocks);
4246 break;
4247 case amd_pp_soc_clock:
4248 vega10_get_socclocks(hwmgr, clocks);
4249 break;
4250 default:
4251 return -1;
4252 }
4253
4254 return 0;
4255}
4256
/*
 * Fill @clocks with every level of the requested clock domain together
 * with its VDDC (in mV) resolved through the voltage lookup table.
 * Returns 0 on success, -1 for an unsupported clock type.
 */
static int vega10_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
		enum amd_pp_clock_type type,
		struct pp_clock_levels_with_voltage *clocks)
{
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)hwmgr->pptable;
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
	uint32_t i;

	/* Select the clock/voltage dependency table for this domain. */
	switch (type) {
	case amd_pp_mem_clock:
		dep_table = table_info->vdd_dep_on_mclk;
		break;
	case amd_pp_dcef_clock:
		dep_table = table_info->vdd_dep_on_dcefclk;
		break;
	case amd_pp_disp_clock:
		dep_table = table_info->vdd_dep_on_dispclk;
		break;
	case amd_pp_pixel_clock:
		dep_table = table_info->vdd_dep_on_pixclk;
		break;
	case amd_pp_phy_clock:
		dep_table = table_info->vdd_dep_on_phyclk;
		break;
	default:
		return -1;
	}

	for (i = 0; i < dep_table->count; i++) {
		clocks->data[i].clocks_in_khz = dep_table->entries[i].clk;
		/* vddInd indexes the shared VDDC lookup table. */
		clocks->data[i].voltage_in_mv = (uint32_t)(table_info->vddc_lookup_table->
				entries[dep_table->entries[i].vddInd].us_vdd);
		clocks->num_levels++;
	}

	/* NOTE(review): unreachable — the loop always exits with
	 * i == dep_table->count. */
	if (i < dep_table->count)
		return -1;

	return 0;
}
4298
/*
 * Translate display watermark clock ranges into the SMC watermark table.
 * DMIF sets fill the WM_DCEFCLK rows, MCIF sets fill the WM_SOCCLK rows;
 * all clocks are converted from kHz to the SMC's 10-kHz units (divide by
 * 100) and stored little-endian.  Skipped entirely when watermarks are
 * disabled via registry.  On success the WaterMarksExist flag is set so a
 * later display-config task uploads the table.  Always returns 0.
 */
static int vega10_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
		struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
{
	struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
	Watermarks_t *table = &(data->smc_state_table.water_marks_table);
	int result = 0;
	uint32_t i;

	if (!data->registry_data.disable_water_mark) {
		/* DMIF watermark sets -> DCEFCLK rows. */
		for (i = 0; i < wm_with_clock_ranges->num_wm_sets_dmif; i++) {
			table->WatermarkRow[WM_DCEFCLK][i].MinClock =
				cpu_to_le16((uint16_t)
				(wm_with_clock_ranges->wm_sets_dmif[i].wm_min_dcefclk_in_khz) /
				100);
			table->WatermarkRow[WM_DCEFCLK][i].MaxClock =
				cpu_to_le16((uint16_t)
				(wm_with_clock_ranges->wm_sets_dmif[i].wm_max_dcefclk_in_khz) /
				100);
			table->WatermarkRow[WM_DCEFCLK][i].MinUclk =
				cpu_to_le16((uint16_t)
				(wm_with_clock_ranges->wm_sets_dmif[i].wm_min_memclk_in_khz) /
				100);
			table->WatermarkRow[WM_DCEFCLK][i].MaxUclk =
				cpu_to_le16((uint16_t)
				(wm_with_clock_ranges->wm_sets_dmif[i].wm_max_memclk_in_khz) /
				100);
			table->WatermarkRow[WM_DCEFCLK][i].WmSetting = (uint8_t)
					wm_with_clock_ranges->wm_sets_dmif[i].wm_set_id;
		}

		/* MCIF watermark sets -> SOCCLK rows. */
		for (i = 0; i < wm_with_clock_ranges->num_wm_sets_mcif; i++) {
			table->WatermarkRow[WM_SOCCLK][i].MinClock =
				cpu_to_le16((uint16_t)
				(wm_with_clock_ranges->wm_sets_mcif[i].wm_min_socclk_in_khz) /
				100);
			table->WatermarkRow[WM_SOCCLK][i].MaxClock =
				cpu_to_le16((uint16_t)
				(wm_with_clock_ranges->wm_sets_mcif[i].wm_max_socclk_in_khz) /
				100);
			table->WatermarkRow[WM_SOCCLK][i].MinUclk =
				cpu_to_le16((uint16_t)
				(wm_with_clock_ranges->wm_sets_mcif[i].wm_min_memclk_in_khz) /
				100);
			table->WatermarkRow[WM_SOCCLK][i].MaxUclk =
				cpu_to_le16((uint16_t)
				(wm_with_clock_ranges->wm_sets_mcif[i].wm_max_memclk_in_khz) /
				100);
			table->WatermarkRow[WM_SOCCLK][i].WmSetting = (uint8_t)
					wm_with_clock_ranges->wm_sets_mcif[i].wm_set_id;
		}
		data->water_marks_bitmap = WaterMarksExist;
	}

	return result;
}
4354
/*
 * In manual DPM mode, restrict SCLK or MCLK to the level range described
 * by @mask: the lowest set bit becomes the boot (soft-min) level and the
 * highest set bit becomes the soft-max level, both then uploaded to the
 * SMC.  PP_PCIE is accepted but ignored.
 * Returns -EINVAL outside manual mode or on upload failure, else 0.
 */
static int vega10_force_clock_level(struct pp_hwmgr *hwmgr,
		enum pp_clock_type type, uint32_t mask)
{
	struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
	int i;

	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
		return -EINVAL;

	switch (type) {
	case PP_SCLK:
		/* Lowest set bit -> boot level.
		 * NOTE(review): a zero mask leaves i == 32 here (and -1
		 * below) — callers presumably never pass 0; confirm. */
		for (i = 0; i < 32; i++) {
			if (mask & (1 << i))
				break;
		}
		data->smc_state_table.gfx_boot_level = i;

		/* Highest set bit -> soft-max level. */
		for (i = 31; i >= 0; i--) {
			if (mask & (1 << i))
				break;
		}
		data->smc_state_table.gfx_max_level = i;

		PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
			"Failed to upload boot level to lowest!",
			return -EINVAL);

		PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
			"Failed to upload dpm max level to highest!",
			return -EINVAL);
		break;

	case PP_MCLK:
		/* Same bit-scan scheme for the memory clock levels. */
		for (i = 0; i < 32; i++) {
			if (mask & (1 << i))
				break;
		}
		data->smc_state_table.mem_boot_level = i;

		for (i = 31; i >= 0; i--) {
			if (mask & (1 << i))
				break;
		}
		data->smc_state_table.mem_max_level = i;

		PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
			"Failed to upload boot level to lowest!",
			return -EINVAL);

		PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
			"Failed to upload dpm max level to highest!",
			return -EINVAL);

		break;

	case PP_PCIE:
	default:
		break;
	}

	return 0;
}
4417
4418static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
4419 enum pp_clock_type type, char *buf)
4420{
4421 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4422 struct vega10_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table);
4423 struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table);
4424 struct vega10_pcie_table *pcie_table = &(data->dpm_table.pcie_table);
4425 int i, now, size = 0;
4426
4427 switch (type) {
4428 case PP_SCLK:
4429 if (data->registry_data.sclk_dpm_key_disabled)
4430 break;
4431
4432 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr->smumgr,
4433 PPSMC_MSG_GetCurrentGfxclkIndex),
4434 "Attempt to get current sclk index Failed!",
4435 return -1);
4436 PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(hwmgr->smumgr,
4437 &now),
4438 "Attempt to read sclk index Failed!",
4439 return -1);
4440
4441 for (i = 0; i < sclk_table->count; i++)
4442 size += sprintf(buf + size, "%d: %uMhz %s\n",
4443 i, sclk_table->dpm_levels[i].value / 100,
4444 (i == now) ? "*" : "");
4445 break;
4446 case PP_MCLK:
4447 if (data->registry_data.mclk_dpm_key_disabled)
4448 break;
4449
4450 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr->smumgr,
4451 PPSMC_MSG_GetCurrentUclkIndex),
4452 "Attempt to get current mclk index Failed!",
4453 return -1);
4454 PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(hwmgr->smumgr,
4455 &now),
4456 "Attempt to read mclk index Failed!",
4457 return -1);
4458
4459 for (i = 0; i < mclk_table->count; i++)
4460 size += sprintf(buf + size, "%d: %uMhz %s\n",
4461 i, mclk_table->dpm_levels[i].value / 100,
4462 (i == now) ? "*" : "");
4463 break;
4464 case PP_PCIE:
4465 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr->smumgr,
4466 PPSMC_MSG_GetCurrentLinkIndex),
4467 "Attempt to get current mclk index Failed!",
4468 return -1);
4469 PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(hwmgr->smumgr,
4470 &now),
4471 "Attempt to read mclk index Failed!",
4472 return -1);
4473
4474 for (i = 0; i < pcie_table->count; i++)
4475 size += sprintf(buf + size, "%d: %s %s\n", i,
4476 (pcie_table->pcie_gen[i] == 0) ? "2.5GB, x1" :
4477 (pcie_table->pcie_gen[i] == 1) ? "5.0GB, x16" :
4478 (pcie_table->pcie_gen[i] == 2) ? "8.0GB, x16" : "",
4479 (i == now) ? "*" : "");
4480 break;
4481 default:
4482 break;
4483 }
4484 return size;
4485}
4486
4487static int vega10_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
4488{
4489 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4490 int result = 0;
4491 uint32_t num_turned_on_displays = 1;
4492 Watermarks_t *wm_table = &(data->smc_state_table.water_marks_table);
4493 struct cgs_display_info info = {0};
4494
4495 if ((data->water_marks_bitmap & WaterMarksExist) &&
4496 !(data->water_marks_bitmap & WaterMarksLoaded)) {
4497 result = vega10_copy_table_to_smc(hwmgr->smumgr,
4498 (uint8_t *)wm_table, WMTABLE);
4499 PP_ASSERT_WITH_CODE(result, "Failed to update WMTABLE!", return EINVAL);
4500 data->water_marks_bitmap |= WaterMarksLoaded;
4501 }
4502
4503 if (data->water_marks_bitmap & WaterMarksLoaded) {
4504 cgs_get_active_displays_info(hwmgr->device, &info);
4505 num_turned_on_displays = info.display_count;
4506 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
4507 PPSMC_MSG_NumOfDisplays, num_turned_on_displays);
4508 }
4509
4510 return result;
4511}
4512
4513int vega10_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
4514{
4515 struct vega10_hwmgr *data =
4516 (struct vega10_hwmgr *)(hwmgr->backend);
4517
4518 if (data->smu_features[GNLD_DPM_UVD].supported) {
4519 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
4520 enable,
4521 data->smu_features[GNLD_DPM_UVD].smu_feature_bitmap),
4522 "Attempt to Enable/Disable DPM UVD Failed!",
4523 return -1);
4524 data->smu_features[GNLD_DPM_UVD].enabled = enable;
4525 }
4526 return 0;
4527}
4528
4529static int vega10_power_gate_vce(struct pp_hwmgr *hwmgr, bool bgate)
4530{
4531 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4532
4533 data->vce_power_gated = bgate;
4534 return vega10_enable_disable_vce_dpm(hwmgr, !bgate);
4535}
4536
4537static int vega10_power_gate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
4538{
4539 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4540
4541 data->uvd_power_gated = bgate;
4542 return vega10_enable_disable_uvd_dpm(hwmgr, !bgate);
4543}
4544
4545static inline bool vega10_are_power_levels_equal(
4546 const struct vega10_performance_level *pl1,
4547 const struct vega10_performance_level *pl2)
4548{
4549 return ((pl1->soc_clock == pl2->soc_clock) &&
4550 (pl1->gfx_clock == pl2->gfx_clock) &&
4551 (pl1->mem_clock == pl2->mem_clock));
4552}
4553
4554static int vega10_check_states_equal(struct pp_hwmgr *hwmgr,
4555 const struct pp_hw_power_state *pstate1,
4556 const struct pp_hw_power_state *pstate2, bool *equal)
4557{
4558 const struct vega10_power_state *psa;
4559 const struct vega10_power_state *psb;
4560 int i;
4561
4562 if (pstate1 == NULL || pstate2 == NULL || equal == NULL)
4563 return -EINVAL;
4564
4565 psa = cast_const_phw_vega10_power_state(pstate1);
4566 psb = cast_const_phw_vega10_power_state(pstate2);
4567 /* If the two states don't even have the same number of performance levels they cannot be the same state. */
4568 if (psa->performance_level_count != psb->performance_level_count) {
4569 *equal = false;
4570 return 0;
4571 }
4572
4573 for (i = 0; i < psa->performance_level_count; i++) {
4574 if (!vega10_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) {
4575 /* If we have found even one performance level pair that is different the states are different. */
4576 *equal = false;
4577 return 0;
4578 }
4579 }
4580
4581 /* If all performance levels are the same try to use the UVD clocks to break the tie.*/
4582 *equal = ((psa->uvd_clks.vclk == psb->uvd_clks.vclk) && (psa->uvd_clks.dclk == psb->uvd_clks.dclk));
4583 *equal &= ((psa->vce_clks.evclk == psb->vce_clks.evclk) && (psa->vce_clks.ecclk == psb->vce_clks.ecclk));
4584 *equal &= (psa->sclk_threshold == psb->sclk_threshold);
4585
4586 return 0;
4587}
4588
4589static bool
4590vega10_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
4591{
4592 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4593 bool is_update_required = false;
4594 struct cgs_display_info info = {0, 0, NULL};
4595
4596 cgs_get_active_displays_info(hwmgr->device, &info);
4597
4598 if (data->display_timing.num_existing_displays != info.display_count)
4599 is_update_required = true;
4600
4601 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
4602 if (data->display_timing.min_clock_in_sr != hwmgr->display_config.min_core_set_clock_in_sr)
4603 is_update_required = true;
4604 }
4605
4606 return is_update_required;
4607}
4608
8b9242ed
RZ
4609static int vega10_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
4610{
4611 int tmp_result, result = 0;
4612
4613 tmp_result = (vega10_is_dpm_running(hwmgr)) ? 0 : -1;
4614 PP_ASSERT_WITH_CODE(tmp_result == 0,
4615 "DPM is not running right now, no need to disable DPM!",
4616 return 0);
4617
4618 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
4619 PHM_PlatformCaps_ThermalController))
4620 vega10_disable_thermal_protection(hwmgr);
4621
4622 tmp_result = vega10_disable_power_containment(hwmgr);
4623 PP_ASSERT_WITH_CODE((tmp_result == 0),
4624 "Failed to disable power containment!", result = tmp_result);
4625
4626 tmp_result = vega10_avfs_enable(hwmgr, false);
4627 PP_ASSERT_WITH_CODE((tmp_result == 0),
4628 "Failed to disable AVFS!", result = tmp_result);
4629
4630 tmp_result = vega10_stop_dpm(hwmgr, SMC_DPM_FEATURES);
4631 PP_ASSERT_WITH_CODE((tmp_result == 0),
4632 "Failed to stop DPM!", result = tmp_result);
4633
df057e02
RZ
4634 tmp_result = vega10_disable_deep_sleep_master_switch(hwmgr);
4635 PP_ASSERT_WITH_CODE((tmp_result == 0),
4636 "Failed to disable deep sleep!", result = tmp_result);
4637
4022e4f2
RZ
4638 tmp_result = vega10_disable_ulv(hwmgr);
4639 PP_ASSERT_WITH_CODE((tmp_result == 0),
4640 "Failed to disable ulv!", result = tmp_result);
4641
8b9242ed
RZ
4642 return result;
4643}
4644
4645static int vega10_power_off_asic(struct pp_hwmgr *hwmgr)
4646{
4647 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4648 int result;
4649
4650 result = vega10_disable_dpm_tasks(hwmgr);
4651 PP_ASSERT_WITH_CODE((0 == result),
4652 "[disable_dpm_tasks] Failed to disable DPM!",
4653 );
4654 data->water_marks_bitmap &= ~(WaterMarksLoaded);
4655
4656 return result;
4657}
4658
d6c025d2
EH
4659static void vega10_find_min_clock_index(struct pp_hwmgr *hwmgr,
4660 uint32_t *sclk_idx, uint32_t *mclk_idx,
4661 uint32_t min_sclk, uint32_t min_mclk)
4662{
4663 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4664 struct vega10_dpm_table *dpm_table = &(data->dpm_table);
4665 uint32_t i;
4666
4667 for (i = 0; i < dpm_table->gfx_table.count; i++) {
4668 if (dpm_table->gfx_table.dpm_levels[i].enabled &&
4669 dpm_table->gfx_table.dpm_levels[i].value >= min_sclk) {
4670 *sclk_idx = i;
4671 break;
4672 }
4673 }
4674
4675 for (i = 0; i < dpm_table->mem_table.count; i++) {
4676 if (dpm_table->mem_table.dpm_levels[i].enabled &&
4677 dpm_table->mem_table.dpm_levels[i].value >= min_mclk) {
4678 *mclk_idx = i;
4679 break;
4680 }
4681 }
4682}
4683
4684static int vega10_set_power_profile_state(struct pp_hwmgr *hwmgr,
4685 struct amd_pp_profile *request)
4686{
4687 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
e0ec4506 4688 uint32_t sclk_idx = ~0, mclk_idx = ~0;
d6c025d2
EH
4689
4690 if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_AUTO)
4691 return -EINVAL;
4692
4693 vega10_find_min_clock_index(hwmgr, &sclk_idx, &mclk_idx,
4694 request->min_sclk, request->min_mclk);
4695
e0ec4506 4696 if (sclk_idx != ~0) {
d6c025d2
EH
4697 if (!data->registry_data.sclk_dpm_key_disabled)
4698 PP_ASSERT_WITH_CODE(
4699 !smum_send_msg_to_smc_with_parameter(
4700 hwmgr->smumgr,
4701 PPSMC_MSG_SetSoftMinGfxclkByIndex,
4702 sclk_idx),
4703 "Failed to set soft min sclk index!",
4704 return -EINVAL);
4705 }
4706
e0ec4506 4707 if (mclk_idx != ~0) {
d6c025d2
EH
4708 if (!data->registry_data.mclk_dpm_key_disabled)
4709 PP_ASSERT_WITH_CODE(
4710 !smum_send_msg_to_smc_with_parameter(
4711 hwmgr->smumgr,
4712 PPSMC_MSG_SetSoftMinUclkByIndex,
4713 mclk_idx),
4714 "Failed to set soft min mclk index!",
4715 return -EINVAL);
4716 }
4717
4718 return 0;
4719}
8b9242ed 4720
f83a9991
EH
/* Vega10 hwmgr callback table: wires the generic powerplay hwmgr
 * interface to the Vega10-specific implementations in this file and
 * its companion thermal/powertune sources. */
static const struct pp_hwmgr_func vega10_hwmgr_funcs = {
	.backend_init = vega10_hwmgr_backend_init,
	.backend_fini = vega10_hwmgr_backend_fini,
	.asic_setup = vega10_setup_asic_task,
	.dynamic_state_management_enable = vega10_enable_dpm_tasks,
	.dynamic_state_management_disable = vega10_disable_dpm_tasks,
	.get_num_of_pp_table_entries =
			vega10_get_number_of_powerplay_table_entries,
	.get_power_state_size = vega10_get_power_state_size,
	.get_pp_table_entry = vega10_get_pp_table_entry,
	.patch_boot_state = vega10_patch_boot_state,
	.apply_state_adjust_rules = vega10_apply_state_adjust_rules,
	.power_state_set = vega10_set_power_state_tasks,
	.get_sclk = vega10_dpm_get_sclk,
	.get_mclk = vega10_dpm_get_mclk,
	.notify_smc_display_config_after_ps_adjustment =
			vega10_notify_smc_display_config_after_ps_adjustment,
	.force_dpm_level = vega10_dpm_force_dpm_level,
	.get_temperature = vega10_thermal_get_temperature,
	.stop_thermal_controller = vega10_thermal_stop_thermal_controller,
	.get_fan_speed_info = vega10_fan_ctrl_get_fan_speed_info,
	.get_fan_speed_percent = vega10_fan_ctrl_get_fan_speed_percent,
	.set_fan_speed_percent = vega10_fan_ctrl_set_fan_speed_percent,
	.reset_fan_speed_to_default =
			vega10_fan_ctrl_reset_fan_speed_to_default,
	.get_fan_speed_rpm = vega10_fan_ctrl_get_fan_speed_rpm,
	.set_fan_speed_rpm = vega10_fan_ctrl_set_fan_speed_rpm,
	.uninitialize_thermal_controller =
			vega10_thermal_ctrl_uninitialize_thermal_controller,
	.set_fan_control_mode = vega10_set_fan_control_mode,
	.get_fan_control_mode = vega10_get_fan_control_mode,
	.read_sensor = vega10_read_sensor,
	.get_dal_power_level = vega10_get_dal_power_level,
	.get_clock_by_type_with_latency = vega10_get_clock_by_type_with_latency,
	.get_clock_by_type_with_voltage = vega10_get_clock_by_type_with_voltage,
	.set_watermarks_for_clocks_ranges = vega10_set_watermarks_for_clocks_ranges,
	.display_clock_voltage_request = vega10_display_clock_voltage_request,
	.force_clock_level = vega10_force_clock_level,
	.print_clock_levels = vega10_print_clock_levels,
	.display_config_changed = vega10_display_configuration_changed_task,
	.powergate_uvd = vega10_power_gate_uvd,
	.powergate_vce = vega10_power_gate_vce,
	.check_states_equal = vega10_check_states_equal,
	.check_smc_update_required_for_display_configuration =
			vega10_check_smc_update_required_for_display_configuration,
	.power_off_asic = vega10_power_off_asic,
	.disable_smc_firmware_ctf = vega10_thermal_disable_alert,
	.set_power_profile_state = vega10_set_power_profile_state,
};
4770
4771int vega10_hwmgr_init(struct pp_hwmgr *hwmgr)
4772{
4773 hwmgr->hwmgr_func = &vega10_hwmgr_funcs;
4774 hwmgr->pptable_func = &vega10_pptable_funcs;
4775 pp_vega10_thermal_initialize(hwmgr);
4776 return 0;
4777}