drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
1/*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include <linux/delay.h>
25#include <linux/fb.h>
26#include <linux/module.h>
27#include <linux/slab.h>
28
29#include "hwmgr.h"
30#include "amd_powerplay.h"
31#include "vega10_smumgr.h"
32#include "hardwaremanager.h"
33#include "ppatomfwctrl.h"
34#include "atomfirmware.h"
35#include "cgs_common.h"
36#include "vega10_powertune.h"
37#include "smu9.h"
38#include "smu9_driver_if.h"
39#include "vega10_inc.h"
40#include "pp_soc15.h"
41#include "pppcielanes.h"
42#include "vega10_hwmgr.h"
43#include "vega10_processpptables.h"
44#include "vega10_pptable.h"
45#include "vega10_thermal.h"
46#include "pp_debug.h"
47#include "pp_acpi.h"
48#include "amd_pcie_helpers.h"
49#include "cgs_linux.h"
50#include "ppinterrupt.h"
51#include "pp_overdriver.h"
52
53#define VOLTAGE_SCALE 4
54#define VOLTAGE_VID_OFFSET_SCALE1 625
55#define VOLTAGE_VID_OFFSET_SCALE2 100
56
57#define HBM_MEMORY_CHANNEL_WIDTH 128
58
59uint32_t channel_number[] = {1, 2, 0, 4, 0, 8, 0, 16, 2};
60
61#define MEM_FREQ_LOW_LATENCY 25000
62#define MEM_FREQ_HIGH_LATENCY 80000
63#define MEM_LATENCY_HIGH 245
64#define MEM_LATENCY_LOW 35
65#define MEM_LATENCY_ERR 0xFFFF
66
67#define mmDF_CS_AON0_DramBaseAddress0 0x0044
68#define mmDF_CS_AON0_DramBaseAddress0_BASE_IDX 0
69
70//DF_CS_AON0_DramBaseAddress0
71#define DF_CS_AON0_DramBaseAddress0__AddrRngVal__SHIFT 0x0
72#define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn__SHIFT 0x1
73#define DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT 0x4
74#define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel__SHIFT 0x8
75#define DF_CS_AON0_DramBaseAddress0__DramBaseAddr__SHIFT 0xc
76#define DF_CS_AON0_DramBaseAddress0__AddrRngVal_MASK 0x00000001L
77#define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn_MASK 0x00000002L
78#define DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK 0x000000F0L
79#define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel_MASK 0x00000700L
80#define DF_CS_AON0_DramBaseAddress0__DramBaseAddr_MASK 0xFFFFF000L
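/*
 * Illustrative decode (a sketch, not code shown in this section): given the raw
 * DF_CS_AON0_DramBaseAddress0 register value "reg", the HBM channel count is
 * presumably obtained as
 *   channel_number[(reg & DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK) >>
 *                  DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT]
 * i.e. the IntLvNumChan field indexes the channel_number[] table above.
 */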
81
82const ULONG PhwVega10_Magic = (ULONG)(PHM_VIslands_Magic);
83
84struct vega10_power_state *cast_phw_vega10_power_state(
85 struct pp_hw_power_state *hw_ps)
86{
87 PP_ASSERT_WITH_CODE((PhwVega10_Magic == hw_ps->magic),
88 "Invalid Powerstate Type!",
89 return NULL;);
90
91 return (struct vega10_power_state *)hw_ps;
92}
93
94const struct vega10_power_state *cast_const_phw_vega10_power_state(
95 const struct pp_hw_power_state *hw_ps)
96{
97 PP_ASSERT_WITH_CODE((PhwVega10_Magic == hw_ps->magic),
98 "Invalid Powerstate Type!",
99 return NULL;);
100
101 return (const struct vega10_power_state *)hw_ps;
102}
103
104static void vega10_set_default_registry_data(struct pp_hwmgr *hwmgr)
105{
106 struct vega10_hwmgr *data =
107 (struct vega10_hwmgr *)(hwmgr->backend);
108
109 data->registry_data.sclk_dpm_key_disabled =
110 hwmgr->feature_mask & PP_SCLK_DPM_MASK ? false : true;
111 data->registry_data.socclk_dpm_key_disabled =
112 hwmgr->feature_mask & PP_SOCCLK_DPM_MASK ? false : true;
113 data->registry_data.mclk_dpm_key_disabled =
114 hwmgr->feature_mask & PP_MCLK_DPM_MASK ? false : true;
115 data->registry_data.pcie_dpm_key_disabled =
116 hwmgr->feature_mask & PP_PCIE_DPM_MASK ? false : true;
117
118 data->registry_data.dcefclk_dpm_key_disabled =
119 hwmgr->feature_mask & PP_DCEFCLK_DPM_MASK ? false : true;
120
121 if (hwmgr->feature_mask & PP_POWER_CONTAINMENT_MASK) {
122 data->registry_data.power_containment_support = 1;
123 data->registry_data.enable_pkg_pwr_tracking_feature = 1;
124 data->registry_data.enable_tdc_limit_feature = 1;
125 }
126
127 data->registry_data.clock_stretcher_support =
128 hwmgr->feature_mask & PP_CLOCK_STRETCH_MASK ? true : false;
129
130 data->registry_data.ulv_support =
131 hwmgr->feature_mask & PP_ULV_MASK ? true : false;
132
133 data->registry_data.sclk_deep_sleep_support =
134 hwmgr->feature_mask & PP_SCLK_DEEP_SLEEP_MASK ? true : false;
135
136 data->registry_data.disable_water_mark = 0;
137
138 data->registry_data.fan_control_support = 1;
139 data->registry_data.thermal_support = 1;
140 data->registry_data.fw_ctf_enabled = 1;
141
142 data->registry_data.avfs_support = 1;
143 data->registry_data.led_dpm_enabled = 1;
144
145 data->registry_data.vr0hot_enabled = 1;
146 data->registry_data.vr1hot_enabled = 1;
147 data->registry_data.regulator_hot_gpio_support = 1;
148
149 data->display_voltage_mode = PPVEGA10_VEGA10DISPLAYVOLTAGEMODE_DFLT;
150 data->dcef_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
151 data->dcef_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
152 data->dcef_clk_quad_eqn_c = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
153 data->disp_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
154 data->disp_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
155 data->disp_clk_quad_eqn_c = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
156 data->pixel_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
157 data->pixel_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
158 data->pixel_clk_quad_eqn_c = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
159 data->phy_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
160 data->phy_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
161 data->phy_clk_quad_eqn_c = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
162
163 data->gfxclk_average_alpha = PPVEGA10_VEGA10GFXCLKAVERAGEALPHA_DFLT;
164 data->socclk_average_alpha = PPVEGA10_VEGA10SOCCLKAVERAGEALPHA_DFLT;
165 data->uclk_average_alpha = PPVEGA10_VEGA10UCLKCLKAVERAGEALPHA_DFLT;
166 data->gfx_activity_average_alpha = PPVEGA10_VEGA10GFXACTIVITYAVERAGEALPHA_DFLT;
167}
168
169static int vega10_set_features_platform_caps(struct pp_hwmgr *hwmgr)
170{
171 struct vega10_hwmgr *data =
172 (struct vega10_hwmgr *)(hwmgr->backend);
173 struct phm_ppt_v2_information *table_info =
174 (struct phm_ppt_v2_information *)hwmgr->pptable;
175 struct cgs_system_info sys_info = {0};
176 int result;
177
178 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
179 PHM_PlatformCaps_SclkDeepSleep);
180
181 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
182 PHM_PlatformCaps_DynamicPatchPowerState);
183
184 if (data->vddci_control == VEGA10_VOLTAGE_CONTROL_NONE)
185 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
186 PHM_PlatformCaps_ControlVDDCI);
187
188 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
189 PHM_PlatformCaps_TablelessHardwareInterface);
190
191 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
192 PHM_PlatformCaps_EnableSMU7ThermalManagement);
193
194 sys_info.size = sizeof(struct cgs_system_info);
195 sys_info.info_id = CGS_SYSTEM_INFO_PG_FLAGS;
196 result = cgs_query_system_info(hwmgr->device, &sys_info);
197
198 if (!result && (sys_info.value & AMD_PG_SUPPORT_UVD))
199 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
200 PHM_PlatformCaps_UVDPowerGating);
201
202 if (!result && (sys_info.value & AMD_PG_SUPPORT_VCE))
203 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
204 PHM_PlatformCaps_VCEPowerGating);
205
206 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
207 PHM_PlatformCaps_UnTabledHardwareInterface);
208
209 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
210 PHM_PlatformCaps_FanSpeedInTableIsRPM);
211
212 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
213 PHM_PlatformCaps_ODFuzzyFanControlSupport);
214
215 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
216 PHM_PlatformCaps_DynamicPowerManagement);
217
218 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
219 PHM_PlatformCaps_SMC);
220
221 /* power tune caps */
222 /* assume disabled */
223 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
224 PHM_PlatformCaps_PowerContainment);
225 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
226 PHM_PlatformCaps_SQRamping);
227 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
228 PHM_PlatformCaps_DBRamping);
229 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
230 PHM_PlatformCaps_TDRamping);
231 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
232 PHM_PlatformCaps_TCPRamping);
233
234 if (data->registry_data.power_containment_support)
235 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
236 PHM_PlatformCaps_PowerContainment);
237 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
238 PHM_PlatformCaps_CAC);
239
240 if (table_info->tdp_table->usClockStretchAmount &&
241 data->registry_data.clock_stretcher_support)
242 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
243 PHM_PlatformCaps_ClockStretcher);
244
245 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
246 PHM_PlatformCaps_RegulatorHot);
247 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
248 PHM_PlatformCaps_AutomaticDCTransition);
249
250 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
251 PHM_PlatformCaps_UVDDPM);
252 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
253 PHM_PlatformCaps_VCEDPM);
254
255 return 0;
256}
257
258static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
259{
260 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
261 int i;
262
263 vega10_initialize_power_tune_defaults(hwmgr);
264
265 for (i = 0; i < GNLD_FEATURES_MAX; i++) {
266 data->smu_features[i].smu_feature_id = 0xffff;
267 data->smu_features[i].smu_feature_bitmap = 1 << i;
268 data->smu_features[i].enabled = false;
269 data->smu_features[i].supported = false;
270 }
271
272 data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_id =
273 FEATURE_DPM_PREFETCHER_BIT;
274 data->smu_features[GNLD_DPM_GFXCLK].smu_feature_id =
275 FEATURE_DPM_GFXCLK_BIT;
276 data->smu_features[GNLD_DPM_UCLK].smu_feature_id =
277 FEATURE_DPM_UCLK_BIT;
278 data->smu_features[GNLD_DPM_SOCCLK].smu_feature_id =
279 FEATURE_DPM_SOCCLK_BIT;
280 data->smu_features[GNLD_DPM_UVD].smu_feature_id =
281 FEATURE_DPM_UVD_BIT;
282 data->smu_features[GNLD_DPM_VCE].smu_feature_id =
283 FEATURE_DPM_VCE_BIT;
284 data->smu_features[GNLD_DPM_MP0CLK].smu_feature_id =
285 FEATURE_DPM_MP0CLK_BIT;
286 data->smu_features[GNLD_DPM_LINK].smu_feature_id =
287 FEATURE_DPM_LINK_BIT;
288 data->smu_features[GNLD_DPM_DCEFCLK].smu_feature_id =
289 FEATURE_DPM_DCEFCLK_BIT;
290 data->smu_features[GNLD_ULV].smu_feature_id =
291 FEATURE_ULV_BIT;
292 data->smu_features[GNLD_AVFS].smu_feature_id =
293 FEATURE_AVFS_BIT;
294 data->smu_features[GNLD_DS_GFXCLK].smu_feature_id =
295 FEATURE_DS_GFXCLK_BIT;
296 data->smu_features[GNLD_DS_SOCCLK].smu_feature_id =
297 FEATURE_DS_SOCCLK_BIT;
298 data->smu_features[GNLD_DS_LCLK].smu_feature_id =
299 FEATURE_DS_LCLK_BIT;
300 data->smu_features[GNLD_PPT].smu_feature_id =
301 FEATURE_PPT_BIT;
302 data->smu_features[GNLD_TDC].smu_feature_id =
303 FEATURE_TDC_BIT;
304 data->smu_features[GNLD_THERMAL].smu_feature_id =
305 FEATURE_THERMAL_BIT;
306 data->smu_features[GNLD_GFX_PER_CU_CG].smu_feature_id =
307 FEATURE_GFX_PER_CU_CG_BIT;
308 data->smu_features[GNLD_RM].smu_feature_id =
309 FEATURE_RM_BIT;
310 data->smu_features[GNLD_DS_DCEFCLK].smu_feature_id =
311 FEATURE_DS_DCEFCLK_BIT;
312 data->smu_features[GNLD_ACDC].smu_feature_id =
313 FEATURE_ACDC_BIT;
314 data->smu_features[GNLD_VR0HOT].smu_feature_id =
315 FEATURE_VR0HOT_BIT;
316 data->smu_features[GNLD_VR1HOT].smu_feature_id =
317 FEATURE_VR1HOT_BIT;
318 data->smu_features[GNLD_FW_CTF].smu_feature_id =
319 FEATURE_FW_CTF_BIT;
320 data->smu_features[GNLD_LED_DISPLAY].smu_feature_id =
321 FEATURE_LED_DISPLAY_BIT;
322 data->smu_features[GNLD_FAN_CONTROL].smu_feature_id =
323 FEATURE_FAN_CONTROL_BIT;
324 data->smu_features[GNLD_VOLTAGE_CONTROLLER].smu_feature_id =
325 FEATURE_VOLTAGE_CONTROLLER_BIT;
326
327 if (!data->registry_data.prefetcher_dpm_key_disabled)
328 data->smu_features[GNLD_DPM_PREFETCHER].supported = true;
329
330 if (!data->registry_data.sclk_dpm_key_disabled)
331 data->smu_features[GNLD_DPM_GFXCLK].supported = true;
332
333 if (!data->registry_data.mclk_dpm_key_disabled)
334 data->smu_features[GNLD_DPM_UCLK].supported = true;
335
336 if (!data->registry_data.socclk_dpm_key_disabled)
337 data->smu_features[GNLD_DPM_SOCCLK].supported = true;
338
339 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
340 PHM_PlatformCaps_UVDDPM))
341 data->smu_features[GNLD_DPM_UVD].supported = true;
342
343 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
344 PHM_PlatformCaps_VCEDPM))
345 data->smu_features[GNLD_DPM_VCE].supported = true;
346
347 if (!data->registry_data.pcie_dpm_key_disabled)
348 data->smu_features[GNLD_DPM_LINK].supported = true;
349
350 if (!data->registry_data.dcefclk_dpm_key_disabled)
351 data->smu_features[GNLD_DPM_DCEFCLK].supported = true;
352
353 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
354 PHM_PlatformCaps_SclkDeepSleep) &&
355 data->registry_data.sclk_deep_sleep_support) {
356 data->smu_features[GNLD_DS_GFXCLK].supported = true;
357 data->smu_features[GNLD_DS_SOCCLK].supported = true;
358 data->smu_features[GNLD_DS_LCLK].supported = true;
359 data->smu_features[GNLD_DS_DCEFCLK].supported = true;
360 }
361
362 if (data->registry_data.enable_pkg_pwr_tracking_feature)
363 data->smu_features[GNLD_PPT].supported = true;
364
365 if (data->registry_data.enable_tdc_limit_feature)
366 data->smu_features[GNLD_TDC].supported = true;
367
368 if (data->registry_data.thermal_support)
369 data->smu_features[GNLD_THERMAL].supported = true;
370
371 if (data->registry_data.fan_control_support)
372 data->smu_features[GNLD_FAN_CONTROL].supported = true;
373
374 if (data->registry_data.fw_ctf_enabled)
375 data->smu_features[GNLD_FW_CTF].supported = true;
376
377 if (data->registry_data.avfs_support)
378 data->smu_features[GNLD_AVFS].supported = true;
379
380 if (data->registry_data.led_dpm_enabled)
381 data->smu_features[GNLD_LED_DISPLAY].supported = true;
382
383 if (data->registry_data.vr1hot_enabled)
384 data->smu_features[GNLD_VR1HOT].supported = true;
385
386 if (data->registry_data.vr0hot_enabled)
387 data->smu_features[GNLD_VR0HOT].supported = true;
388
389}
390
391#ifdef PPLIB_VEGA10_EVV_SUPPORT
392static int vega10_get_socclk_for_voltage_evv(struct pp_hwmgr *hwmgr,
393 phm_ppt_v1_voltage_lookup_table *lookup_table,
394 uint16_t virtual_voltage_id, int32_t *socclk)
395{
396 uint8_t entry_id;
397 uint8_t voltage_id;
398 struct phm_ppt_v2_information *table_info =
399 (struct phm_ppt_v2_information *)(hwmgr->pptable);
400
401 PP_ASSERT_WITH_CODE(lookup_table->count != 0,
402 "Lookup table is empty",
403 return -EINVAL);
404
405 /* search for leakage voltage ID 0xff01 ~ 0xff08 and socclk */
406 for (entry_id = 0; entry_id < table_info->vdd_dep_on_socclk->count; entry_id++) {
407 voltage_id = table_info->vdd_dep_on_socclk->entries[entry_id].vddInd;
408 if (lookup_table->entries[voltage_id].us_vdd == virtual_voltage_id)
409 break;
410 }
411
412 PP_ASSERT_WITH_CODE(entry_id < table_info->vdd_dep_on_socclk->count,
413 "Can't find requested voltage id in vdd_dep_on_socclk table!",
414 return -EINVAL);
415
416 *socclk = table_info->vdd_dep_on_socclk->entries[entry_id].clk;
417
418 return 0;
419}
420
421#define ATOM_VIRTUAL_VOLTAGE_ID0 0xff01
422/**
423* Get Leakage VDDC based on leakage ID.
424*
425* @param hwmgr the address of the powerplay hardware manager.
426* @return always 0.
427*/
428static int vega10_get_evv_voltages(struct pp_hwmgr *hwmgr)
429{
430 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
431 uint16_t vv_id;
432 uint32_t vddc = 0;
433 uint16_t i, j;
434 uint32_t sclk = 0;
435 struct phm_ppt_v2_information *table_info =
436 (struct phm_ppt_v2_information *)hwmgr->pptable;
437 struct phm_ppt_v1_clock_voltage_dependency_table *socclk_table =
438 table_info->vdd_dep_on_socclk;
439 int result;
440
441 for (i = 0; i < VEGA10_MAX_LEAKAGE_COUNT; i++) {
442 vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
443
444 if (!vega10_get_socclk_for_voltage_evv(hwmgr,
445 table_info->vddc_lookup_table, vv_id, &sclk)) {
446 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
447 PHM_PlatformCaps_ClockStretcher)) {
448 for (j = 1; j < socclk_table->count; j++) {
449 if (socclk_table->entries[j].clk == sclk &&
450 socclk_table->entries[j].cks_enable == 0) {
451 sclk += 5000;
452 break;
453 }
454 }
455 }
456
457 PP_ASSERT_WITH_CODE(!atomctrl_get_voltage_evv_on_sclk_ai(hwmgr,
458 VOLTAGE_TYPE_VDDC, sclk, vv_id, &vddc),
459 "Error retrieving EVV voltage value!",
460 continue);
461
462
463 /* need to make sure vddc is less than 2v or else, it could burn the ASIC. */
464 PP_ASSERT_WITH_CODE((vddc < 2000 && vddc != 0),
465 "Invalid VDDC value", result = -EINVAL;);
466
467 /* the voltage should not be zero nor equal to leakage ID */
468 if (vddc != 0 && vddc != vv_id) {
469 data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = (uint16_t)(vddc/100);
470 data->vddc_leakage.leakage_id[data->vddc_leakage.count] = vv_id;
471 data->vddc_leakage.count++;
472 }
473 }
474 }
475
476 return 0;
477}
478
479/**
480 * Change virtual leakage voltage to actual value.
481 *
482 * @param hwmgr the address of the powerplay hardware manager.
483 * @param voltage pointer to the voltage value to be patched
484 * @param leakage_table pointer to the leakage voltage table
485 */
486static void vega10_patch_with_vdd_leakage(struct pp_hwmgr *hwmgr,
487 uint16_t *voltage, struct vega10_leakage_voltage *leakage_table)
488{
489 uint32_t index;
490
491 /* search for leakage voltage ID 0xff01 ~ 0xff08 */
492 for (index = 0; index < leakage_table->count; index++) {
493 /* if this voltage matches a leakage voltage ID */
494 /* patch with actual leakage voltage */
495 if (leakage_table->leakage_id[index] == *voltage) {
496 *voltage = leakage_table->actual_voltage[index];
497 break;
498 }
499 }
500
501 if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
502 pr_info("Voltage value looks like a Leakage ID \
503 but it's not patched\n");
504}
505
506/**
507* Patch voltage lookup table by EVV leakages.
508*
509* @param hwmgr the address of the powerplay hardware manager.
510* @param pointer to voltage lookup table
511* @param pointer to leakage table
512* @return always 0
513*/
514static int vega10_patch_lookup_table_with_leakage(struct pp_hwmgr *hwmgr,
515 phm_ppt_v1_voltage_lookup_table *lookup_table,
516 struct vega10_leakage_voltage *leakage_table)
517{
518 uint32_t i;
519
520 for (i = 0; i < lookup_table->count; i++)
521 vega10_patch_with_vdd_leakage(hwmgr,
522 &lookup_table->entries[i].us_vdd, leakage_table);
523
524 return 0;
525}
526
527static int vega10_patch_clock_voltage_limits_with_vddc_leakage(
528 struct pp_hwmgr *hwmgr, struct vega10_leakage_voltage *leakage_table,
529 uint16_t *vddc)
530{
531 vega10_patch_with_vdd_leakage(hwmgr, (uint16_t *)vddc, leakage_table);
532
533 return 0;
534}
535#endif
536
537static int vega10_patch_voltage_dependency_tables_with_lookup_table(
538 struct pp_hwmgr *hwmgr)
539{
540 uint8_t entry_id;
541 uint8_t voltage_id;
542 struct phm_ppt_v2_information *table_info =
543 (struct phm_ppt_v2_information *)(hwmgr->pptable);
544 struct phm_ppt_v1_clock_voltage_dependency_table *socclk_table =
545 table_info->vdd_dep_on_socclk;
546 struct phm_ppt_v1_clock_voltage_dependency_table *gfxclk_table =
547 table_info->vdd_dep_on_sclk;
548 struct phm_ppt_v1_clock_voltage_dependency_table *dcefclk_table =
549 table_info->vdd_dep_on_dcefclk;
550 struct phm_ppt_v1_clock_voltage_dependency_table *pixclk_table =
551 table_info->vdd_dep_on_pixclk;
552 struct phm_ppt_v1_clock_voltage_dependency_table *dspclk_table =
553 table_info->vdd_dep_on_dispclk;
554 struct phm_ppt_v1_clock_voltage_dependency_table *phyclk_table =
555 table_info->vdd_dep_on_phyclk;
556 struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table =
557 table_info->vdd_dep_on_mclk;
558 struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
559 table_info->mm_dep_table;
560
561 for (entry_id = 0; entry_id < socclk_table->count; entry_id++) {
562 voltage_id = socclk_table->entries[entry_id].vddInd;
563 socclk_table->entries[entry_id].vddc =
564 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
565 }
566
567 for (entry_id = 0; entry_id < gfxclk_table->count; entry_id++) {
568 voltage_id = gfxclk_table->entries[entry_id].vddInd;
569 gfxclk_table->entries[entry_id].vddc =
570 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
571 }
572
573 for (entry_id = 0; entry_id < dcefclk_table->count; entry_id++) {
574 voltage_id = dcefclk_table->entries[entry_id].vddInd;
575 dcefclk_table->entries[entry_id].vddc =
576 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
577 }
578
579 for (entry_id = 0; entry_id < pixclk_table->count; entry_id++) {
580 voltage_id = pixclk_table->entries[entry_id].vddInd;
581 pixclk_table->entries[entry_id].vddc =
582 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
583 }
584
585 for (entry_id = 0; entry_id < dspclk_table->count; entry_id++) {
586 voltage_id = dspclk_table->entries[entry_id].vddInd;
587 dspclk_table->entries[entry_id].vddc =
588 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
589 }
590
591 for (entry_id = 0; entry_id < phyclk_table->count; entry_id++) {
592 voltage_id = phyclk_table->entries[entry_id].vddInd;
593 phyclk_table->entries[entry_id].vddc =
594 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
595 }
596
597 for (entry_id = 0; entry_id < mclk_table->count; ++entry_id) {
598 voltage_id = mclk_table->entries[entry_id].vddInd;
599 mclk_table->entries[entry_id].vddc =
600 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
601 voltage_id = mclk_table->entries[entry_id].vddciInd;
602 mclk_table->entries[entry_id].vddci =
603 table_info->vddci_lookup_table->entries[voltage_id].us_vdd;
604 voltage_id = mclk_table->entries[entry_id].mvddInd;
605 mclk_table->entries[entry_id].mvdd =
606 table_info->vddmem_lookup_table->entries[voltage_id].us_vdd;
607 }
608
609 for (entry_id = 0; entry_id < mm_table->count; ++entry_id) {
610 voltage_id = mm_table->entries[entry_id].vddcInd;
611 mm_table->entries[entry_id].vddc =
612 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
613 }
614
615 return 0;
616
617}
618
619static int vega10_sort_lookup_table(struct pp_hwmgr *hwmgr,
620 struct phm_ppt_v1_voltage_lookup_table *lookup_table)
621{
622 uint32_t table_size, i, j;
623 struct phm_ppt_v1_voltage_lookup_record tmp_voltage_lookup_record;
624
625 PP_ASSERT_WITH_CODE(lookup_table && lookup_table->count,
626 "Lookup table is empty", return -EINVAL);
627
628 table_size = lookup_table->count;
629
630 /* Sort voltage entries in ascending order of us_vdd (insertion sort) */
631 for (i = 0; i < table_size - 1; i++) {
632 for (j = i + 1; j > 0; j--) {
633 if (lookup_table->entries[j].us_vdd <
634 lookup_table->entries[j - 1].us_vdd) {
635 tmp_voltage_lookup_record = lookup_table->entries[j - 1];
636 lookup_table->entries[j - 1] = lookup_table->entries[j];
637 lookup_table->entries[j] = tmp_voltage_lookup_record;
638 }
639 }
640 }
641
642 return 0;
643}
644
645static int vega10_complete_dependency_tables(struct pp_hwmgr *hwmgr)
646{
647 int result = 0;
648 int tmp_result;
649 struct phm_ppt_v2_information *table_info =
650 (struct phm_ppt_v2_information *)(hwmgr->pptable);
651#ifdef PPLIB_VEGA10_EVV_SUPPORT
652 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
653
654 tmp_result = vega10_patch_lookup_table_with_leakage(hwmgr,
655 table_info->vddc_lookup_table, &(data->vddc_leakage));
656 if (tmp_result)
657 result = tmp_result;
658
659 tmp_result = vega10_patch_clock_voltage_limits_with_vddc_leakage(hwmgr,
660 &(data->vddc_leakage), &table_info->max_clock_voltage_on_dc.vddc);
661 if (tmp_result)
662 result = tmp_result;
663#endif
664
665 tmp_result = vega10_patch_voltage_dependency_tables_with_lookup_table(hwmgr);
666 if (tmp_result)
667 result = tmp_result;
668
669 tmp_result = vega10_sort_lookup_table(hwmgr, table_info->vddc_lookup_table);
670 if (tmp_result)
671 result = tmp_result;
672
673 return result;
674}
675
676static int vega10_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr)
677{
678 struct phm_ppt_v2_information *table_info =
679 (struct phm_ppt_v2_information *)(hwmgr->pptable);
680 struct phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table =
681 table_info->vdd_dep_on_socclk;
682 struct phm_ppt_v1_clock_voltage_dependency_table *allowed_mclk_vdd_table =
683 table_info->vdd_dep_on_mclk;
684
685 PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table,
686 "VDD dependency on SCLK table is missing. \
687 This table is mandatory", return -EINVAL);
688 PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1,
689 "VDD dependency on SCLK table is empty. \
690 This table is mandatory", return -EINVAL);
691
692 PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table,
693 "VDD dependency on MCLK table is missing. \
694 This table is mandatory", return -EINVAL);
695 PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1,
696 "VDD dependency on MCLK table is empty. \
697 This table is mandatory", return -EINVAL);
698
699 table_info->max_clock_voltage_on_ac.sclk =
700 allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].clk;
701 table_info->max_clock_voltage_on_ac.mclk =
702 allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].clk;
703 table_info->max_clock_voltage_on_ac.vddc =
704 allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc;
705 table_info->max_clock_voltage_on_ac.vddci =
706 allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].vddci;
707
708 hwmgr->dyn_state.max_clock_voltage_on_ac.sclk =
709 table_info->max_clock_voltage_on_ac.sclk;
710 hwmgr->dyn_state.max_clock_voltage_on_ac.mclk =
711 table_info->max_clock_voltage_on_ac.mclk;
712 hwmgr->dyn_state.max_clock_voltage_on_ac.vddc =
713 table_info->max_clock_voltage_on_ac.vddc;
714 hwmgr->dyn_state.max_clock_voltage_on_ac.vddci =
715 table_info->max_clock_voltage_on_ac.vddci;
716
717 return 0;
718}
719
720static int vega10_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
721{
722 kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
723 hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL;
724
725 kfree(hwmgr->backend);
726 hwmgr->backend = NULL;
727
728 return 0;
729}
730
731static int vega10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
732{
733 int result = 0;
734 struct vega10_hwmgr *data;
735 uint32_t config_telemetry = 0;
736 struct pp_atomfwctrl_voltage_table vol_table;
737 struct cgs_system_info sys_info = {0};
738
739 data = kzalloc(sizeof(struct vega10_hwmgr), GFP_KERNEL);
740 if (data == NULL)
741 return -ENOMEM;
742
743 hwmgr->backend = data;
744
745 vega10_set_default_registry_data(hwmgr);
746
747 data->disable_dpm_mask = 0xff;
748 data->workload_mask = 0xff;
749
750 /* need to set voltage control types before EVV patching */
751 data->vddc_control = VEGA10_VOLTAGE_CONTROL_NONE;
752 data->mvdd_control = VEGA10_VOLTAGE_CONTROL_NONE;
753 data->vddci_control = VEGA10_VOLTAGE_CONTROL_NONE;
754
755 /* VDDCR_SOC */
756 if (pp_atomfwctrl_is_voltage_controlled_by_gpio_v4(hwmgr,
757 VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2)) {
758 if (!pp_atomfwctrl_get_voltage_table_v4(hwmgr,
759 VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2,
760 &vol_table)) {
761 config_telemetry = ((vol_table.telemetry_slope << 8) & 0xff00) |
762 (vol_table.telemetry_offset & 0xff);
763 data->vddc_control = VEGA10_VOLTAGE_CONTROL_BY_SVID2;
764 }
765 } else {
766 kfree(hwmgr->backend);
767 hwmgr->backend = NULL;
768 PP_ASSERT_WITH_CODE(false,
769 "VDDCR_SOC is not SVID2!",
770 return -1);
771 }
772
773 /* MVDDC */
774 if (pp_atomfwctrl_is_voltage_controlled_by_gpio_v4(hwmgr,
775 VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2)) {
776 if (!pp_atomfwctrl_get_voltage_table_v4(hwmgr,
777 VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2,
778 &vol_table)) {
779 config_telemetry |=
780 ((vol_table.telemetry_slope << 24) & 0xff000000) |
781 ((vol_table.telemetry_offset << 16) & 0xff0000);
782 data->mvdd_control = VEGA10_VOLTAGE_CONTROL_BY_SVID2;
783 }
784 }
785
786 /* VDDCI_MEM */
787 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
788 PHM_PlatformCaps_ControlVDDCI)) {
789 if (pp_atomfwctrl_is_voltage_controlled_by_gpio_v4(hwmgr,
790 VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
791 data->vddci_control = VEGA10_VOLTAGE_CONTROL_BY_GPIO;
792 }
793
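/*
 * config_telemetry layout as packed above: bits [7:0] VDDC telemetry offset,
 * [15:8] VDDC telemetry slope, [23:16] MVDD telemetry offset, [31:24] MVDD
 * telemetry slope.
 */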
794 data->config_telemetry = config_telemetry;
795
796 vega10_set_features_platform_caps(hwmgr);
797
798 vega10_init_dpm_defaults(hwmgr);
799
800#ifdef PPLIB_VEGA10_EVV_SUPPORT
801 /* Get leakage voltage based on leakage ID. */
802 PP_ASSERT_WITH_CODE(!vega10_get_evv_voltages(hwmgr),
803 "Get EVV Voltage Failed. Abort Driver loading!",
804 return -1);
805#endif
806
807 /* Patch our voltage dependency table with actual leakage voltage
808 * We need to perform leakage translation before it's used by other functions
809 */
810 vega10_complete_dependency_tables(hwmgr);
811
812 /* Parse pptable data read from VBIOS */
813 vega10_set_private_data_based_on_pptable(hwmgr);
814
815 data->is_tlu_enabled = false;
816
817 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
818 VEGA10_MAX_HARDWARE_POWERLEVELS;
819 hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
820 hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
821
822 hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */
823 /* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. */
824 hwmgr->platform_descriptor.clockStep.engineClock = 500;
825 hwmgr->platform_descriptor.clockStep.memoryClock = 500;
826
827 sys_info.size = sizeof(struct cgs_system_info);
828 sys_info.info_id = CGS_SYSTEM_INFO_GFX_CU_INFO;
829 result = cgs_query_system_info(hwmgr->device, &sys_info);
830 data->total_active_cus = sys_info.value;
831 /* Setup default Overdrive Fan control settings */
832 data->odn_fan_table.target_fan_speed =
833 hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM;
834 data->odn_fan_table.target_temperature =
835 hwmgr->thermal_controller.
836 advanceFanControlParameters.ucTargetTemperature;
837 data->odn_fan_table.min_performance_clock =
838 hwmgr->thermal_controller.advanceFanControlParameters.
839 ulMinFanSCLKAcousticLimit;
840 data->odn_fan_table.min_fan_limit =
841 hwmgr->thermal_controller.
842 advanceFanControlParameters.usFanPWMMinLimit *
843 hwmgr->thermal_controller.fanInfo.ulMaxRPM / 100;
844
845 return result;
846}
847
848static int vega10_init_sclk_threshold(struct pp_hwmgr *hwmgr)
849{
850 struct vega10_hwmgr *data =
851 (struct vega10_hwmgr *)(hwmgr->backend);
852
853 data->low_sclk_interrupt_threshold = 0;
854
855 return 0;
856}
857
858static int vega10_setup_dpm_led_config(struct pp_hwmgr *hwmgr)
859{
860 struct vega10_hwmgr *data =
861 (struct vega10_hwmgr *)(hwmgr->backend);
862 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
863
864 struct pp_atomfwctrl_voltage_table table;
865 uint8_t i, j;
866 uint32_t mask = 0;
867 uint32_t tmp;
868 int32_t ret = 0;
869
870 ret = pp_atomfwctrl_get_voltage_table_v4(hwmgr, VOLTAGE_TYPE_LEDDPM,
871 VOLTAGE_OBJ_GPIO_LUT, &table);
872
873 if (!ret) {
874 tmp = table.mask_low;
875 for (i = 0, j = 0; i < 32; i++) {
876 if (tmp & 1) {
877 mask |= (uint32_t)(i << (8 * j));
878 if (++j >= 3)
879 break;
880 }
881 tmp >>= 1;
882 }
883 }
884
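/*
 * "mask" now packs the bit positions of the first three set bits found in
 * table.mask_low, one position per byte: LED pin 0 in bits [7:0], pin 1 in
 * [15:8], pin 2 in [23:16]. Split them out below.
 */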
885 pp_table->LedPin0 = (uint8_t)(mask & 0xff);
886 pp_table->LedPin1 = (uint8_t)((mask >> 8) & 0xff);
887 pp_table->LedPin2 = (uint8_t)((mask >> 16) & 0xff);
888 return 0;
889}
890
891static int vega10_setup_asic_task(struct pp_hwmgr *hwmgr)
892{
893 PP_ASSERT_WITH_CODE(!vega10_init_sclk_threshold(hwmgr),
894 "Failed to init sclk threshold!",
895 return -EINVAL);
896
897 PP_ASSERT_WITH_CODE(!vega10_setup_dpm_led_config(hwmgr),
898 "Failed to set up led dpm config!",
899 return -EINVAL);
900
901 return 0;
902}
903
904static bool vega10_is_dpm_running(struct pp_hwmgr *hwmgr)
905{
906 uint32_t features_enabled;
907
908 if (!vega10_get_smc_features(hwmgr->smumgr, &features_enabled)) {
909 if (features_enabled & SMC_DPM_FEATURES)
910 return true;
911 }
912 return false;
913}
914
915/**
916* Remove repeated voltage values and create table with unique values.
917*
918* @param hwmgr the address of the powerplay hardware manager.
919 * @param vol_table the pointer to the voltage table to trim in place
920* @return 0 in success
921*/
922
923static int vega10_trim_voltage_table(struct pp_hwmgr *hwmgr,
924 struct pp_atomfwctrl_voltage_table *vol_table)
925{
926 uint32_t i, j;
927 uint16_t vvalue;
928 bool found = false;
929 struct pp_atomfwctrl_voltage_table *table;
930
931 PP_ASSERT_WITH_CODE(vol_table,
932 "Voltage Table empty.", return -EINVAL);
933 table = kzalloc(sizeof(struct pp_atomfwctrl_voltage_table),
934 GFP_KERNEL);
935
936 if (!table)
937 return -ENOMEM;
938
939 table->mask_low = vol_table->mask_low;
940 table->phase_delay = vol_table->phase_delay;
941
942 for (i = 0; i < vol_table->count; i++) {
943 vvalue = vol_table->entries[i].value;
944 found = false;
945
946 for (j = 0; j < table->count; j++) {
947 if (vvalue == table->entries[j].value) {
948 found = true;
949 break;
950 }
951 }
952
953 if (!found) {
954 table->entries[table->count].value = vvalue;
955 table->entries[table->count].smio_low =
956 vol_table->entries[i].smio_low;
957 table->count++;
958 }
959 }
960
961 memcpy(vol_table, table, sizeof(struct pp_atomfwctrl_voltage_table));
962 kfree(table);
963
964 return 0;
965}
966
967static int vega10_get_mvdd_voltage_table(struct pp_hwmgr *hwmgr,
968 phm_ppt_v1_clock_voltage_dependency_table *dep_table,
969 struct pp_atomfwctrl_voltage_table *vol_table)
970{
971 int i;
972
973 PP_ASSERT_WITH_CODE(dep_table->count,
974 "Voltage Dependency Table empty.",
975 return -EINVAL);
976
977 vol_table->mask_low = 0;
978 vol_table->phase_delay = 0;
979 vol_table->count = dep_table->count;
980
981 for (i = 0; i < vol_table->count; i++) {
982 vol_table->entries[i].value = dep_table->entries[i].mvdd;
983 vol_table->entries[i].smio_low = 0;
984 }
985
986 PP_ASSERT_WITH_CODE(!vega10_trim_voltage_table(hwmgr,
987 vol_table),
988 "Failed to trim MVDD Table!",
989 return -1);
990
991 return 0;
992}
993
994static int vega10_get_vddci_voltage_table(struct pp_hwmgr *hwmgr,
995 phm_ppt_v1_clock_voltage_dependency_table *dep_table,
996 struct pp_atomfwctrl_voltage_table *vol_table)
997{
998 uint32_t i;
999
1000 PP_ASSERT_WITH_CODE(dep_table->count,
1001 "Voltage Dependency Table empty.",
1002 return -EINVAL);
1003
1004 vol_table->mask_low = 0;
1005 vol_table->phase_delay = 0;
1006 vol_table->count = dep_table->count;
1007
1008 for (i = 0; i < dep_table->count; i++) {
1009 vol_table->entries[i].value = dep_table->entries[i].vddci;
1010 vol_table->entries[i].smio_low = 0;
1011 }
1012
1013 PP_ASSERT_WITH_CODE(!vega10_trim_voltage_table(hwmgr, vol_table),
1014 "Failed to trim VDDCI table.",
1015 return -1);
1016
1017 return 0;
1018}
1019
1020static int vega10_get_vdd_voltage_table(struct pp_hwmgr *hwmgr,
1021 phm_ppt_v1_clock_voltage_dependency_table *dep_table,
1022 struct pp_atomfwctrl_voltage_table *vol_table)
1023{
1024 int i;
1025
1026 PP_ASSERT_WITH_CODE(dep_table->count,
1027 "Voltage Dependency Table empty.",
1028 return -EINVAL);
1029
1030 vol_table->mask_low = 0;
1031 vol_table->phase_delay = 0;
1032 vol_table->count = dep_table->count;
1033
1034 for (i = 0; i < vol_table->count; i++) {
1035 vol_table->entries[i].value = dep_table->entries[i].vddc;
1036 vol_table->entries[i].smio_low = 0;
1037 }
1038
1039 return 0;
1040}
1041
1042/* ---- Voltage Tables ----
1043 * If the voltage table would be bigger than
1044 * what will fit into the state table on
1045 * the SMC keep only the higher entries.
1046 */
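/*
 * Example with hypothetical numbers: a 20-entry table trimmed to
 * max_vol_steps = 16 gives diff = 4, so entries 4..19 (the 16 highest
 * voltages) shift down to slots 0..15 and count becomes 16.
 */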
1047static void vega10_trim_voltage_table_to_fit_state_table(
1048 struct pp_hwmgr *hwmgr,
1049 uint32_t max_vol_steps,
1050 struct pp_atomfwctrl_voltage_table *vol_table)
1051{
1052 unsigned int i, diff;
1053
1054 if (vol_table->count <= max_vol_steps)
1055 return;
1056
1057 diff = vol_table->count - max_vol_steps;
1058
1059 for (i = 0; i < max_vol_steps; i++)
1060 vol_table->entries[i] = vol_table->entries[i + diff];
1061
1062 vol_table->count = max_vol_steps;
1063}
1064
1065/**
1066* Create Voltage Tables.
1067*
1068* @param hwmgr the address of the powerplay hardware manager.
1069* @return always 0
1070*/
1071static int vega10_construct_voltage_tables(struct pp_hwmgr *hwmgr)
1072{
1073 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
1074 struct phm_ppt_v2_information *table_info =
1075 (struct phm_ppt_v2_information *)hwmgr->pptable;
1076 int result;
1077
1078 if (data->mvdd_control == VEGA10_VOLTAGE_CONTROL_BY_SVID2 ||
1079 data->mvdd_control == VEGA10_VOLTAGE_CONTROL_NONE) {
1080 result = vega10_get_mvdd_voltage_table(hwmgr,
1081 table_info->vdd_dep_on_mclk,
1082 &(data->mvdd_voltage_table));
1083 PP_ASSERT_WITH_CODE(!result,
1084 "Failed to retrieve MVDDC table!",
1085 return result);
1086 }
1087
1088 if (data->vddci_control == VEGA10_VOLTAGE_CONTROL_NONE) {
1089 result = vega10_get_vddci_voltage_table(hwmgr,
1090 table_info->vdd_dep_on_mclk,
1091 &(data->vddci_voltage_table));
1092 PP_ASSERT_WITH_CODE(!result,
1093 "Failed to retrieve VDDCI_MEM table!",
1094 return result);
1095 }
1096
1097 if (data->vddc_control == VEGA10_VOLTAGE_CONTROL_BY_SVID2 ||
1098 data->vddc_control == VEGA10_VOLTAGE_CONTROL_NONE) {
1099 result = vega10_get_vdd_voltage_table(hwmgr,
1100 table_info->vdd_dep_on_sclk,
1101 &(data->vddc_voltage_table));
1102 PP_ASSERT_WITH_CODE(!result,
1103 "Failed to retrieve VDDCR_SOC table!",
1104 return result);
1105 }
1106
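/*
 * The SMC state table holds at most 16 entries per voltage rail; if the VBIOS
 * tables provide more, warn and keep only the highest 16 via the trim helper
 * above.
 */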
1107 PP_ASSERT_WITH_CODE(data->vddc_voltage_table.count <= 16,
1108 "Too many voltage values for VDDC. Trimming to fit state table.",
1109 vega10_trim_voltage_table_to_fit_state_table(hwmgr,
1110 16, &(data->vddc_voltage_table)));
1111
1112 PP_ASSERT_WITH_CODE(data->vddci_voltage_table.count <= 16,
1113 "Too many voltage values for VDDCI. Trimming to fit state table.",
1114 vega10_trim_voltage_table_to_fit_state_table(hwmgr,
1115 16, &(data->vddci_voltage_table)));
1116
1117 PP_ASSERT_WITH_CODE(data->mvdd_voltage_table.count <= 16,
1118 "Too many voltage values for MVDD. Trimming to fit state table.",
1119 vega10_trim_voltage_table_to_fit_state_table(hwmgr,
1120 16, &(data->mvdd_voltage_table)));
1121
1122
1123 return 0;
1124}
1125
1126/*
1127 * @fn vega10_init_dpm_state
1128 * @brief Function to initialize all Soft Min/Max and Hard Min/Max to 0xff.
1129 *
1130 * @param dpm_state - the address of the DPM Table to initialize.
1131 * @return None.
1132 */
1133static void vega10_init_dpm_state(struct vega10_dpm_state *dpm_state)
1134{
1135 dpm_state->soft_min_level = 0xff;
1136 dpm_state->soft_max_level = 0xff;
1137 dpm_state->hard_min_level = 0xff;
1138 dpm_state->hard_max_level = 0xff;
1139}
1140
1141static void vega10_setup_default_single_dpm_table(struct pp_hwmgr *hwmgr,
1142 struct vega10_single_dpm_table *dpm_table,
1143 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table)
1144{
1145 int i;
1146
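/*
 * Copy dependency entries into the DPM table, skipping any entry whose clock
 * is lower than the previously added level, so the resulting levels are
 * non-decreasing.
 */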
1147 for (i = 0; i < dep_table->count; i++) {
1148 if (i == 0 || dpm_table->dpm_levels[dpm_table->count - 1].value <=
1149 dep_table->entries[i].clk) {
1150 dpm_table->dpm_levels[dpm_table->count].value =
1151 dep_table->entries[i].clk;
1152 dpm_table->dpm_levels[dpm_table->count].enabled = true;
1153 dpm_table->count++;
1154 }
1155 }
1156}
1157static int vega10_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
1158{
1159 struct vega10_hwmgr *data =
1160 (struct vega10_hwmgr *)(hwmgr->backend);
1161 struct vega10_pcie_table *pcie_table = &(data->dpm_table.pcie_table);
1162 struct phm_ppt_v2_information *table_info =
1163 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1164 struct phm_ppt_v1_pcie_table *bios_pcie_table =
1165 table_info->pcie_table;
1166 uint32_t i;
1167
1168 PP_ASSERT_WITH_CODE(bios_pcie_table->count,
1169 "Incorrect number of PCIE States from VBIOS!",
1170 return -1);
1171
1172 for (i = 0; i < NUM_LINK_LEVELS; i++) {
1173 if (data->registry_data.pcieSpeedOverride)
1174 pcie_table->pcie_gen[i] =
1175 data->registry_data.pcieSpeedOverride;
1176 else
1177 pcie_table->pcie_gen[i] =
1178 bios_pcie_table->entries[i].gen_speed;
1179
1180 if (data->registry_data.pcieLaneOverride)
1181 pcie_table->pcie_lane[i] = (uint8_t)encode_pcie_lane_width(
1182 data->registry_data.pcieLaneOverride);
1183 else
1184 pcie_table->pcie_lane[i] = (uint8_t)encode_pcie_lane_width(
1185 bios_pcie_table->entries[i].lane_width);
1186 if (data->registry_data.pcieClockOverride)
1187 pcie_table->lclk[i] =
1188 data->registry_data.pcieClockOverride;
1189 else
1190 pcie_table->lclk[i] =
1191 bios_pcie_table->entries[i].pcie_sclk;
1192 }
1193
1194 pcie_table->count = NUM_LINK_LEVELS;
1195
1196 return 0;
1197}
1198
1199/*
1200 * This function is to initialize all DPM state tables
1201 * for SMU based on the dependency table.
1202 * Dynamic state patching function will then trim these
1203 * state tables to the allowed range based
1204 * on the power policy or external client requests,
1205 * such as UVD request, etc.
1206 */
1207static int vega10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
1208{
1209 struct vega10_hwmgr *data =
1210 (struct vega10_hwmgr *)(hwmgr->backend);
1211 struct phm_ppt_v2_information *table_info =
1212 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1213 struct vega10_single_dpm_table *dpm_table;
1214 uint32_t i;
1215
1216 struct phm_ppt_v1_clock_voltage_dependency_table *dep_soc_table =
1217 table_info->vdd_dep_on_socclk;
1218 struct phm_ppt_v1_clock_voltage_dependency_table *dep_gfx_table =
1219 table_info->vdd_dep_on_sclk;
1220 struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
1221 table_info->vdd_dep_on_mclk;
1222 struct phm_ppt_v1_mm_clock_voltage_dependency_table *dep_mm_table =
1223 table_info->mm_dep_table;
1224 struct phm_ppt_v1_clock_voltage_dependency_table *dep_dcef_table =
1225 table_info->vdd_dep_on_dcefclk;
1226 struct phm_ppt_v1_clock_voltage_dependency_table *dep_pix_table =
1227 table_info->vdd_dep_on_pixclk;
1228 struct phm_ppt_v1_clock_voltage_dependency_table *dep_disp_table =
1229 table_info->vdd_dep_on_dispclk;
1230 struct phm_ppt_v1_clock_voltage_dependency_table *dep_phy_table =
1231 table_info->vdd_dep_on_phyclk;
1232
1233 PP_ASSERT_WITH_CODE(dep_soc_table,
1234 "SOCCLK dependency table is missing. This table is mandatory",
1235 return -EINVAL);
1236 PP_ASSERT_WITH_CODE(dep_soc_table->count >= 1,
1237 "SOCCLK dependency table is empty. This table is mandatory",
1238 return -EINVAL);
1239
1240 PP_ASSERT_WITH_CODE(dep_gfx_table,
1241 "GFXCLK dependency table is missing. This table is mandatory",
1242 return -EINVAL);
1243 PP_ASSERT_WITH_CODE(dep_gfx_table->count >= 1,
1244 "GFXCLK dependency table is empty. This table is mandatory",
1245 return -EINVAL);
1246
1247 PP_ASSERT_WITH_CODE(dep_mclk_table,
1248 "MCLK dependency table is missing. This table is mandatory",
1249 return -EINVAL);
1250 PP_ASSERT_WITH_CODE(dep_mclk_table->count >= 1,
1251 "MCLK dependency table has to have is missing. This table is mandatory",
1252 return -EINVAL);
1253
1254 /* Initialize Sclk DPM table based on allow Sclk values */
1255 data->dpm_table.soc_table.count = 0;
1256 data->dpm_table.gfx_table.count = 0;
1257 data->dpm_table.dcef_table.count = 0;
1258
1259 dpm_table = &(data->dpm_table.soc_table);
1260 vega10_setup_default_single_dpm_table(hwmgr,
1261 dpm_table,
1262 dep_soc_table);
1263
1264 vega10_init_dpm_state(&(dpm_table->dpm_state));
1265
1266 dpm_table = &(data->dpm_table.gfx_table);
1267 vega10_setup_default_single_dpm_table(hwmgr,
1268 dpm_table,
1269 dep_gfx_table);
1270 vega10_init_dpm_state(&(dpm_table->dpm_state));
1271
1272 /* Initialize Mclk DPM table based on allow Mclk values */
1273 data->dpm_table.mem_table.count = 0;
1274 dpm_table = &(data->dpm_table.mem_table);
1275 vega10_setup_default_single_dpm_table(hwmgr,
1276 dpm_table,
1277 dep_mclk_table);
1278 vega10_init_dpm_state(&(dpm_table->dpm_state));
1279
1280 data->dpm_table.eclk_table.count = 0;
1281 dpm_table = &(data->dpm_table.eclk_table);
1282 for (i = 0; i < dep_mm_table->count; i++) {
1283 if (i == 0 || dpm_table->dpm_levels
1284 [dpm_table->count - 1].value <=
1285 dep_mm_table->entries[i].eclk) {
1286 dpm_table->dpm_levels[dpm_table->count].value =
1287 dep_mm_table->entries[i].eclk;
1288 dpm_table->dpm_levels[dpm_table->count].enabled =
1289 (i == 0) ? true : false;
1290 dpm_table->count++;
1291 }
1292 }
1293 vega10_init_dpm_state(&(dpm_table->dpm_state));
1294
1295 data->dpm_table.vclk_table.count = 0;
1296 data->dpm_table.dclk_table.count = 0;
1297 dpm_table = &(data->dpm_table.vclk_table);
1298 for (i = 0; i < dep_mm_table->count; i++) {
1299 if (i == 0 || dpm_table->dpm_levels
1300 [dpm_table->count - 1].value <=
1301 dep_mm_table->entries[i].vclk) {
1302 dpm_table->dpm_levels[dpm_table->count].value =
1303 dep_mm_table->entries[i].vclk;
1304 dpm_table->dpm_levels[dpm_table->count].enabled =
1305 (i == 0) ? true : false;
1306 dpm_table->count++;
1307 }
1308 }
1309 vega10_init_dpm_state(&(dpm_table->dpm_state));
1310
1311 dpm_table = &(data->dpm_table.dclk_table);
1312 for (i = 0; i < dep_mm_table->count; i++) {
1313 if (i == 0 || dpm_table->dpm_levels
1314 [dpm_table->count - 1].value <=
1315 dep_mm_table->entries[i].dclk) {
1316 dpm_table->dpm_levels[dpm_table->count].value =
1317 dep_mm_table->entries[i].dclk;
1318 dpm_table->dpm_levels[dpm_table->count].enabled =
1319 (i == 0) ? true : false;
1320 dpm_table->count++;
1321 }
1322 }
1323 vega10_init_dpm_state(&(dpm_table->dpm_state));
1324
1325 /* Assume there is no headless Vega10 for now */
1326 dpm_table = &(data->dpm_table.dcef_table);
1327 vega10_setup_default_single_dpm_table(hwmgr,
1328 dpm_table,
1329 dep_dcef_table);
1330
1331 vega10_init_dpm_state(&(dpm_table->dpm_state));
1332
1333 dpm_table = &(data->dpm_table.pixel_table);
1334 vega10_setup_default_single_dpm_table(hwmgr,
1335 dpm_table,
1336 dep_pix_table);
1337
1338 vega10_init_dpm_state(&(dpm_table->dpm_state));
1339
1340 dpm_table = &(data->dpm_table.display_table);
1341 vega10_setup_default_single_dpm_table(hwmgr,
1342 dpm_table,
1343 dep_disp_table);
1344
1345 vega10_init_dpm_state(&(dpm_table->dpm_state));
1346
1347 dpm_table = &(data->dpm_table.phy_table);
1348 vega10_setup_default_single_dpm_table(hwmgr,
1349 dpm_table,
1350 dep_phy_table);
1351
1352 vega10_init_dpm_state(&(dpm_table->dpm_state));
1353
1354 vega10_setup_default_pcie_table(hwmgr);
1355
1356 /* save a copy of the default DPM table */
1357 memcpy(&(data->golden_dpm_table), &(data->dpm_table),
1358 sizeof(struct vega10_dpm_table));
1359
1360 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1361 PHM_PlatformCaps_ODNinACSupport) ||
1362 phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1363 PHM_PlatformCaps_ODNinDCSupport)) {
1364 data->odn_dpm_table.odn_core_clock_dpm_levels.
1365 number_of_performance_levels = data->dpm_table.gfx_table.count;
1366 for (i = 0; i < data->dpm_table.gfx_table.count; i++) {
1367 data->odn_dpm_table.odn_core_clock_dpm_levels.
1368 performance_level_entries[i].clock =
1369 data->dpm_table.gfx_table.dpm_levels[i].value;
1370 data->odn_dpm_table.odn_core_clock_dpm_levels.
1371 performance_level_entries[i].enabled = true;
1372 }
1373
1374 data->odn_dpm_table.vdd_dependency_on_sclk.count =
1375 dep_gfx_table->count;
1376 for (i = 0; i < dep_gfx_table->count; i++) {
1377 data->odn_dpm_table.vdd_dependency_on_sclk.entries[i].clk =
1378 dep_gfx_table->entries[i].clk;
1379 data->odn_dpm_table.vdd_dependency_on_sclk.entries[i].vddInd =
1380 dep_gfx_table->entries[i].vddInd;
1381 data->odn_dpm_table.vdd_dependency_on_sclk.entries[i].cks_enable =
1382 dep_gfx_table->entries[i].cks_enable;
1383 data->odn_dpm_table.vdd_dependency_on_sclk.entries[i].cks_voffset =
1384 dep_gfx_table->entries[i].cks_voffset;
1385 }
1386
1387 data->odn_dpm_table.odn_memory_clock_dpm_levels.
1388 number_of_performance_levels = data->dpm_table.mem_table.count;
1389 for (i = 0; i < data->dpm_table.mem_table.count; i++) {
1390 data->odn_dpm_table.odn_memory_clock_dpm_levels.
1391 performance_level_entries[i].clock =
1392 data->dpm_table.mem_table.dpm_levels[i].value;
1393 data->odn_dpm_table.odn_memory_clock_dpm_levels.
1394 performance_level_entries[i].enabled = true;
1395 }
1396
1397 data->odn_dpm_table.vdd_dependency_on_mclk.count = dep_mclk_table->count;
1398 for (i = 0; i < dep_mclk_table->count; i++) {
1399 data->odn_dpm_table.vdd_dependency_on_mclk.entries[i].clk =
1400 dep_mclk_table->entries[i].clk;
1401 data->odn_dpm_table.vdd_dependency_on_mclk.entries[i].vddInd =
1402 dep_mclk_table->entries[i].vddInd;
1403 data->odn_dpm_table.vdd_dependency_on_mclk.entries[i].vddci =
1404 dep_mclk_table->entries[i].vddci;
1405 }
1406 }
1407
1408 return 0;
1409}
1410
1411/*
1412 * @fn vega10_populate_ulv_state
1413 * @brief Function to provide parameters for Ultra Low Voltage (ULV) state to SMC.
1414 *
1415 * @param hwmgr - the address of the hardware manager.
1416 * @return Always 0.
1417 */
1418static int vega10_populate_ulv_state(struct pp_hwmgr *hwmgr)
1419{
1420 struct vega10_hwmgr *data =
1421 (struct vega10_hwmgr *)(hwmgr->backend);
1422 struct phm_ppt_v2_information *table_info =
1423 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1424
1425 data->smc_state_table.pp_table.UlvOffsetVid =
1426 (uint8_t)table_info->us_ulv_voltage_offset;
1427
1428 data->smc_state_table.pp_table.UlvSmnclkDid =
1429 (uint8_t)(table_info->us_ulv_smnclk_did);
1430 data->smc_state_table.pp_table.UlvMp1clkDid =
1431 (uint8_t)(table_info->us_ulv_mp1clk_did);
1432 data->smc_state_table.pp_table.UlvGfxclkBypass =
1433 (uint8_t)(table_info->us_ulv_gfxclk_bypass);
1434 data->smc_state_table.pp_table.UlvPhaseSheddingPsi0 =
1435 (uint8_t)(data->vddc_voltage_table.psi0_enable);
1436 data->smc_state_table.pp_table.UlvPhaseSheddingPsi1 =
1437 (uint8_t)(data->vddc_voltage_table.psi1_enable);
1438
1439 return 0;
1440}
1441
1442static int vega10_populate_single_lclk_level(struct pp_hwmgr *hwmgr,
1443 uint32_t lclock, uint8_t *curr_lclk_did)
1444{
1445 struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1446
1447 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(
1448 hwmgr,
1449 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
1450 lclock, &dividers),
1451 "Failed to get LCLK clock settings from VBIOS!",
1452 return -1);
1453
1454 *curr_lclk_did = dividers.ulDid;
1455
1456 return 0;
1457}
1458
1459static int vega10_populate_smc_link_levels(struct pp_hwmgr *hwmgr)
1460{
1461 int result = -1;
1462 struct vega10_hwmgr *data =
1463 (struct vega10_hwmgr *)(hwmgr->backend);
1464 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
1465 struct vega10_pcie_table *pcie_table =
1466 &(data->dpm_table.pcie_table);
1467 uint32_t i, j;
1468
1469 for (i = 0; i < pcie_table->count; i++) {
1470 pp_table->PcieGenSpeed[i] = pcie_table->pcie_gen[i];
1471 pp_table->PcieLaneCount[i] = pcie_table->pcie_lane[i];
1472
1473 result = vega10_populate_single_lclk_level(hwmgr,
1474 pcie_table->lclk[i], &(pp_table->LclkDid[i]));
1475 if (result) {
1476 pr_info("Populate LClock Level %d Failed!\n", i);
1477 return result;
1478 }
1479 }
1480
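/* Pad the remaining SMC link levels by repeating the last valid PCIe entry. */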
1481 j = i - 1;
1482 while (i < NUM_LINK_LEVELS) {
1483 pp_table->PcieGenSpeed[i] = pcie_table->pcie_gen[j];
1484 pp_table->PcieLaneCount[i] = pcie_table->pcie_lane[j];
1485
1486 result = vega10_populate_single_lclk_level(hwmgr,
1487 pcie_table->lclk[j], &(pp_table->LclkDid[i]));
1488 if (result) {
1489 pr_info("Populate LClock Level %d Failed!\n", i);
1490 return result;
1491 }
1492 i++;
1493 }
1494
1495 return result;
1496}
1497
1498/**
1499* Populates single SMC GFXSCLK structure using the provided engine clock
1500*
1501* @param hwmgr the address of the hardware manager
1502* @param gfx_clock the GFX clock to use to populate the structure.
1503* @param current_gfxclk_level location in PPTable for the SMC GFXCLK structure.
1504*/
1505
1506static int vega10_populate_single_gfx_level(struct pp_hwmgr *hwmgr,
1507 uint32_t gfx_clock, PllSetting_t *current_gfxclk_level)
1508{
1509 struct phm_ppt_v2_information *table_info =
1510 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1511 struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_sclk =
1512 table_info->vdd_dep_on_sclk;
1513 struct vega10_hwmgr *data =
1514 (struct vega10_hwmgr *)(hwmgr->backend);
1515 struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1516 uint32_t gfx_max_clock =
1517 hwmgr->platform_descriptor.overdriveLimit.engineClock;
1518 uint32_t i = 0;
1519
1520 if (data->apply_overdrive_next_settings_mask &
1521 DPMTABLE_OD_UPDATE_VDDC)
1522 dep_on_sclk = (struct phm_ppt_v1_clock_voltage_dependency_table *)
1523 &(data->odn_dpm_table.vdd_dependency_on_sclk);
1524
1525 PP_ASSERT_WITH_CODE(dep_on_sclk,
1526 "Invalid SOC_VDD-GFX_CLK Dependency Table!",
1527 return -EINVAL);
1528
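/*
 * With a pending SCLK overdrive update, clamp the requested clock to the
 * overdrive engine clock limit; otherwise the clock must match an entry in
 * the SOC_VDD-GFX_CLK dependency table exactly.
 */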
1529 if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_SCLK)
1530 gfx_clock = gfx_clock > gfx_max_clock ? gfx_max_clock : gfx_clock;
1531 else {
1532 for (i = 0; i < dep_on_sclk->count; i++) {
1533 if (dep_on_sclk->entries[i].clk == gfx_clock)
1534 break;
1535 }
1536 PP_ASSERT_WITH_CODE(dep_on_sclk->count > i,
1537 "Cannot find gfx_clk in SOC_VDD-GFX_CLK!",
1538 return -EINVAL);
1539 }
1540
1541 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
1542 COMPUTE_GPUCLK_INPUT_FLAG_GFXCLK,
1543 gfx_clock, &dividers),
1544 "Failed to get GFX Clock settings from VBIOS!",
1545 return -EINVAL);
1546
1547 /* Feedback Multiplier: bit 0:8 int, bit 15:12 post_div, bit 31:16 frac */
1548 current_gfxclk_level->FbMult =
1549 cpu_to_le32(dividers.ulPll_fb_mult);
1550 /* Spread FB Multiplier bit: bit 0:8 int, bit 31:16 frac */
1551 current_gfxclk_level->SsOn = dividers.ucPll_ss_enable;
1552 current_gfxclk_level->SsFbMult =
1553 cpu_to_le32(dividers.ulPll_ss_fbsmult);
1554 current_gfxclk_level->SsSlewFrac =
1555 cpu_to_le16(dividers.usPll_ss_slew_frac);
1556 current_gfxclk_level->Did = (uint8_t)(dividers.ulDid);
1557
1558 return 0;
1559}
1560
1561/**
1562 * @brief Populates single SMC SOCCLK structure using the provided clock.
1563 *
1564 * @param hwmgr - the address of the hardware manager.
1565 * @param soc_clock - the SOC clock to use to populate the structure.
1566 * @param current_socclk_level - location in PPTable for the SMC SOCCLK structure.
1567 * @return 0 on success.
1568 */
1569static int vega10_populate_single_soc_level(struct pp_hwmgr *hwmgr,
1570 uint32_t soc_clock, uint8_t *current_soc_did,
1571 uint8_t *current_vol_index)
1572{
1573 struct phm_ppt_v2_information *table_info =
1574 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1575 struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_soc =
1576 table_info->vdd_dep_on_socclk;
1577 struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1578 uint32_t i;
1579
1580 PP_ASSERT_WITH_CODE(dep_on_soc,
1581 "Invalid SOC_VDD-SOC_CLK Dependency Table!",
1582 return -EINVAL);
1583 for (i = 0; i < dep_on_soc->count; i++) {
1584 if (dep_on_soc->entries[i].clk == soc_clock)
1585 break;
1586 }
1587 PP_ASSERT_WITH_CODE(dep_on_soc->count > i,
1588 "Cannot find SOC_CLK in SOC_VDD-SOC_CLK Dependency Table",
1589 return -EINVAL);
1590 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
1591 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
1592 soc_clock, &dividers),
1593 "Failed to get SOC Clock settings from VBIOS!",
1594 return -EINVAL);
1595
1596 *current_soc_did = (uint8_t)dividers.ulDid;
1597 *current_vol_index = (uint8_t)(dep_on_soc->entries[i].vddInd);
1598
1599 return 0;
1600}
1601
1602uint16_t vega10_locate_vddc_given_clock(struct pp_hwmgr *hwmgr,
1603 uint32_t clk,
1604 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table)
1605{
1606 uint16_t i;
1607
1608 for (i = 0; i < dep_table->count; i++) {
1609 if (dep_table->entries[i].clk == clk)
1610 return dep_table->entries[i].vddc;
1611 }
1612
1613 pr_info("[LocateVddcGivenClock] Cannot locate SOC Vddc for this clock!");
1614 return 0;
1615}
1616
1617/**
1618* Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states
1619*
1620* @param hwmgr the address of the hardware manager
1621*/
1622static int vega10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
1623{
1624 struct vega10_hwmgr *data =
1625 (struct vega10_hwmgr *)(hwmgr->backend);
1626 struct phm_ppt_v2_information *table_info =
1627 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1628 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
1629 table_info->vdd_dep_on_socclk;
1630 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
1631 struct vega10_single_dpm_table *dpm_table = &(data->dpm_table.gfx_table);
1632 int result = 0;
1633 uint32_t i, j;
1634
1635 for (i = 0; i < dpm_table->count; i++) {
1636 result = vega10_populate_single_gfx_level(hwmgr,
1637 dpm_table->dpm_levels[i].value,
1638 &(pp_table->GfxclkLevel[i]));
1639 if (result)
1640 return result;
1641 }
1642
1643 j = i - 1;
1644 while (i < NUM_GFXCLK_DPM_LEVELS) {
1645 result = vega10_populate_single_gfx_level(hwmgr,
1646 dpm_table->dpm_levels[j].value,
1647 &(pp_table->GfxclkLevel[i]));
1648 if (result)
1649 return result;
1650 i++;
1651 }
1652
1653 pp_table->GfxclkSlewRate =
1654 cpu_to_le16(table_info->us_gfxclk_slew_rate);
1655
1656 dpm_table = &(data->dpm_table.soc_table);
1657 for (i = 0; i < dpm_table->count; i++) {
1658 pp_table->SocVid[i] =
1659 (uint8_t)convert_to_vid(
1660 vega10_locate_vddc_given_clock(hwmgr,
1661 dpm_table->dpm_levels[i].value,
1662 dep_table));
1663 result = vega10_populate_single_soc_level(hwmgr,
1664 dpm_table->dpm_levels[i].value,
1665 &(pp_table->SocclkDid[i]),
1666 &(pp_table->SocDpmVoltageIndex[i]));
1667 if (result)
1668 return result;
1669 }
1670
1671 j = i - 1;
1672 while (i < NUM_SOCCLK_DPM_LEVELS) {
1673 pp_table->SocVid[i] = pp_table->SocVid[j];
1674 result = vega10_populate_single_soc_level(hwmgr,
1675 dpm_table->dpm_levels[j].value,
1676 &(pp_table->SocclkDid[i]),
1677 &(pp_table->SocDpmVoltageIndex[i]));
1678 if (result)
1679 return result;
1680 i++;
1681 }
1682
1683 return result;
1684}
1685
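/*
 * Illustrative sketch (hypothetical helper, not part of this driver): the
 * population loops above fill the first dpm_table->count SMC entries with
 * real DPM levels and then pad the remaining NUM_*_DPM_LEVELS slots by
 * repeating the highest real level, so the SMC always sees a fully
 * populated table.  Assumes count >= 1, as the loops above do.
 */
static inline uint32_t example_padded_level_index(uint32_t i, uint32_t count)
{
	/* indices [0, count) are real levels; everything above repeats the last */
	return (i < count) ? i : (count - 1);
}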
1686/**
1687 * @brief Populates a single SMC UCLK (memory clock) structure using the provided clock.
1688 *
1689 * @param hwmgr - the address of the hardware manager.
1690 * @param mem_clock - the memory clock used to fill the VID, PLL setting and voltage index outputs.
1691 * @return 0 on success.
1692 */
1693static int vega10_populate_single_memory_level(struct pp_hwmgr *hwmgr,
1694 uint32_t mem_clock, uint8_t *current_mem_vid,
1695 PllSetting_t *current_memclk_level, uint8_t *current_mem_soc_vind)
1696{
1697 struct vega10_hwmgr *data =
1698 (struct vega10_hwmgr *)(hwmgr->backend);
1699 struct phm_ppt_v2_information *table_info =
1700 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1701 struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_mclk =
1702 table_info->vdd_dep_on_mclk;
1703 struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1704 uint32_t mem_max_clock =
1705 hwmgr->platform_descriptor.overdriveLimit.memoryClock;
1706 uint32_t i = 0;
1707
1708 if (data->apply_overdrive_next_settings_mask &
1709 DPMTABLE_OD_UPDATE_VDDC)
1710 dep_on_mclk = (struct phm_ppt_v1_clock_voltage_dependency_table *)
1711 &data->odn_dpm_table.vdd_dependency_on_mclk;
1712
1713 PP_ASSERT_WITH_CODE(dep_on_mclk,
1714 "Invalid SOC_VDD-UCLK Dependency Table!",
1715 return -EINVAL);
1716
1717 if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_MCLK)
1718 mem_clock = mem_clock > mem_max_clock ? mem_max_clock : mem_clock;
1719 else {
1720 for (i = 0; i < dep_on_mclk->count; i++) {
1721 if (dep_on_mclk->entries[i].clk == mem_clock)
1722 break;
1723 }
1724 PP_ASSERT_WITH_CODE(dep_on_mclk->count > i,
1725 "Cannot find UCLK in SOC_VDD-UCLK Dependency Table!",
1726 return -EINVAL);
1727 }
1728
1729 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(
1730 hwmgr, COMPUTE_GPUCLK_INPUT_FLAG_UCLK, mem_clock, &dividers),
1731 "Failed to get UCLK settings from VBIOS!",
1732 return -1);
1733
1734 *current_mem_vid =
1735 (uint8_t)(convert_to_vid(dep_on_mclk->entries[i].mvdd));
1736 *current_mem_soc_vind =
1737 (uint8_t)(dep_on_mclk->entries[i].vddInd);
1738 current_memclk_level->FbMult = cpu_to_le32(dividers.ulPll_fb_mult);
1739 current_memclk_level->Did = (uint8_t)(dividers.ulDid);
1740
1741 PP_ASSERT_WITH_CODE(current_memclk_level->Did >= 1,
1742 "Invalid Divider ID!",
1743 return -EINVAL);
1744
1745 return 0;
1746}
1747
1748/**
1749 * @brief Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states.
1750 *
1751 * @param hwmgr - the address of the hardware manager.
1752 * @return 0 on success.
1753 */
1754static int vega10_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
1755{
1756 struct vega10_hwmgr *data =
1757 (struct vega10_hwmgr *)(hwmgr->backend);
1758 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
1759 struct vega10_single_dpm_table *dpm_table =
1760 &(data->dpm_table.mem_table);
1761 int result = 0;
1762 uint32_t i, j, reg, mem_channels;
1763
1764 for (i = 0; i < dpm_table->count; i++) {
1765 result = vega10_populate_single_memory_level(hwmgr,
1766 dpm_table->dpm_levels[i].value,
1767 &(pp_table->MemVid[i]),
1768 &(pp_table->UclkLevel[i]),
1769 &(pp_table->MemSocVoltageIndex[i]));
1770 if (result)
1771 return result;
1772 }
1773
1774 j = i - 1;
1775 while (i < NUM_UCLK_DPM_LEVELS) {
1776 result = vega10_populate_single_memory_level(hwmgr,
1777 dpm_table->dpm_levels[j].value,
1778 &(pp_table->MemVid[i]),
1779 &(pp_table->UclkLevel[i]),
1780 &(pp_table->MemSocVoltageIndex[i]));
1781 if (result)
1782 return result;
1783 i++;
1784 }
1785
1786 reg = soc15_get_register_offset(DF_HWID, 0,
1787 mmDF_CS_AON0_DramBaseAddress0_BASE_IDX,
1788 mmDF_CS_AON0_DramBaseAddress0);
1789 mem_channels = (cgs_read_register(hwmgr->device, reg) &
1790 DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK) >>
1791 DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;
1792 pp_table->NumMemoryChannels = cpu_to_le16(mem_channels);
1793 pp_table->MemoryChannelWidth =
1794 cpu_to_le16(HBM_MEMORY_CHANNEL_WIDTH *
1795 channel_number[mem_channels]);
1796
1797 pp_table->LowestUclkReservedForUlv =
1798 (uint8_t)(data->lowest_uclk_reserved_for_ulv);
1799
1800 return result;
1801}
1802
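/*
 * Illustrative sketch (hypothetical helper, not part of this driver): the
 * IntLvNumChan field read above is an encoded interleave setting rather than
 * a raw channel count; channel_number[] maps it to the number of HBM
 * channels, and the width reported to the SMC is that count times
 * HBM_MEMORY_CHANNEL_WIDTH (128 bits per channel).  Encodings outside the
 * table are not expected, matching the assumption made above.
 */
static inline uint32_t example_hbm_bus_width(uint32_t dram_base_address0)
{
	uint32_t chan_field = (dram_base_address0 &
			DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK) >>
			DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;

	/* e.g. chan_field == 5 -> 8 channels -> 8 * 128 = 1024-bit bus */
	return HBM_MEMORY_CHANNEL_WIDTH * channel_number[chan_field];
}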
1803static int vega10_populate_single_display_type(struct pp_hwmgr *hwmgr,
1804 DSPCLK_e disp_clock)
1805{
1806 struct vega10_hwmgr *data =
1807 (struct vega10_hwmgr *)(hwmgr->backend);
1808 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
1809 struct phm_ppt_v2_information *table_info =
1810 (struct phm_ppt_v2_information *)
1811 (hwmgr->pptable);
1812 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
1813 uint32_t i;
1814 uint16_t clk = 0, vddc = 0;
1815 uint8_t vid = 0;
1816
1817 switch (disp_clock) {
1818 case DSPCLK_DCEFCLK:
1819 dep_table = table_info->vdd_dep_on_dcefclk;
1820 break;
1821 case DSPCLK_DISPCLK:
1822 dep_table = table_info->vdd_dep_on_dispclk;
1823 break;
1824 case DSPCLK_PIXCLK:
1825 dep_table = table_info->vdd_dep_on_pixclk;
1826 break;
1827 case DSPCLK_PHYCLK:
1828 dep_table = table_info->vdd_dep_on_phyclk;
1829 break;
1830 default:
1831 return -1;
1832 }
1833
1834 PP_ASSERT_WITH_CODE(dep_table->count <= NUM_DSPCLK_LEVELS,
1835 "Number Of Entries Exceeded maximum!",
1836 return -1);
1837
1838 for (i = 0; i < dep_table->count; i++) {
1839 clk = (uint16_t)(dep_table->entries[i].clk / 100);
1840 vddc = table_info->vddc_lookup_table->
1841 entries[dep_table->entries[i].vddInd].us_vdd;
1842 vid = (uint8_t)convert_to_vid(vddc);
1843 pp_table->DisplayClockTable[disp_clock][i].Freq =
1844 cpu_to_le16(clk);
1845 pp_table->DisplayClockTable[disp_clock][i].Vid =
1846 cpu_to_le16(vid);
1847 }
1848
1849 while (i < NUM_DSPCLK_LEVELS) {
1850 pp_table->DisplayClockTable[disp_clock][i].Freq =
1851 cpu_to_le16(clk);
1852 pp_table->DisplayClockTable[disp_clock][i].Vid =
1853 cpu_to_le16(vid);
1854 i++;
1855 }
1856
1857 return 0;
1858}
1859
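/*
 * Illustrative sketch (hypothetical helper, not part of this driver),
 * assuming - as the division by 100 above suggests - that dependency-table
 * clocks are stored in 10 kHz units while DisplayClockTable Freq entries
 * are expected in MHz.
 */
static inline uint16_t example_clk_10khz_to_mhz(uint32_t clk_10khz)
{
	/* e.g. 60000 (10 kHz units) -> 600 MHz */
	return (uint16_t)(clk_10khz / 100);
}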
1860static int vega10_populate_all_display_clock_levels(struct pp_hwmgr *hwmgr)
1861{
1862 uint32_t i;
1863
1864 for (i = 0; i < DSPCLK_COUNT; i++) {
1865 PP_ASSERT_WITH_CODE(!vega10_populate_single_display_type(hwmgr, i),
1866 "Failed to populate Clock in DisplayClockTable!",
1867 return -1);
1868 }
1869
1870 return 0;
1871}
1872
1873static int vega10_populate_single_eclock_level(struct pp_hwmgr *hwmgr,
1874 uint32_t eclock, uint8_t *current_eclk_did,
1875 uint8_t *current_soc_vol)
1876{
1877 struct phm_ppt_v2_information *table_info =
1878 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1879 struct phm_ppt_v1_mm_clock_voltage_dependency_table *dep_table =
1880 table_info->mm_dep_table;
1881 struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1882 uint32_t i;
1883
1884 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
1885 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
1886 eclock, &dividers),
1887 "Failed to get ECLK clock settings from VBIOS!",
1888 return -1);
1889
1890 *current_eclk_did = (uint8_t)dividers.ulDid;
1891
1892 for (i = 0; i < dep_table->count; i++) {
1893 if (dep_table->entries[i].eclk == eclock)
1894 *current_soc_vol = dep_table->entries[i].vddcInd;
1895 }
1896
1897 return 0;
1898}
1899
1900static int vega10_populate_smc_vce_levels(struct pp_hwmgr *hwmgr)
1901{
1902 struct vega10_hwmgr *data =
1903 (struct vega10_hwmgr *)(hwmgr->backend);
1904 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
1905 struct vega10_single_dpm_table *dpm_table = &(data->dpm_table.eclk_table);
1906 int result = -EINVAL;
1907 uint32_t i, j;
1908
1909 for (i = 0; i < dpm_table->count; i++) {
1910 result = vega10_populate_single_eclock_level(hwmgr,
1911 dpm_table->dpm_levels[i].value,
1912 &(pp_table->EclkDid[i]),
1913 &(pp_table->VceDpmVoltageIndex[i]));
1914 if (result)
1915 return result;
1916 }
1917
1918 j = i - 1;
1919 while (i < NUM_VCE_DPM_LEVELS) {
1920 result = vega10_populate_single_eclock_level(hwmgr,
1921 dpm_table->dpm_levels[j].value,
1922 &(pp_table->EclkDid[i]),
1923 &(pp_table->VceDpmVoltageIndex[i]));
1924 if (result)
1925 return result;
1926 i++;
1927 }
1928
1929 return result;
1930}
1931
1932static int vega10_populate_single_vclock_level(struct pp_hwmgr *hwmgr,
1933 uint32_t vclock, uint8_t *current_vclk_did)
1934{
1935 struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1936
1937 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
1938 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
1939 vclock, &dividers),
1940 "Failed to get VCLK clock settings from VBIOS!",
1941 return -EINVAL);
1942
1943 *current_vclk_did = (uint8_t)dividers.ulDid;
1944
1945 return 0;
1946}
1947
1948static int vega10_populate_single_dclock_level(struct pp_hwmgr *hwmgr,
1949 uint32_t dclock, uint8_t *current_dclk_did)
1950{
1951 struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1952
1953 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
1954 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
1955 dclock, &dividers),
1956 "Failed to get DCLK clock settings from VBIOS!",
1957 return -EINVAL);
1958
1959 *current_dclk_did = (uint8_t)dividers.ulDid;
1960
1961 return 0;
1962}
1963
1964static int vega10_populate_smc_uvd_levels(struct pp_hwmgr *hwmgr)
1965{
1966 struct vega10_hwmgr *data =
1967 (struct vega10_hwmgr *)(hwmgr->backend);
1968 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
1969 struct vega10_single_dpm_table *vclk_dpm_table =
1970 &(data->dpm_table.vclk_table);
1971 struct vega10_single_dpm_table *dclk_dpm_table =
1972 &(data->dpm_table.dclk_table);
1973 struct phm_ppt_v2_information *table_info =
1974 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1975 struct phm_ppt_v1_mm_clock_voltage_dependency_table *dep_table =
1976 table_info->mm_dep_table;
1977 int result = -EINVAL;
1978 uint32_t i, j;
1979
1980 for (i = 0; i < vclk_dpm_table->count; i++) {
1981 result = vega10_populate_single_vclock_level(hwmgr,
1982 vclk_dpm_table->dpm_levels[i].value,
1983 &(pp_table->VclkDid[i]));
1984 if (result)
1985 return result;
1986 }
1987
1988 j = i - 1;
1989 while (i < NUM_UVD_DPM_LEVELS) {
1990 result = vega10_populate_single_vclock_level(hwmgr,
1991 vclk_dpm_table->dpm_levels[j].value,
1992 &(pp_table->VclkDid[i]));
1993 if (result)
1994 return result;
1995 i++;
1996 }
1997
1998 for (i = 0; i < dclk_dpm_table->count; i++) {
1999 result = vega10_populate_single_dclock_level(hwmgr,
2000 dclk_dpm_table->dpm_levels[i].value,
2001 &(pp_table->DclkDid[i]));
2002 if (result)
2003 return result;
2004 }
2005
2006 j = i - 1;
2007 while (i < NUM_UVD_DPM_LEVELS) {
2008 result = vega10_populate_single_dclock_level(hwmgr,
2009 dclk_dpm_table->dpm_levels[j].value,
2010 &(pp_table->DclkDid[i]));
2011 if (result)
2012 return result;
2013 i++;
2014 }
2015
2016 for (i = 0; i < dep_table->count; i++) {
2017 if (dep_table->entries[i].vclk ==
2018 vclk_dpm_table->dpm_levels[i].value &&
2019 dep_table->entries[i].dclk ==
2020 dclk_dpm_table->dpm_levels[i].value)
2021 pp_table->UvdDpmVoltageIndex[i] =
2022 dep_table->entries[i].vddcInd;
2023 else
2024 return -1;
2025 }
2026
2027 j = i - 1;
2028 while (i < NUM_UVD_DPM_LEVELS) {
2029 pp_table->UvdDpmVoltageIndex[i] = dep_table->entries[j].vddcInd;
2030 i++;
2031 }
2032
2033 return 0;
2034}
2035
2036static int vega10_populate_clock_stretcher_table(struct pp_hwmgr *hwmgr)
2037{
2038 struct vega10_hwmgr *data =
2039 (struct vega10_hwmgr *)(hwmgr->backend);
2040 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
2041 struct phm_ppt_v2_information *table_info =
2042 (struct phm_ppt_v2_information *)(hwmgr->pptable);
2043 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
2044 table_info->vdd_dep_on_sclk;
2045 uint32_t i;
2046
2047 for (i = 0; i < dep_table->count; i++) {
2048 pp_table->CksEnable[i] = dep_table->entries[i].cks_enable;
2049 pp_table->CksVidOffset[i] = (uint8_t)(dep_table->entries[i].cks_voffset
2050 * VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
2051 }
2052
2053 return 0;
2054}
2055
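/*
 * Illustrative sketch (hypothetical helper, not part of this driver): the
 * clock-stretcher voltage offset above is rescaled by a factor of
 * VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1 = 100 / 625 = 0.16
 * before being written as CksVidOffset; e.g. a cks_voffset of 625 becomes
 * 100, and 125 becomes 20.
 */
static inline uint8_t example_cks_voffset_to_vid_offset(uint32_t cks_voffset)
{
	return (uint8_t)(cks_voffset * VOLTAGE_VID_OFFSET_SCALE2 /
			VOLTAGE_VID_OFFSET_SCALE1);
}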
2056static int vega10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
2057{
2058 struct vega10_hwmgr *data =
2059 (struct vega10_hwmgr *)(hwmgr->backend);
2060 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
2061 struct phm_ppt_v2_information *table_info =
2062 (struct phm_ppt_v2_information *)(hwmgr->pptable);
2063 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
2064 table_info->vdd_dep_on_sclk;
2065 struct pp_atomfwctrl_avfs_parameters avfs_params = {0};
2066 int result = 0;
2067 uint32_t i;
2068
2069 pp_table->MinVoltageVid = (uint8_t)0xff;
2070 pp_table->MaxVoltageVid = (uint8_t)0;
2071
2072 if (data->smu_features[GNLD_AVFS].supported) {
2073 result = pp_atomfwctrl_get_avfs_information(hwmgr, &avfs_params);
2074 if (!result) {
2075 pp_table->MinVoltageVid = (uint8_t)
2076 convert_to_vid((uint16_t)(avfs_params.ulMinVddc));
2077 pp_table->MaxVoltageVid = (uint8_t)
2078 convert_to_vid((uint16_t)(avfs_params.ulMaxVddc));
2079
2080 pp_table->AConstant[0] = cpu_to_le32(avfs_params.ulMeanNsigmaAcontant0);
2081 pp_table->AConstant[1] = cpu_to_le32(avfs_params.ulMeanNsigmaAcontant1);
2082 pp_table->AConstant[2] = cpu_to_le32(avfs_params.ulMeanNsigmaAcontant2);
2083 pp_table->DC_tol_sigma = cpu_to_le16(avfs_params.usMeanNsigmaDcTolSigma);
2084 pp_table->Platform_mean = cpu_to_le16(avfs_params.usMeanNsigmaPlatformMean);
2085 pp_table->Platform_sigma = cpu_to_le16(avfs_params.usMeanNsigmaDcTolSigma);
2086 pp_table->PSM_Age_CompFactor = cpu_to_le16(avfs_params.usPsmAgeComfactor);
2087
2088 pp_table->BtcGbVdroopTableCksOff.a0 =
2089 cpu_to_le32(avfs_params.ulGbVdroopTableCksoffA0);
2090 pp_table->BtcGbVdroopTableCksOff.a0_shift = 20;
2091 pp_table->BtcGbVdroopTableCksOff.a1 =
2092 cpu_to_le32(avfs_params.ulGbVdroopTableCksoffA1);
2093 pp_table->BtcGbVdroopTableCksOff.a1_shift = 20;
2094 pp_table->BtcGbVdroopTableCksOff.a2 =
2095 cpu_to_le32(avfs_params.ulGbVdroopTableCksoffA2);
2096 pp_table->BtcGbVdroopTableCksOff.a2_shift = 20;
2097
2098 pp_table->OverrideBtcGbCksOn = avfs_params.ucEnableGbVdroopTableCkson;
2099 pp_table->BtcGbVdroopTableCksOn.a0 =
2100 cpu_to_le32(avfs_params.ulGbVdroopTableCksonA0);
2101 pp_table->BtcGbVdroopTableCksOn.a0_shift = 20;
2102 pp_table->BtcGbVdroopTableCksOn.a1 =
2103 cpu_to_le32(avfs_params.ulGbVdroopTableCksonA1);
2104 pp_table->BtcGbVdroopTableCksOn.a1_shift = 20;
2105 pp_table->BtcGbVdroopTableCksOn.a2 =
2106 cpu_to_le32(avfs_params.ulGbVdroopTableCksonA2);
2107 pp_table->BtcGbVdroopTableCksOn.a2_shift = 20;
2108
2109 pp_table->AvfsGbCksOn.m1 =
2110 cpu_to_le32(avfs_params.ulGbFuseTableCksonM1);
2111 pp_table->AvfsGbCksOn.m2 =
2112 cpu_to_le32(avfs_params.ulGbFuseTableCksonM2);
2113 pp_table->AvfsGbCksOn.b =
2114 cpu_to_le32(avfs_params.ulGbFuseTableCksonB);
2115 pp_table->AvfsGbCksOn.m1_shift = 24;
2116 pp_table->AvfsGbCksOn.m2_shift = 12;
2117 pp_table->AvfsGbCksOn.b_shift = 0;
2118
2119 pp_table->OverrideAvfsGbCksOn =
2120 avfs_params.ucEnableGbFuseTableCkson;
2121 pp_table->AvfsGbCksOff.m1 =
2122 cpu_to_le32(avfs_params.ulGbFuseTableCksoffM1);
2123 pp_table->AvfsGbCksOff.m2 =
2124 cpu_to_le32(avfs_params.ulGbFuseTableCksoffM2);
2125 pp_table->AvfsGbCksOff.b =
2126 cpu_to_le32(avfs_params.ulGbFuseTableCksoffB);
2127 pp_table->AvfsGbCksOff.m1_shift = 24;
2128 pp_table->AvfsGbCksOff.m2_shift = 12;
2129 pp_table->AvfsGbCksOff.b_shift = 0;
2130
2131 for (i = 0; i < dep_table->count; i++)
2132 pp_table->StaticVoltageOffsetVid[i] =
2133 convert_to_vid((uint8_t)(dep_table->entries[i].sclk_offset));
2134
2135 if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2136 data->disp_clk_quad_eqn_a) &&
2137 (PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2138 data->disp_clk_quad_eqn_b)) {
2139 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m1 =
2140 (int32_t)data->disp_clk_quad_eqn_a;
2141 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m2 =
2142 (int32_t)data->disp_clk_quad_eqn_b;
2143 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].b =
2144 (int32_t)data->disp_clk_quad_eqn_c;
2145 } else {
2146 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m1 =
2147 (int32_t)avfs_params.ulDispclk2GfxclkM1;
2148 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m2 =
2149 (int32_t)avfs_params.ulDispclk2GfxclkM2;
2150 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].b =
2151 (int32_t)avfs_params.ulDispclk2GfxclkB;
2152 }
2153
2154 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m1_shift = 24;
2155 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m2_shift = 12;
2156 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].b_shift = 12;
2157
2158 if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2159 data->dcef_clk_quad_eqn_a) &&
2160 (PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2161 data->dcef_clk_quad_eqn_b)) {
2162 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m1 =
2163 (int32_t)data->dcef_clk_quad_eqn_a;
2164 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m2 =
2165 (int32_t)data->dcef_clk_quad_eqn_b;
2166 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].b =
2167 (int32_t)data->dcef_clk_quad_eqn_c;
2168 } else {
2169 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m1 =
2170 (int32_t)avfs_params.ulDcefclk2GfxclkM1;
2171 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m2 =
2172 (int32_t)avfs_params.ulDcefclk2GfxclkM2;
2173 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].b =
2174 (int32_t)avfs_params.ulDcefclk2GfxclkB;
2175 }
2176
2177 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m1_shift = 24;
2178 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m2_shift = 12;
2179 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].b_shift = 12;
2180
2181 if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2182 data->pixel_clk_quad_eqn_a) &&
2183 (PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2184 data->pixel_clk_quad_eqn_b)) {
2185 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m1 =
2186 (int32_t)data->pixel_clk_quad_eqn_a;
2187 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m2 =
2188 (int32_t)data->pixel_clk_quad_eqn_b;
2189 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].b =
2190 (int32_t)data->pixel_clk_quad_eqn_c;
2191 } else {
2192 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m1 =
2193 (int32_t)avfs_params.ulPixelclk2GfxclkM1;
2194 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m2 =
2195 (int32_t)avfs_params.ulPixelclk2GfxclkM2;
2196 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].b =
2197 (int32_t)avfs_params.ulPixelclk2GfxclkB;
2198 }
2199
2200 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m1_shift = 24;
2201 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m2_shift = 12;
2202 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].b_shift = 12;
2203 if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2204 data->phy_clk_quad_eqn_a) &&
2205 (PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2206 data->phy_clk_quad_eqn_b)) {
2207 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m1 =
2208 (int32_t)data->phy_clk_quad_eqn_a;
2209 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m2 =
2210 (int32_t)data->phy_clk_quad_eqn_b;
2211 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].b =
2212 (int32_t)data->phy_clk_quad_eqn_c;
2213 } else {
2214 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m1 =
2215 (int32_t)avfs_params.ulPhyclk2GfxclkM1;
2216 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m2 =
2217 (int32_t)avfs_params.ulPhyclk2GfxclkM2;
2218 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].b =
2219 (int32_t)avfs_params.ulPhyclk2GfxclkB;
2220 }
2221
2222 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m1_shift = 24;
2223 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m2_shift = 12;
2224 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].b_shift = 12;
2225 } else {
2226 data->smu_features[GNLD_AVFS].supported = false;
2227 }
2228 }
2229
2230 return 0;
2231}
2232
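/*
 * Illustrative sketch (hypothetical, not part of this driver): the m1/m2/b
 * coefficients above are stored with companion *_shift fields (24, 12 and 12
 * for the DisplayClock2Gfxclk entries), which reads like a fixed-point
 * encoding where the shift is the number of fractional bits.  Under that
 * assumption a quadratic y = m1*x^2 + m2*x + b would be evaluated roughly as
 * below; the authoritative interpretation lives in the SMC firmware, not in
 * this file.
 */
static inline int64_t example_fixed_point_quadratic(int64_t x,
		int32_t m1, uint8_t m1_shift,
		int32_t m2, uint8_t m2_shift,
		int32_t b, uint8_t b_shift)
{
	return ((m1 * x * x) >> m1_shift) +
			((m2 * x) >> m2_shift) +
			((int64_t)b >> b_shift);
}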
2233static int vega10_populate_gpio_parameters(struct pp_hwmgr *hwmgr)
2234{
2235 struct vega10_hwmgr *data =
2236 (struct vega10_hwmgr *)(hwmgr->backend);
2237 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
2238 struct pp_atomfwctrl_gpio_parameters gpio_params = {0};
2239 int result;
2240
2241 result = pp_atomfwctrl_get_gpio_information(hwmgr, &gpio_params);
2242 if (!result) {
2243 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2244 PHM_PlatformCaps_RegulatorHot) &&
2245 (data->registry_data.regulator_hot_gpio_support)) {
2246 pp_table->VR0HotGpio = gpio_params.ucVR0HotGpio;
2247 pp_table->VR0HotPolarity = gpio_params.ucVR0HotPolarity;
2248 pp_table->VR1HotGpio = gpio_params.ucVR1HotGpio;
2249 pp_table->VR1HotPolarity = gpio_params.ucVR1HotPolarity;
2250 } else {
2251 pp_table->VR0HotGpio = 0;
2252 pp_table->VR0HotPolarity = 0;
2253 pp_table->VR1HotGpio = 0;
2254 pp_table->VR1HotPolarity = 0;
2255 }
2256
2257 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2258 PHM_PlatformCaps_AutomaticDCTransition) &&
2259 (data->registry_data.ac_dc_switch_gpio_support)) {
2260 pp_table->AcDcGpio = gpio_params.ucAcDcGpio;
2261 pp_table->AcDcPolarity = gpio_params.ucAcDcPolarity;
2262 } else {
2263 pp_table->AcDcGpio = 0;
2264 pp_table->AcDcPolarity = 0;
2265 }
2266 }
2267
2268 return result;
2269}
2270
2271static int vega10_avfs_enable(struct pp_hwmgr *hwmgr, bool enable)
2272{
2273 struct vega10_hwmgr *data =
2274 (struct vega10_hwmgr *)(hwmgr->backend);
2275
2276 if (data->smu_features[GNLD_AVFS].supported) {
2277 if (enable) {
2278 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
2279 true,
2280 data->smu_features[GNLD_AVFS].smu_feature_bitmap),
2281 "[avfs_control] Attempt to Enable AVFS feature Failed!",
2282 return -1);
2283 data->smu_features[GNLD_AVFS].enabled = true;
2284 } else {
2285 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
2286 false,
2287 data->smu_features[GNLD_AVFS].smu_feature_bitmap),
2288 "[avfs_control] Attempt to Disable AVFS feature Failed!",
2289 return -1);
2290 data->smu_features[GNLD_AVFS].enabled = false;
2291 }
2292 }
2293
2294 return 0;
2295}
2296
2297static int vega10_populate_and_upload_avfs_fuse_override(struct pp_hwmgr *hwmgr)
2298{
2299 int result = 0;
2300
2301 uint64_t serial_number = 0;
2302 uint32_t top32, bottom32;
2303 struct phm_fuses_default fuse;
2304
2305 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
2306 AvfsFuseOverride_t *avfs_fuse_table = &(data->smc_state_table.avfs_fuse_override_table);
2307
2308 smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_ReadSerialNumTop32);
2309 vega10_read_arg_from_smc(hwmgr->smumgr, &top32);
2310
2311 smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_ReadSerialNumBottom32);
2312 vega10_read_arg_from_smc(hwmgr->smumgr, &bottom32);
2313
2314 serial_number = ((uint64_t)bottom32 << 32) | top32;
2315
2316 if (pp_override_get_default_fuse_value(serial_number, vega10_fuses_default, &fuse) == 0) {
2317 avfs_fuse_table->VFT0_b = fuse.VFT0_b;
2318 avfs_fuse_table->VFT0_m1 = fuse.VFT0_m1;
2319 avfs_fuse_table->VFT0_m2 = fuse.VFT0_m2;
2320 avfs_fuse_table->VFT1_b = fuse.VFT1_b;
2321 avfs_fuse_table->VFT1_m1 = fuse.VFT1_m1;
2322 avfs_fuse_table->VFT1_m2 = fuse.VFT1_m2;
2323 avfs_fuse_table->VFT2_b = fuse.VFT2_b;
2324 avfs_fuse_table->VFT2_m1 = fuse.VFT2_m1;
2325 avfs_fuse_table->VFT2_m2 = fuse.VFT2_m2;
2326 result = vega10_copy_table_to_smc(hwmgr->smumgr,
2327 (uint8_t *)avfs_fuse_table, AVFSFUSETABLE);
2328 PP_ASSERT_WITH_CODE(!result,
2329 "Failed to upload FuseOVerride!",
2330 );
2331 }
2332
2333 return result;
2334}
2335
2336static int vega10_save_default_power_profile(struct pp_hwmgr *hwmgr)
2337{
2338 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
2339 struct vega10_single_dpm_table *dpm_table = &(data->dpm_table.gfx_table);
2340 uint32_t min_level;
2341
2342 hwmgr->default_gfx_power_profile.type = AMD_PP_GFX_PROFILE;
2343 hwmgr->default_compute_power_profile.type = AMD_PP_COMPUTE_PROFILE;
2344
2345 /* Optimize compute power profile: Use only highest
2346 * 2 power levels (if more than 2 are available)
2347 */
2348 if (dpm_table->count > 2)
2349 min_level = dpm_table->count - 2;
2350 else if (dpm_table->count == 2)
2351 min_level = 1;
2352 else
2353 min_level = 0;
2354
2355 hwmgr->default_compute_power_profile.min_sclk =
2356 dpm_table->dpm_levels[min_level].value;
2357
2358 hwmgr->gfx_power_profile = hwmgr->default_gfx_power_profile;
2359 hwmgr->compute_power_profile = hwmgr->default_compute_power_profile;
2360
2361 return 0;
2362}
2363
2364/**
2365* Initializes the SMC table and uploads it
2366*
2367* @param hwmgr the address of the powerplay hardware manager.
2368* @return 0 on success, otherwise the error code of the first
2369* initialization step that failed.
2370*/
2371static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
2372{
2373 int result;
2374 struct vega10_hwmgr *data =
2375 (struct vega10_hwmgr *)(hwmgr->backend);
2376 struct phm_ppt_v2_information *table_info =
2377 (struct phm_ppt_v2_information *)(hwmgr->pptable);
2378 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
2379 struct pp_atomfwctrl_voltage_table voltage_table;
2380 struct pp_atomfwctrl_bios_boot_up_values boot_up_values;
2381
2382 result = vega10_setup_default_dpm_tables(hwmgr);
2383 PP_ASSERT_WITH_CODE(!result,
2384 "Failed to setup default DPM tables!",
2385 return result);
2386
2387 pp_atomfwctrl_get_voltage_table_v4(hwmgr, VOLTAGE_TYPE_VDDC,
2388 VOLTAGE_OBJ_SVID2, &voltage_table);
2389 pp_table->MaxVidStep = voltage_table.max_vid_step;
2390
2391 pp_table->GfxDpmVoltageMode =
2392 (uint8_t)(table_info->uc_gfx_dpm_voltage_mode);
2393 pp_table->SocDpmVoltageMode =
2394 (uint8_t)(table_info->uc_soc_dpm_voltage_mode);
2395 pp_table->UclkDpmVoltageMode =
2396 (uint8_t)(table_info->uc_uclk_dpm_voltage_mode);
2397 pp_table->UvdDpmVoltageMode =
2398 (uint8_t)(table_info->uc_uvd_dpm_voltage_mode);
2399 pp_table->VceDpmVoltageMode =
2400 (uint8_t)(table_info->uc_vce_dpm_voltage_mode);
2401 pp_table->Mp0DpmVoltageMode =
2402 (uint8_t)(table_info->uc_mp0_dpm_voltage_mode);
2403
2404 pp_table->DisplayDpmVoltageMode =
2405 (uint8_t)(table_info->uc_dcef_dpm_voltage_mode);
2406
2407 if (data->registry_data.ulv_support &&
2408 table_info->us_ulv_voltage_offset) {
2409 result = vega10_populate_ulv_state(hwmgr);
2410 PP_ASSERT_WITH_CODE(!result,
2411 "Failed to initialize ULV state!",
2412 return result);
2413 }
2414
2415 result = vega10_populate_smc_link_levels(hwmgr);
2416 PP_ASSERT_WITH_CODE(!result,
2417 "Failed to initialize Link Level!",
2418 return result);
2419
2420 result = vega10_populate_all_graphic_levels(hwmgr);
2421 PP_ASSERT_WITH_CODE(!result,
2422 "Failed to initialize Graphics Level!",
2423 return result);
2424
2425 result = vega10_populate_all_memory_levels(hwmgr);
2426 PP_ASSERT_WITH_CODE(!result,
2427 "Failed to initialize Memory Level!",
2428 return result);
2429
2430 result = vega10_populate_all_display_clock_levels(hwmgr);
2431 PP_ASSERT_WITH_CODE(!result,
2432 "Failed to initialize Display Level!",
2433 return result);
2434
2435 result = vega10_populate_smc_vce_levels(hwmgr);
2436 PP_ASSERT_WITH_CODE(!result,
2437 "Failed to initialize VCE Level!",
2438 return result);
2439
2440 result = vega10_populate_smc_uvd_levels(hwmgr);
2441 PP_ASSERT_WITH_CODE(!result,
2442 "Failed to initialize UVD Level!",
2443 return result);
2444
2445 if (data->registry_data.clock_stretcher_support) {
2446 result = vega10_populate_clock_stretcher_table(hwmgr);
2447 PP_ASSERT_WITH_CODE(!result,
2448 "Failed to populate Clock Stretcher Table!",
2449 return result);
2450 }
2451
2452 result = pp_atomfwctrl_get_vbios_bootup_values(hwmgr, &boot_up_values);
2453 if (!result) {
2454 data->vbios_boot_state.vddc = boot_up_values.usVddc;
2455 data->vbios_boot_state.vddci = boot_up_values.usVddci;
2456 data->vbios_boot_state.mvddc = boot_up_values.usMvddc;
2457 data->vbios_boot_state.gfx_clock = boot_up_values.ulGfxClk;
2458 data->vbios_boot_state.mem_clock = boot_up_values.ulUClk;
2459 data->vbios_boot_state.soc_clock = boot_up_values.ulSocClk;
2460 data->vbios_boot_state.dcef_clock = boot_up_values.ulDCEFClk;
2461 if (0 != boot_up_values.usVddc) {
2462 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
2463 PPSMC_MSG_SetFloorSocVoltage,
2464 (boot_up_values.usVddc * 4));
2465 data->vbios_boot_state.bsoc_vddc_lock = true;
2466 } else {
2467 data->vbios_boot_state.bsoc_vddc_lock = false;
2468 }
2469 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
2470 PPSMC_MSG_SetMinDeepSleepDcefclk,
2471 (uint32_t)(data->vbios_boot_state.dcef_clock / 100));
2472 }
2473
2474 result = vega10_populate_avfs_parameters(hwmgr);
2475 PP_ASSERT_WITH_CODE(!result,
2476 "Failed to initialize AVFS Parameters!",
2477 return result);
2478
2479 result = vega10_populate_gpio_parameters(hwmgr);
2480 PP_ASSERT_WITH_CODE(!result,
2481 "Failed to initialize GPIO Parameters!",
2482 return result);
2483
2484 pp_table->GfxclkAverageAlpha = (uint8_t)
2485 (data->gfxclk_average_alpha);
2486 pp_table->SocclkAverageAlpha = (uint8_t)
2487 (data->socclk_average_alpha);
2488 pp_table->UclkAverageAlpha = (uint8_t)
2489 (data->uclk_average_alpha);
2490 pp_table->GfxActivityAverageAlpha = (uint8_t)
2491 (data->gfx_activity_average_alpha);
2492
2493 vega10_populate_and_upload_avfs_fuse_override(hwmgr);
2494
2495 result = vega10_copy_table_to_smc(hwmgr->smumgr,
2496 (uint8_t *)pp_table, PPTABLE);
2497 PP_ASSERT_WITH_CODE(!result,
2498 "Failed to upload PPtable!", return result);
2499
2500 result = vega10_avfs_enable(hwmgr, true);
2501 PP_ASSERT_WITH_CODE(!result, "Attempt to enable AVFS feature Failed!",
2502 return result);
2503
2504 vega10_save_default_power_profile(hwmgr);
2505
2506 return 0;
2507}
2508
2509static int vega10_enable_thermal_protection(struct pp_hwmgr *hwmgr)
2510{
2511 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
2512
2513 if (data->smu_features[GNLD_THERMAL].supported) {
2514 if (data->smu_features[GNLD_THERMAL].enabled)
2515 pr_info("THERMAL Feature Already enabled!");
2516
2517 PP_ASSERT_WITH_CODE(
2518 !vega10_enable_smc_features(hwmgr->smumgr,
2519 true,
2520 data->smu_features[GNLD_THERMAL].smu_feature_bitmap),
2521 "Enable THERMAL Feature Failed!",
2522 return -1);
2523 data->smu_features[GNLD_THERMAL].enabled = true;
2524 }
2525
2526 return 0;
2527}
2528
2529static int vega10_disable_thermal_protection(struct pp_hwmgr *hwmgr)
2530{
2531 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
2532
2533 if (data->smu_features[GNLD_THERMAL].supported) {
2534 if (!data->smu_features[GNLD_THERMAL].enabled)
2535 pr_info("THERMAL Feature Already disabled!");
2536
2537 PP_ASSERT_WITH_CODE(
2538 !vega10_enable_smc_features(hwmgr->smumgr,
2539 false,
2540 data->smu_features[GNLD_THERMAL].smu_feature_bitmap),
2541 "disable THERMAL Feature Failed!",
2542 return -1);
2543 data->smu_features[GNLD_THERMAL].enabled = false;
2544 }
2545
2546 return 0;
2547}
2548
2549static int vega10_enable_vrhot_feature(struct pp_hwmgr *hwmgr)
2550{
2551 struct vega10_hwmgr *data =
2552 (struct vega10_hwmgr *)(hwmgr->backend);
2553
2554 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2555 PHM_PlatformCaps_RegulatorHot)) {
2556 if (data->smu_features[GNLD_VR0HOT].supported) {
2557 PP_ASSERT_WITH_CODE(
2558 !vega10_enable_smc_features(hwmgr->smumgr,
2559 true,
2560 data->smu_features[GNLD_VR0HOT].smu_feature_bitmap),
2561 "Attempt to Enable VR0 Hot feature Failed!",
2562 return -1);
2563 data->smu_features[GNLD_VR0HOT].enabled = true;
2564 } else {
2565 if (data->smu_features[GNLD_VR1HOT].supported) {
2566 PP_ASSERT_WITH_CODE(
2567 !vega10_enable_smc_features(hwmgr->smumgr,
2568 true,
2569 data->smu_features[GNLD_VR1HOT].smu_feature_bitmap),
2570 "Attempt to Enable VR0 Hot feature Failed!",
2571 return -1);
2572 data->smu_features[GNLD_VR1HOT].enabled = true;
2573 }
2574 }
2575 }
2576 return 0;
2577}
2578
2579static int vega10_enable_ulv(struct pp_hwmgr *hwmgr)
2580{
2581 struct vega10_hwmgr *data =
2582 (struct vega10_hwmgr *)(hwmgr->backend);
2583
2584 if (data->registry_data.ulv_support) {
2585 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
2586 true, data->smu_features[GNLD_ULV].smu_feature_bitmap),
2587 "Enable ULV Feature Failed!",
2588 return -1);
2589 data->smu_features[GNLD_ULV].enabled = true;
2590 }
2591
2592 return 0;
2593}
2594
2595static int vega10_disable_ulv(struct pp_hwmgr *hwmgr)
2596{
2597 struct vega10_hwmgr *data =
2598 (struct vega10_hwmgr *)(hwmgr->backend);
2599
2600 if (data->registry_data.ulv_support) {
2601 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
2602 false, data->smu_features[GNLD_ULV].smu_feature_bitmap),
2603 "disable ULV Feature Failed!",
2604 return -EINVAL);
2605 data->smu_features[GNLD_ULV].enabled = false;
2606 }
2607
2608 return 0;
2609}
2610
2611static int vega10_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
2612{
2613 struct vega10_hwmgr *data =
2614 (struct vega10_hwmgr *)(hwmgr->backend);
2615
2616 if (data->smu_features[GNLD_DS_GFXCLK].supported) {
2617 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
2618 true, data->smu_features[GNLD_DS_GFXCLK].smu_feature_bitmap),
2619 "Attempt to Enable DS_GFXCLK Feature Failed!",
2620 return -EINVAL);
2621 data->smu_features[GNLD_DS_GFXCLK].enabled = true;
2622 }
2623
2624 if (data->smu_features[GNLD_DS_SOCCLK].supported) {
2625 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
2626 true, data->smu_features[GNLD_DS_SOCCLK].smu_feature_bitmap),
2627 "Attempt to Enable DS_SOCCLK Feature Failed!",
2628 return -EINVAL);
2629 data->smu_features[GNLD_DS_SOCCLK].enabled = true;
2630 }
2631
2632 if (data->smu_features[GNLD_DS_LCLK].supported) {
2633 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
2634 true, data->smu_features[GNLD_DS_LCLK].smu_feature_bitmap),
2635 "Attempt to Enable DS_LCLK Feature Failed!",
2636 return -EINVAL);
2637 data->smu_features[GNLD_DS_LCLK].enabled = true;
2638 }
2639
2640 if (data->smu_features[GNLD_DS_DCEFCLK].supported) {
2641 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
2642 true, data->smu_features[GNLD_DS_DCEFCLK].smu_feature_bitmap),
2643 "Attempt to Enable DS_DCEFCLK Feature Failed!",
2644 return -EINVAL);
2645 data->smu_features[GNLD_DS_DCEFCLK].enabled = true;
2646 }
2647
2648 return 0;
2649}
2650
2651static int vega10_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
2652{
2653 struct vega10_hwmgr *data =
2654 (struct vega10_hwmgr *)(hwmgr->backend);
2655
2656 if (data->smu_features[GNLD_DS_GFXCLK].supported) {
2657 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
2658 false, data->smu_features[GNLD_DS_GFXCLK].smu_feature_bitmap),
2659 "Attempt to disable DS_GFXCLK Feature Failed!",
2660 return -EINVAL);
2661 data->smu_features[GNLD_DS_GFXCLK].enabled = false;
2662 }
2663
2664 if (data->smu_features[GNLD_DS_SOCCLK].supported) {
2665 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
2666 false, data->smu_features[GNLD_DS_SOCCLK].smu_feature_bitmap),
2667 "Attempt to disable DS_ Feature Failed!",
2668 return -EINVAL);
2669 data->smu_features[GNLD_DS_SOCCLK].enabled = false;
2670 }
2671
2672 if (data->smu_features[GNLD_DS_LCLK].supported) {
2673 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
2674 false, data->smu_features[GNLD_DS_LCLK].smu_feature_bitmap),
2675 "Attempt to disable DS_LCLK Feature Failed!",
2676 return -EINVAL);
2677 data->smu_features[GNLD_DS_LCLK].enabled = false;
2678 }
2679
2680 if (data->smu_features[GNLD_DS_DCEFCLK].supported) {
2681 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
2682 false, data->smu_features[GNLD_DS_DCEFCLK].smu_feature_bitmap),
2683 "Attempt to disable DS_DCEFCLK Feature Failed!",
2684 return -EINVAL);
2685 data->smu_features[GNLD_DS_DCEFCLK].enabled = false;
2686 }
2687
2688 return 0;
2689}
2690
2691static int vega10_stop_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap)
2692{
2693 struct vega10_hwmgr *data =
2694 (struct vega10_hwmgr *)(hwmgr->backend);
2695 uint32_t i, feature_mask = 0;
2696
2697
2698 if (data->smu_features[GNLD_LED_DISPLAY].supported == true) {
2699 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
2700 false, data->smu_features[GNLD_LED_DISPLAY].smu_feature_bitmap),
2701 "Attempt to disable LED DPM feature failed!", return -EINVAL);
2702 data->smu_features[GNLD_LED_DISPLAY].enabled = false;
2703 }
2704
2705 for (i = 0; i < GNLD_DPM_MAX; i++) {
2706 if (data->smu_features[i].smu_feature_bitmap & bitmap) {
2707 if (data->smu_features[i].supported) {
2708 if (data->smu_features[i].enabled) {
2709 feature_mask |= data->smu_features[i].
2710 smu_feature_bitmap;
2711 data->smu_features[i].enabled = false;
2712 }
2713 }
2714 }
2715 }
2716
2717 vega10_enable_smc_features(hwmgr->smumgr, false, feature_mask);
2718
2719 return 0;
2720}
2721
2722/**
2723 * @brief Tell SMC to enable the supported DPMs.
2724 *
2725 * @param hwmgr - the address of the powerplay hardware manager.
2726 * @param bitmap - bitmap of the features to be enabled.
2727 * @return 0 if at least one DPM is successfully enabled.
2728 */
2729static int vega10_start_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap)
2730{
2731 struct vega10_hwmgr *data =
2732 (struct vega10_hwmgr *)(hwmgr->backend);
2733 uint32_t i, feature_mask = 0;
2734
2735 for (i = 0; i < GNLD_DPM_MAX; i++) {
2736 if (data->smu_features[i].smu_feature_bitmap & bitmap) {
2737 if (data->smu_features[i].supported) {
2738 if (!data->smu_features[i].enabled) {
2739 feature_mask |= data->smu_features[i].
2740 smu_feature_bitmap;
2741 data->smu_features[i].enabled = true;
2742 }
2743 }
2744 }
2745 }
2746
2747 if (vega10_enable_smc_features(hwmgr->smumgr,
2748 true, feature_mask)) {
2749 for (i = 0; i < GNLD_DPM_MAX; i++) {
2750 if (data->smu_features[i].smu_feature_bitmap &
2751 feature_mask)
2752 data->smu_features[i].enabled = false;
2753 }
2754 }
2755
2756 if (data->smu_features[GNLD_LED_DISPLAY].supported == true) {
2757 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
2758 true, data->smu_features[GNLD_LED_DISPLAY].smu_feature_bitmap),
2759 "Attempt to Enable LED DPM feature Failed!", return -EINVAL);
2760 data->smu_features[GNLD_LED_DISPLAY].enabled = true;
2761 }
2762
2763 if (data->vbios_boot_state.bsoc_vddc_lock) {
2764 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
2765 PPSMC_MSG_SetFloorSocVoltage, 0);
2766 data->vbios_boot_state.bsoc_vddc_lock = false;
2767 }
2768
2769 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2770 PHM_PlatformCaps_Falcon_QuickTransition)) {
2771 if (data->smu_features[GNLD_ACDC].supported) {
2772 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
2773 true, data->smu_features[GNLD_ACDC].smu_feature_bitmap),
2774 "Attempt to Enable DS_GFXCLK Feature Failed!",
2775 return -1);
2776 data->smu_features[GNLD_ACDC].enabled = true;
2777 }
2778 }
2779
2780 return 0;
2781}
2782
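/*
 * Illustrative sketch (hypothetical helper and struct, not part of this
 * driver): both vega10_start_dpm() and vega10_stop_dpm() above first OR the
 * bitmaps of every supported feature that still needs toggling into a single
 * mask and then issue one vega10_enable_smc_features() call, instead of one
 * SMC message per feature.  The accumulation step looks roughly like this:
 */
struct example_smu_feature {	/* hypothetical mirror of the fields used above */
	bool supported;
	bool enabled;
	uint32_t smu_feature_bitmap;
};

static inline uint32_t example_collect_feature_mask(
		const struct example_smu_feature *features, uint32_t count,
		uint32_t requested_bitmap, bool enable)
{
	uint32_t i, mask = 0;

	for (i = 0; i < count; i++)
		if ((features[i].smu_feature_bitmap & requested_bitmap) &&
				features[i].supported &&
				features[i].enabled != enable)
			mask |= features[i].smu_feature_bitmap;

	return mask;
}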
2783static int vega10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
2784{
2785 struct vega10_hwmgr *data =
2786 (struct vega10_hwmgr *)(hwmgr->backend);
2787 int tmp_result, result = 0;
2788
2789 tmp_result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
2790 PPSMC_MSG_ConfigureTelemetry, data->config_telemetry);
2791 PP_ASSERT_WITH_CODE(!tmp_result,
2792 "Failed to configure telemetry!",
2793 return tmp_result);
2794
2795 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
2796 PPSMC_MSG_NumOfDisplays, 0);
2797
2798 tmp_result = (!vega10_is_dpm_running(hwmgr)) ? 0 : -1;
2799 PP_ASSERT_WITH_CODE(!tmp_result,
2800 "DPM is already running right , skipping re-enablement!",
2801 return 0);
2802
2803 tmp_result = vega10_construct_voltage_tables(hwmgr);
2804 PP_ASSERT_WITH_CODE(!tmp_result,
2805 "Failed to contruct voltage tables!",
2806 result = tmp_result);
2807
2808 tmp_result = vega10_init_smc_table(hwmgr);
2809 PP_ASSERT_WITH_CODE(!tmp_result,
2810 "Failed to initialize SMC table!",
2811 result = tmp_result);
2812
2813 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2814 PHM_PlatformCaps_ThermalController)) {
2815 tmp_result = vega10_enable_thermal_protection(hwmgr);
2816 PP_ASSERT_WITH_CODE(!tmp_result,
2817 "Failed to enable thermal protection!",
2818 result = tmp_result);
2819 }
2820
2821 tmp_result = vega10_enable_vrhot_feature(hwmgr);
2822 PP_ASSERT_WITH_CODE(!tmp_result,
2823 "Failed to enable VR hot feature!",
2824 result = tmp_result);
2825
2826 tmp_result = vega10_enable_deep_sleep_master_switch(hwmgr);
2827 PP_ASSERT_WITH_CODE(!tmp_result,
2828 "Failed to enable deep sleep master switch!",
2829 result = tmp_result);
2830
2831 tmp_result = vega10_start_dpm(hwmgr, SMC_DPM_FEATURES);
2832 PP_ASSERT_WITH_CODE(!tmp_result,
2833 "Failed to start DPM!", result = tmp_result);
2834
2835 tmp_result = vega10_enable_power_containment(hwmgr);
2836 PP_ASSERT_WITH_CODE(!tmp_result,
2837 "Failed to enable power containment!",
2838 result = tmp_result);
2839
2840 tmp_result = vega10_power_control_set_level(hwmgr);
2841 PP_ASSERT_WITH_CODE(!tmp_result,
2842 "Failed to power control set level!",
2843 result = tmp_result);
2844
2845 tmp_result = vega10_enable_ulv(hwmgr);
2846 PP_ASSERT_WITH_CODE(!tmp_result,
2847 "Failed to enable ULV!",
2848 result = tmp_result);
2849
2850 return result;
2851}
2852
2853static int vega10_get_power_state_size(struct pp_hwmgr *hwmgr)
2854{
2855 return sizeof(struct vega10_power_state);
2856}
2857
2858static int vega10_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr,
2859 void *state, struct pp_power_state *power_state,
2860 void *pp_table, uint32_t classification_flag)
2861{
2862 ATOM_Vega10_GFXCLK_Dependency_Record_V2 *patom_record_V2;
2863 struct vega10_power_state *vega10_power_state =
2864 cast_phw_vega10_power_state(&(power_state->hardware));
2865 struct vega10_performance_level *performance_level;
2866 ATOM_Vega10_State *state_entry = (ATOM_Vega10_State *)state;
2867 ATOM_Vega10_POWERPLAYTABLE *powerplay_table =
2868 (ATOM_Vega10_POWERPLAYTABLE *)pp_table;
2869 ATOM_Vega10_SOCCLK_Dependency_Table *socclk_dep_table =
2870 (ATOM_Vega10_SOCCLK_Dependency_Table *)
2871 (((unsigned long)powerplay_table) +
2872 le16_to_cpu(powerplay_table->usSocclkDependencyTableOffset));
2873 ATOM_Vega10_GFXCLK_Dependency_Table *gfxclk_dep_table =
2874 (ATOM_Vega10_GFXCLK_Dependency_Table *)
2875 (((unsigned long)powerplay_table) +
2876 le16_to_cpu(powerplay_table->usGfxclkDependencyTableOffset));
2877 ATOM_Vega10_MCLK_Dependency_Table *mclk_dep_table =
2878 (ATOM_Vega10_MCLK_Dependency_Table *)
2879 (((unsigned long)powerplay_table) +
2880 le16_to_cpu(powerplay_table->usMclkDependencyTableOffset));
2881
2882
2883 /* The following fields are not initialized here:
2884 * id orderedList allStatesList
2885 */
2886 power_state->classification.ui_label =
2887 (le16_to_cpu(state_entry->usClassification) &
2888 ATOM_PPLIB_CLASSIFICATION_UI_MASK) >>
2889 ATOM_PPLIB_CLASSIFICATION_UI_SHIFT;
2890 power_state->classification.flags = classification_flag;
2891 /* NOTE: There is a classification2 flag in BIOS
2892 * that is not being used right now
2893 */
2894 power_state->classification.temporary_state = false;
2895 power_state->classification.to_be_deleted = false;
2896
2897 power_state->validation.disallowOnDC =
2898 ((le32_to_cpu(state_entry->ulCapsAndSettings) &
2899 ATOM_Vega10_DISALLOW_ON_DC) != 0);
2900
2901 power_state->display.disableFrameModulation = false;
2902 power_state->display.limitRefreshrate = false;
2903 power_state->display.enableVariBright =
2904 ((le32_to_cpu(state_entry->ulCapsAndSettings) &
2905 ATOM_Vega10_ENABLE_VARIBRIGHT) != 0);
2906
2907 power_state->validation.supportedPowerLevels = 0;
2908 power_state->uvd_clocks.VCLK = 0;
2909 power_state->uvd_clocks.DCLK = 0;
2910 power_state->temperatures.min = 0;
2911 power_state->temperatures.max = 0;
2912
2913 performance_level = &(vega10_power_state->performance_levels
2914 [vega10_power_state->performance_level_count++]);
2915
2916 PP_ASSERT_WITH_CODE(
2917 (vega10_power_state->performance_level_count <
2918 NUM_GFXCLK_DPM_LEVELS),
2919 "Performance levels exceeds SMC limit!",
2920 return -1);
2921
2922 PP_ASSERT_WITH_CODE(
2923 (vega10_power_state->performance_level_count <=
2924 hwmgr->platform_descriptor.
2925 hardwareActivityPerformanceLevels),
2926 "Performance levels exceeds Driver limit!",
2927 return -1);
2928
2929 /* Performance levels are arranged from low to high. */
2930 performance_level->soc_clock = socclk_dep_table->entries
2931 [state_entry->ucSocClockIndexLow].ulClk;
2932 performance_level->gfx_clock = gfxclk_dep_table->entries
2933 [state_entry->ucGfxClockIndexLow].ulClk;
2934 performance_level->mem_clock = mclk_dep_table->entries
2935 [state_entry->ucMemClockIndexLow].ulMemClk;
2936
2937 performance_level = &(vega10_power_state->performance_levels
2938 [vega10_power_state->performance_level_count++]);
2939 performance_level->soc_clock = socclk_dep_table->entries
2940 [state_entry->ucSocClockIndexHigh].ulClk;
2941 if (gfxclk_dep_table->ucRevId == 0) {
2942 performance_level->gfx_clock = gfxclk_dep_table->entries
2943 [state_entry->ucGfxClockIndexHigh].ulClk;
2944 } else if (gfxclk_dep_table->ucRevId == 1) {
2945 patom_record_V2 = (ATOM_Vega10_GFXCLK_Dependency_Record_V2 *)gfxclk_dep_table->entries;
2946 performance_level->gfx_clock = patom_record_V2[state_entry->ucGfxClockIndexHigh].ulClk;
2947 }
2948
2949 performance_level->mem_clock = mclk_dep_table->entries
2950 [state_entry->ucMemClockIndexHigh].ulMemClk;
2951 return 0;
2952}
2953
2954static int vega10_get_pp_table_entry(struct pp_hwmgr *hwmgr,
2955 unsigned long entry_index, struct pp_power_state *state)
2956{
2957 int result;
2958 struct vega10_power_state *ps;
2959
2960 state->hardware.magic = PhwVega10_Magic;
2961
2962 ps = cast_phw_vega10_power_state(&state->hardware);
2963
2964 result = vega10_get_powerplay_table_entry(hwmgr, entry_index, state,
2965 vega10_get_pp_table_entry_callback_func);
2966
2967 /*
2968 * This is the earliest time we have all the dependency table
2969 * and the VBIOS boot state
2970 */
2971 /* set DC compatible flag if this state supports DC */
2972 if (!state->validation.disallowOnDC)
2973 ps->dc_compatible = true;
2974
2975 ps->uvd_clks.vclk = state->uvd_clocks.VCLK;
2976 ps->uvd_clks.dclk = state->uvd_clocks.DCLK;
2977
2978 return 0;
2979}
2980
2981static int vega10_patch_boot_state(struct pp_hwmgr *hwmgr,
2982 struct pp_hw_power_state *hw_ps)
2983{
2984 return 0;
2985}
2986
2987static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
2988 struct pp_power_state *request_ps,
2989 const struct pp_power_state *current_ps)
2990{
2991 struct vega10_power_state *vega10_ps =
2992 cast_phw_vega10_power_state(&request_ps->hardware);
2993 uint32_t sclk;
2994 uint32_t mclk;
2995 struct PP_Clocks minimum_clocks = {0};
2996 bool disable_mclk_switching;
2997 bool disable_mclk_switching_for_frame_lock;
2998 bool disable_mclk_switching_for_vr;
2999 bool force_mclk_high;
3000 struct cgs_display_info info = {0};
3001 const struct phm_clock_and_voltage_limits *max_limits;
3002 uint32_t i;
3003 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
3004 struct phm_ppt_v2_information *table_info =
3005 (struct phm_ppt_v2_information *)(hwmgr->pptable);
3006 int32_t count;
3007 uint32_t stable_pstate_sclk_dpm_percentage;
3008 uint32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0;
3009 uint32_t latency;
3010
3011 data->battery_state = (PP_StateUILabel_Battery ==
3012 request_ps->classification.ui_label);
3013
3014 if (vega10_ps->performance_level_count != 2)
3015 pr_info("Vega10 should always have 2 performance levels");
3016
3017 max_limits = (PP_PowerSource_AC == hwmgr->power_source) ?
3018 &(hwmgr->dyn_state.max_clock_voltage_on_ac) :
3019 &(hwmgr->dyn_state.max_clock_voltage_on_dc);
3020
3021 /* Cap clock DPM tables at DC MAX if it is in DC. */
3022 if (PP_PowerSource_DC == hwmgr->power_source) {
3023 for (i = 0; i < vega10_ps->performance_level_count; i++) {
3024 if (vega10_ps->performance_levels[i].mem_clock >
3025 max_limits->mclk)
3026 vega10_ps->performance_levels[i].mem_clock =
3027 max_limits->mclk;
3028 if (vega10_ps->performance_levels[i].gfx_clock >
3029 max_limits->sclk)
3030 vega10_ps->performance_levels[i].gfx_clock =
3031 max_limits->sclk;
3032 }
3033 }
3034
3035 vega10_ps->vce_clks.evclk = hwmgr->vce_arbiter.evclk;
3036 vega10_ps->vce_clks.ecclk = hwmgr->vce_arbiter.ecclk;
3037
3038 cgs_get_active_displays_info(hwmgr->device, &info);
3039
3040 /* result = PHM_CheckVBlankTime(hwmgr, &vblankTooShort);*/
3041 minimum_clocks.engineClock = hwmgr->display_config.min_core_set_clock;
3042 minimum_clocks.memoryClock = hwmgr->display_config.min_mem_set_clock;
3043
3044 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3045 PHM_PlatformCaps_StablePState)) {
3046 PP_ASSERT_WITH_CODE(
3047 data->registry_data.stable_pstate_sclk_dpm_percentage >= 1 &&
3048 data->registry_data.stable_pstate_sclk_dpm_percentage <= 100,
3049 "percent sclk value must range from 1% to 100%, setting default value",
3050 stable_pstate_sclk_dpm_percentage = 75);
3051
3052 max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac);
3053 stable_pstate_sclk = (max_limits->sclk *
3054 stable_pstate_sclk_dpm_percentage) / 100;
3055
3056 for (count = table_info->vdd_dep_on_sclk->count - 1;
3057 count >= 0; count--) {
3058 if (stable_pstate_sclk >=
3059 table_info->vdd_dep_on_sclk->entries[count].clk) {
3060 stable_pstate_sclk =
3061 table_info->vdd_dep_on_sclk->entries[count].clk;
3062 break;
3063 }
3064 }
3065
3066 if (count < 0)
3067 stable_pstate_sclk = table_info->vdd_dep_on_sclk->entries[0].clk;
3068
3069 stable_pstate_mclk = max_limits->mclk;
3070
3071 minimum_clocks.engineClock = stable_pstate_sclk;
3072 minimum_clocks.memoryClock = stable_pstate_mclk;
3073 }
3074
3075 if (minimum_clocks.engineClock < hwmgr->gfx_arbiter.sclk)
3076 minimum_clocks.engineClock = hwmgr->gfx_arbiter.sclk;
3077
3078 if (minimum_clocks.memoryClock < hwmgr->gfx_arbiter.mclk)
3079 minimum_clocks.memoryClock = hwmgr->gfx_arbiter.mclk;
3080
3081 vega10_ps->sclk_threshold = hwmgr->gfx_arbiter.sclk_threshold;
3082
3083 if (hwmgr->gfx_arbiter.sclk_over_drive) {
3084 PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.sclk_over_drive <=
3085 hwmgr->platform_descriptor.overdriveLimit.engineClock),
3086 "Overdrive sclk exceeds limit",
3087 hwmgr->gfx_arbiter.sclk_over_drive =
3088 hwmgr->platform_descriptor.overdriveLimit.engineClock);
3089
3090 if (hwmgr->gfx_arbiter.sclk_over_drive >= hwmgr->gfx_arbiter.sclk)
3091 vega10_ps->performance_levels[1].gfx_clock =
3092 hwmgr->gfx_arbiter.sclk_over_drive;
3093 }
3094
3095 if (hwmgr->gfx_arbiter.mclk_over_drive) {
3096 PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.mclk_over_drive <=
3097 hwmgr->platform_descriptor.overdriveLimit.memoryClock),
3098 "Overdrive mclk exceeds limit",
3099 hwmgr->gfx_arbiter.mclk_over_drive =
3100 hwmgr->platform_descriptor.overdriveLimit.memoryClock);
3101
3102 if (hwmgr->gfx_arbiter.mclk_over_drive >= hwmgr->gfx_arbiter.mclk)
3103 vega10_ps->performance_levels[1].mem_clock =
3104 hwmgr->gfx_arbiter.mclk_over_drive;
3105 }
3106
3107 disable_mclk_switching_for_frame_lock = phm_cap_enabled(
3108 hwmgr->platform_descriptor.platformCaps,
3109 PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
3110 disable_mclk_switching_for_vr = phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3111 PHM_PlatformCaps_DisableMclkSwitchForVR);
3112 force_mclk_high = phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3113 PHM_PlatformCaps_ForceMclkHigh);
3114
3115 disable_mclk_switching = (info.display_count > 1) ||
3116 disable_mclk_switching_for_frame_lock ||
3117 disable_mclk_switching_for_vr ||
3118 force_mclk_high;
3119
3120 sclk = vega10_ps->performance_levels[0].gfx_clock;
3121 mclk = vega10_ps->performance_levels[0].mem_clock;
3122
3123 if (sclk < minimum_clocks.engineClock)
3124 sclk = (minimum_clocks.engineClock > max_limits->sclk) ?
3125 max_limits->sclk : minimum_clocks.engineClock;
3126
3127 if (mclk < minimum_clocks.memoryClock)
3128 mclk = (minimum_clocks.memoryClock > max_limits->mclk) ?
3129 max_limits->mclk : minimum_clocks.memoryClock;
3130
3131 vega10_ps->performance_levels[0].gfx_clock = sclk;
3132 vega10_ps->performance_levels[0].mem_clock = mclk;
3133
3134 if (vega10_ps->performance_levels[1].gfx_clock <
3135 vega10_ps->performance_levels[0].gfx_clock)
3136 vega10_ps->performance_levels[0].gfx_clock =
3137 vega10_ps->performance_levels[1].gfx_clock;
3138
3139 if (disable_mclk_switching) {
3140 /* Set Mclk the max of level 0 and level 1 */
3141 if (mclk < vega10_ps->performance_levels[1].mem_clock)
3142 mclk = vega10_ps->performance_levels[1].mem_clock;
3143
3144 /* Find the lowest MCLK frequency that is within
3145 * the tolerable latency defined in DAL
3146 */
3147 latency = 0;
3148 for (i = 0; i < data->mclk_latency_table.count; i++) {
3149 if ((data->mclk_latency_table.entries[i].latency <= latency) &&
3150 (data->mclk_latency_table.entries[i].frequency >=
3151 vega10_ps->performance_levels[0].mem_clock) &&
3152 (data->mclk_latency_table.entries[i].frequency <=
3153 vega10_ps->performance_levels[1].mem_clock))
3154 mclk = data->mclk_latency_table.entries[i].frequency;
3155 }
3156 vega10_ps->performance_levels[0].mem_clock = mclk;
3157 } else {
3158 if (vega10_ps->performance_levels[1].mem_clock <
3159 vega10_ps->performance_levels[0].mem_clock)
3160 vega10_ps->performance_levels[0].mem_clock =
3161 vega10_ps->performance_levels[1].mem_clock;
3162 }
3163
3164 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3165 PHM_PlatformCaps_StablePState)) {
3166 for (i = 0; i < vega10_ps->performance_level_count; i++) {
3167 vega10_ps->performance_levels[i].gfx_clock = stable_pstate_sclk;
3168 vega10_ps->performance_levels[i].mem_clock = stable_pstate_mclk;
3169 }
3170 }
3171
3172 return 0;
3173}
3174
3175static int vega10_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input)
3176{
3177 const struct phm_set_power_state_input *states =
3178 (const struct phm_set_power_state_input *)input;
3179 const struct vega10_power_state *vega10_ps =
3180 cast_const_phw_vega10_power_state(states->pnew_state);
3181 struct vega10_hwmgr *data =
3182 (struct vega10_hwmgr *)(hwmgr->backend);
3183 struct vega10_single_dpm_table *sclk_table =
3184 &(data->dpm_table.gfx_table);
3185 uint32_t sclk = vega10_ps->performance_levels
3186 [vega10_ps->performance_level_count - 1].gfx_clock;
3187 struct vega10_single_dpm_table *mclk_table =
3188 &(data->dpm_table.mem_table);
3189 uint32_t mclk = vega10_ps->performance_levels
3190 [vega10_ps->performance_level_count - 1].mem_clock;
3191 struct PP_Clocks min_clocks = {0};
3192 uint32_t i;
3193 struct cgs_display_info info = {0};
3194
3195 data->need_update_dpm_table = 0;
3196
3197 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3198 PHM_PlatformCaps_ODNinACSupport) ||
3199 phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3200 PHM_PlatformCaps_ODNinDCSupport)) {
3201 for (i = 0; i < sclk_table->count; i++) {
3202 if (sclk == sclk_table->dpm_levels[i].value)
3203 break;
3204 }
3205
3206 if (!(data->apply_overdrive_next_settings_mask &
3207 DPMTABLE_OD_UPDATE_SCLK) && i >= sclk_table->count) {
3208 /* Check SCLK in DAL's minimum clocks
3209 * in case DeepSleep divider update is required.
3210 */
3211 if (data->display_timing.min_clock_in_sr !=
3212 min_clocks.engineClockInSR &&
3213 (min_clocks.engineClockInSR >=
3214 VEGA10_MINIMUM_ENGINE_CLOCK ||
3215 data->display_timing.min_clock_in_sr >=
3216 VEGA10_MINIMUM_ENGINE_CLOCK))
3217 data->need_update_dpm_table |= DPMTABLE_UPDATE_SCLK;
3218 }
3219
3220 cgs_get_active_displays_info(hwmgr->device, &info);
3221
3222 if (data->display_timing.num_existing_displays !=
3223 info.display_count)
3224 data->need_update_dpm_table |= DPMTABLE_UPDATE_MCLK;
3225 } else {
3226 for (i = 0; i < sclk_table->count; i++) {
3227 if (sclk == sclk_table->dpm_levels[i].value)
3228 break;
3229 }
3230
3231 if (i >= sclk_table->count)
3232 data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
3233 else {
3234 /* Check SCLK in DAL's minimum clocks
3235 * in case DeepSleep divider update is required.
3236 */
3237 if (data->display_timing.min_clock_in_sr !=
3238 min_clocks.engineClockInSR &&
3239 (min_clocks.engineClockInSR >=
3240 VEGA10_MINIMUM_ENGINE_CLOCK ||
3241 data->display_timing.min_clock_in_sr >=
3242 VEGA10_MINIMUM_ENGINE_CLOCK))
3243 data->need_update_dpm_table |= DPMTABLE_UPDATE_SCLK;
3244 }
3245
3246 for (i = 0; i < mclk_table->count; i++) {
3247 if (mclk == mclk_table->dpm_levels[i].value)
3248 break;
3249 }
3250
3251 cgs_get_active_displays_info(hwmgr->device, &info);
3252
3253 if (i >= mclk_table->count)
3254 data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
3255
3256 if (data->display_timing.num_existing_displays !=
3257 info.display_count ||
3258 i >= mclk_table->count)
3259 data->need_update_dpm_table |= DPMTABLE_UPDATE_MCLK;
3260 }
3261 return 0;
3262}
3263
3264static int vega10_populate_and_upload_sclk_mclk_dpm_levels(
3265 struct pp_hwmgr *hwmgr, const void *input)
3266{
3267 int result = 0;
3268 const struct phm_set_power_state_input *states =
3269 (const struct phm_set_power_state_input *)input;
3270 const struct vega10_power_state *vega10_ps =
3271 cast_const_phw_vega10_power_state(states->pnew_state);
3272 struct vega10_hwmgr *data =
3273 (struct vega10_hwmgr *)(hwmgr->backend);
3274 uint32_t sclk = vega10_ps->performance_levels
3275 [vega10_ps->performance_level_count - 1].gfx_clock;
3276 uint32_t mclk = vega10_ps->performance_levels
3277 [vega10_ps->performance_level_count - 1].mem_clock;
3278 struct vega10_dpm_table *dpm_table = &data->dpm_table;
3279 struct vega10_dpm_table *golden_dpm_table =
3280 &data->golden_dpm_table;
3281 uint32_t dpm_count, clock_percent;
3282 uint32_t i;
3283
3284 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3285 PHM_PlatformCaps_ODNinACSupport) ||
3286 phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3287 PHM_PlatformCaps_ODNinDCSupport)) {
3288
3289 if (!data->need_update_dpm_table &&
3290 !data->apply_optimized_settings &&
3291 !data->apply_overdrive_next_settings_mask)
3292 return 0;
3293
3294 if (data->apply_overdrive_next_settings_mask &
3295 DPMTABLE_OD_UPDATE_SCLK) {
3296 for (dpm_count = 0;
3297 dpm_count < dpm_table->gfx_table.count;
3298 dpm_count++) {
3299 dpm_table->gfx_table.dpm_levels[dpm_count].enabled =
3300 data->odn_dpm_table.odn_core_clock_dpm_levels.
3301 performance_level_entries[dpm_count].enabled;
3302 dpm_table->gfx_table.dpm_levels[dpm_count].value =
3303 data->odn_dpm_table.odn_core_clock_dpm_levels.
3304 performance_level_entries[dpm_count].clock;
3305 }
3306 }
3307
3308 if (data->apply_overdrive_next_settings_mask &
3309 DPMTABLE_OD_UPDATE_MCLK) {
3310 for (dpm_count = 0;
3311 dpm_count < dpm_table->mem_table.count;
3312 dpm_count++) {
3313 dpm_table->mem_table.dpm_levels[dpm_count].enabled =
3314 data->odn_dpm_table.odn_memory_clock_dpm_levels.
3315 performance_level_entries[dpm_count].enabled;
3316 dpm_table->mem_table.dpm_levels[dpm_count].value =
3317 data->odn_dpm_table.odn_memory_clock_dpm_levels.
3318 performance_level_entries[dpm_count].clock;
3319 }
3320 }
3321
3322 if ((data->need_update_dpm_table & DPMTABLE_UPDATE_SCLK) ||
3323 data->apply_optimized_settings ||
3324 (data->apply_overdrive_next_settings_mask &
3325 DPMTABLE_OD_UPDATE_SCLK)) {
3326 result = vega10_populate_all_graphic_levels(hwmgr);
3327 PP_ASSERT_WITH_CODE(!result,
3328 "Failed to populate SCLK during \
3329 PopulateNewDPMClocksStates Function!",
3330 return result);
3331 }
3332
3333 if ((data->need_update_dpm_table & DPMTABLE_UPDATE_MCLK) ||
3334 (data->apply_overdrive_next_settings_mask &
3335 DPMTABLE_OD_UPDATE_MCLK)){
3336 result = vega10_populate_all_memory_levels(hwmgr);
3337 PP_ASSERT_WITH_CODE(!result,
3338 "Failed to populate MCLK during \
3339 PopulateNewDPMClocksStates Function!",
3340 return result);
3341 }
3342 } else {
3343 if (!data->need_update_dpm_table &&
3344 !data->apply_optimized_settings)
3345 return 0;
3346
3347 if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_SCLK &&
3348 data->smu_features[GNLD_DPM_GFXCLK].supported) {
3349 dpm_table->
3350 gfx_table.dpm_levels[dpm_table->gfx_table.count - 1].
3351 value = sclk;
3352 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3353 PHM_PlatformCaps_OD6PlusinACSupport) ||
3354 phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3355 PHM_PlatformCaps_OD6PlusinDCSupport)) {
3356 /* Need to do calculation based on the golden DPM table
3357 * as the Heatmap GPU Clock axis is also based on
3358 * the default values
3359 */
3360 PP_ASSERT_WITH_CODE(
3361 golden_dpm_table->gfx_table.dpm_levels
3362 [golden_dpm_table->gfx_table.count - 1].value,
3363 "Divide by 0!",
3364 return -1);
3365
3366 dpm_count = dpm_table->gfx_table.count < 2 ?
3367 0 : dpm_table->gfx_table.count - 2;
3368 for (i = dpm_count; i > 1; i--) {
3369 if (sclk > golden_dpm_table->gfx_table.dpm_levels
3370 [golden_dpm_table->gfx_table.count - 1].value) {
3371 clock_percent =
3372 ((sclk - golden_dpm_table->gfx_table.dpm_levels
3373 [golden_dpm_table->gfx_table.count - 1].value) *
3374 100) /
3375 golden_dpm_table->gfx_table.dpm_levels
3376 [golden_dpm_table->gfx_table.count - 1].value;
3377
3378 dpm_table->gfx_table.dpm_levels[i].value =
3379 golden_dpm_table->gfx_table.dpm_levels[i].value +
3380 (golden_dpm_table->gfx_table.dpm_levels[i].value *
3381 clock_percent) / 100;
3382 } else if (golden_dpm_table->
3383 gfx_table.dpm_levels[dpm_table->gfx_table.count-1].value >
3384 sclk) {
3385 clock_percent =
3386 ((golden_dpm_table->gfx_table.dpm_levels
3387 [golden_dpm_table->gfx_table.count - 1].value -
3388 sclk) * 100) /
3389 golden_dpm_table->gfx_table.dpm_levels
3390 [golden_dpm_table->gfx_table.count-1].value;
3391
3392 dpm_table->gfx_table.dpm_levels[i].value =
3393 golden_dpm_table->gfx_table.dpm_levels[i].value -
3394 (golden_dpm_table->gfx_table.dpm_levels[i].value *
3395 clock_percent) / 100;
3396 } else
3397 dpm_table->gfx_table.dpm_levels[i].value =
3398 golden_dpm_table->gfx_table.dpm_levels[i].value;
3399 }
3400 }
3401 }
3402
3403 if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_MCLK &&
3404 data->smu_features[GNLD_DPM_UCLK].supported) {
3405 dpm_table->
3406 mem_table.dpm_levels[dpm_table->mem_table.count - 1].
3407 value = mclk;
3408
3409 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3410 PHM_PlatformCaps_OD6PlusinACSupport) ||
3411 phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3412 PHM_PlatformCaps_OD6PlusinDCSupport)) {
3413
3414 PP_ASSERT_WITH_CODE(
3415 golden_dpm_table->mem_table.dpm_levels
3416 [golden_dpm_table->mem_table.count - 1].value,
3417 "Divide by 0!",
3418 return -1);
3419
3420 dpm_count = dpm_table->mem_table.count < 2 ?
3421 0 : dpm_table->mem_table.count - 2;
3422 for (i = dpm_count; i > 1; i--) {
3423 if (mclk > golden_dpm_table->mem_table.dpm_levels
3424 [golden_dpm_table->mem_table.count-1].value) {
3425 clock_percent = ((mclk -
3426 golden_dpm_table->mem_table.dpm_levels
3427 [golden_dpm_table->mem_table.count-1].value) *
3428 100) /
3429 golden_dpm_table->mem_table.dpm_levels
3430 [golden_dpm_table->mem_table.count-1].value;
3431
3432 dpm_table->mem_table.dpm_levels[i].value =
3433 golden_dpm_table->mem_table.dpm_levels[i].value +
3434 (golden_dpm_table->mem_table.dpm_levels[i].value *
3435 clock_percent) / 100;
3436 } else if (golden_dpm_table->mem_table.dpm_levels
3437 [dpm_table->mem_table.count-1].value > mclk) {
3438 clock_percent = ((golden_dpm_table->mem_table.dpm_levels
3439 [golden_dpm_table->mem_table.count-1].value - mclk) *
3440 100) /
3441 golden_dpm_table->mem_table.dpm_levels
3442 [golden_dpm_table->mem_table.count-1].value;
3443
3444 dpm_table->mem_table.dpm_levels[i].value =
3445 golden_dpm_table->mem_table.dpm_levels[i].value -
3446 (golden_dpm_table->mem_table.dpm_levels[i].value *
3447 clock_percent) / 100;
3448 } else
3449 dpm_table->mem_table.dpm_levels[i].value =
3450 golden_dpm_table->mem_table.dpm_levels[i].value;
3451 }
3452 }
3453 }
3454
3455 if ((data->need_update_dpm_table &
3456 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) ||
3457 data->apply_optimized_settings) {
3458 result = vega10_populate_all_graphic_levels(hwmgr);
3459 PP_ASSERT_WITH_CODE(!result,
3460 "Failed to populate SCLK during \
3461 PopulateNewDPMClocksStates Function!",
3462 return result);
3463 }
3464
3465 if (data->need_update_dpm_table &
3466 (DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) {
3467 result = vega10_populate_all_memory_levels(hwmgr);
3468 PP_ASSERT_WITH_CODE(!result,
3469 "Failed to populate MCLK during \
3470 PopulateNewDPMClocksStates Function!",
3471 return result);
3472 }
3473 }
3474 return result;
3475}
3476
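/* Enable only the DPM levels whose clock lies within [low_limit, high_limit];
 * all other levels are disabled.
 */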
3477static int vega10_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
3478 struct vega10_single_dpm_table *dpm_table,
3479 uint32_t low_limit, uint32_t high_limit)
3480{
3481 uint32_t i;
3482
3483 for (i = 0; i < dpm_table->count; i++) {
3484 if ((dpm_table->dpm_levels[i].value < low_limit) ||
3485 (dpm_table->dpm_levels[i].value > high_limit))
3486 dpm_table->dpm_levels[i].enabled = false;
3487 else
3488 dpm_table->dpm_levels[i].enabled = true;
3489 }
3490 return 0;
3491}
3492
3493static int vega10_trim_single_dpm_states_with_mask(struct pp_hwmgr *hwmgr,
3494 struct vega10_single_dpm_table *dpm_table,
3495 uint32_t low_limit, uint32_t high_limit,
3496 uint32_t disable_dpm_mask)
3497{
3498 uint32_t i;
3499
3500 for (i = 0; i < dpm_table->count; i++) {
3501 if ((dpm_table->dpm_levels[i].value < low_limit) ||
3502 (dpm_table->dpm_levels[i].value > high_limit))
3503 dpm_table->dpm_levels[i].enabled = false;
3504 else if (!((1 << i) & disable_dpm_mask))
3505 dpm_table->dpm_levels[i].enabled = false;
3506 else
3507 dpm_table->dpm_levels[i].enabled = true;
3508 }
3509 return 0;
3510}
3511
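/* Restrict the SOC/GFX/MEM DPM tables to the clock range spanned by the
 * power state's lowest and highest performance levels.
 */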
3512static int vega10_trim_dpm_states(struct pp_hwmgr *hwmgr,
3513 const struct vega10_power_state *vega10_ps)
3514{
3515 struct vega10_hwmgr *data =
3516 (struct vega10_hwmgr *)(hwmgr->backend);
3517 uint32_t high_limit_count;
3518
3519 PP_ASSERT_WITH_CODE((vega10_ps->performance_level_count >= 1),
3520 "power state did not have any performance level",
3521 return -1);
3522
3523 high_limit_count = (vega10_ps->performance_level_count == 1) ? 0 : 1;
3524
3525 vega10_trim_single_dpm_states(hwmgr,
3526 &(data->dpm_table.soc_table),
3527 vega10_ps->performance_levels[0].soc_clock,
3528 vega10_ps->performance_levels[high_limit_count].soc_clock);
3529
3530 vega10_trim_single_dpm_states_with_mask(hwmgr,
3531 &(data->dpm_table.gfx_table),
3532 vega10_ps->performance_levels[0].gfx_clock,
3533 vega10_ps->performance_levels[high_limit_count].gfx_clock,
3534 data->disable_dpm_mask);
3535
3536 vega10_trim_single_dpm_states(hwmgr,
3537 &(data->dpm_table.mem_table),
3538 vega10_ps->performance_levels[0].mem_clock,
3539 vega10_ps->performance_levels[high_limit_count].mem_clock);
3540
3541 return 0;
3542}
3543
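/* Return the index of the lowest enabled DPM level in the table. */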
3544static uint32_t vega10_find_lowest_dpm_level(
3545 struct vega10_single_dpm_table *table)
3546{
3547 uint32_t i;
3548
3549 for (i = 0; i < table->count; i++) {
3550 if (table->dpm_levels[i].enabled)
3551 break;
3552 }
3553
3554 return i;
3555}
3556
3557static uint32_t vega10_find_highest_dpm_level(
3558 struct vega10_single_dpm_table *table)
3559{
3560 uint32_t i = 0;
3561
3562 if (table->count <= MAX_REGULAR_DPM_NUMBER) {
3563 for (i = table->count; i > 0; i--) {
3564 if (table->dpm_levels[i - 1].enabled)
3565 return i - 1;
3566 }
3567 } else {
3568 pr_info("DPM Table Has Too Many Entries!");
3569 return MAX_REGULAR_DPM_NUMBER - 1;
3570 }
3571
3572 return i;
3573}
3574
3575static void vega10_apply_dal_minimum_voltage_request(
3576 struct pp_hwmgr *hwmgr)
3577{
3578 return;
3579}
3580
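/* Push the soft minimum GFXCLK/UCLK level indices to the SMU when they
 * differ from what was last programmed.
 */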
3581static int vega10_upload_dpm_bootup_level(struct pp_hwmgr *hwmgr)
3582{
3583 struct vega10_hwmgr *data =
3584 (struct vega10_hwmgr *)(hwmgr->backend);
3585
3586 vega10_apply_dal_minimum_voltage_request(hwmgr);
3587
3588 if (!data->registry_data.sclk_dpm_key_disabled) {
3589 if (data->smc_state_table.gfx_boot_level !=
3590 data->dpm_table.gfx_table.dpm_state.soft_min_level) {
3591 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
3592 hwmgr->smumgr,
3593 PPSMC_MSG_SetSoftMinGfxclkByIndex,
3594 data->smc_state_table.gfx_boot_level),
3595 "Failed to set soft min sclk index!",
3596 return -EINVAL);
3597 data->dpm_table.gfx_table.dpm_state.soft_min_level =
3598 data->smc_state_table.gfx_boot_level;
3599 }
3600 }
3601
3602 if (!data->registry_data.mclk_dpm_key_disabled) {
3603 if (data->smc_state_table.mem_boot_level !=
3604 data->dpm_table.mem_table.dpm_state.soft_min_level) {
3605 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
3606 hwmgr->smumgr,
3607 PPSMC_MSG_SetSoftMinUclkByIndex,
3608 data->smc_state_table.mem_boot_level),
3609 "Failed to set soft min mclk index!",
3610 return -EINVAL);
3611
3612 data->dpm_table.mem_table.dpm_state.soft_min_level =
3613 data->smc_state_table.mem_boot_level;
3614 }
3615 }
3616
3617 return 0;
3618}
3619
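/* Push the soft maximum GFXCLK/UCLK level indices to the SMU when they
 * differ from what was last programmed.
 */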
3620static int vega10_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
3621{
3622 struct vega10_hwmgr *data =
3623 (struct vega10_hwmgr *)(hwmgr->backend);
3624
3625 vega10_apply_dal_minimum_voltage_request(hwmgr);
3626
3627 if (!data->registry_data.sclk_dpm_key_disabled) {
3628 if (data->smc_state_table.gfx_max_level !=
3629 data->dpm_table.gfx_table.dpm_state.soft_max_level) {
3630 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
3631 hwmgr->smumgr,
3632 PPSMC_MSG_SetSoftMaxGfxclkByIndex,
3633 data->smc_state_table.gfx_max_level),
3634 "Failed to set soft max sclk index!",
3635 return -EINVAL);
3636 data->dpm_table.gfx_table.dpm_state.soft_max_level =
3637 data->smc_state_table.gfx_max_level;
3638 }
3639 }
3640
3641 if (!data->registry_data.mclk_dpm_key_disabled) {
3642 if (data->smc_state_table.mem_max_level !=
3643 data->dpm_table.mem_table.dpm_state.soft_max_level) {
3644 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
3645 hwmgr->smumgr,
3646 PPSMC_MSG_SetSoftMaxUclkByIndex,
3647 data->smc_state_table.mem_max_level),
3648 "Failed to set soft max mclk index!",
3649 return -EINVAL);
3650 data->dpm_table.mem_table.dpm_state.soft_max_level =
3651 data->smc_state_table.mem_max_level;
3652 }
3653 }
3654
3655 return 0;
3656}
3657
3658static int vega10_generate_dpm_level_enable_mask(
3659 struct pp_hwmgr *hwmgr, const void *input)
3660{
3661 struct vega10_hwmgr *data =
3662 (struct vega10_hwmgr *)(hwmgr->backend);
3663 const struct phm_set_power_state_input *states =
3664 (const struct phm_set_power_state_input *)input;
3665 const struct vega10_power_state *vega10_ps =
3666 cast_const_phw_vega10_power_state(states->pnew_state);
3667 int i;
3668
3669 PP_ASSERT_WITH_CODE(!vega10_trim_dpm_states(hwmgr, vega10_ps),
3670 "Attempt to Trim DPM States Failed!",
3671 return -1);
3672
3673 data->smc_state_table.gfx_boot_level =
3674 vega10_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
3675 data->smc_state_table.gfx_max_level =
3676 vega10_find_highest_dpm_level(&(data->dpm_table.gfx_table));
3677 data->smc_state_table.mem_boot_level =
3678 vega10_find_lowest_dpm_level(&(data->dpm_table.mem_table));
3679 data->smc_state_table.mem_max_level =
3680 vega10_find_highest_dpm_level(&(data->dpm_table.mem_table));
3681
3682 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
3683 "Attempt to upload DPM Bootup Levels Failed!",
3684 return -1);
3685 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
3686 "Attempt to upload DPM Max Levels Failed!",
3687 return -1);
3688	for (i = data->smc_state_table.gfx_boot_level; i < data->smc_state_table.gfx_max_level; i++)
3689 data->dpm_table.gfx_table.dpm_levels[i].enabled = true;
3690
3691
3692	for (i = data->smc_state_table.mem_boot_level; i < data->smc_state_table.mem_max_level; i++)
3693 data->dpm_table.mem_table.dpm_levels[i].enabled = true;
3694
3695 return 0;
3696}
3697
3698int vega10_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
3699{
3700 struct vega10_hwmgr *data =
3701 (struct vega10_hwmgr *)(hwmgr->backend);
3702
3703 if (data->smu_features[GNLD_DPM_VCE].supported) {
3704 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
3705 enable,
3706 data->smu_features[GNLD_DPM_VCE].smu_feature_bitmap),
3707 "Attempt to Enable/Disable DPM VCE Failed!",
3708 return -1);
3709 data->smu_features[GNLD_DPM_VCE].enabled = enable;
3710 }
3711
3712 return 0;
3713}
3714
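/* Update the SMC's low-SCLK interrupt threshold when the arbiter value changed. */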
3715static int vega10_update_sclk_threshold(struct pp_hwmgr *hwmgr)
3716{
3717 struct vega10_hwmgr *data =
3718 (struct vega10_hwmgr *)(hwmgr->backend);
3719 int result = 0;
3720 uint32_t low_sclk_interrupt_threshold = 0;
3721
3722 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3723 PHM_PlatformCaps_SclkThrottleLowNotification)
3724 && (hwmgr->gfx_arbiter.sclk_threshold !=
3725 data->low_sclk_interrupt_threshold)) {
3726 data->low_sclk_interrupt_threshold =
3727 hwmgr->gfx_arbiter.sclk_threshold;
3728 low_sclk_interrupt_threshold =
3729 data->low_sclk_interrupt_threshold;
3730
3731 data->smc_state_table.pp_table.LowGfxclkInterruptThreshold =
3732 cpu_to_le32(low_sclk_interrupt_threshold);
3733
3734 /* This message will also enable SmcToHost Interrupt */
3735 result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
3736 PPSMC_MSG_SetLowGfxclkInterruptThreshold,
3737 (uint32_t)low_sclk_interrupt_threshold);
3738 }
3739
3740 return result;
3741}
3742
3743static int vega10_set_power_state_tasks(struct pp_hwmgr *hwmgr,
3744 const void *input)
3745{
3746 int tmp_result, result = 0;
3747 struct vega10_hwmgr *data =
3748 (struct vega10_hwmgr *)(hwmgr->backend);
3749 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
3750
3751 tmp_result = vega10_find_dpm_states_clocks_in_dpm_table(hwmgr, input);
3752 PP_ASSERT_WITH_CODE(!tmp_result,
3753 "Failed to find DPM states clocks in DPM table!",
3754 result = tmp_result);
3755
3756 tmp_result = vega10_populate_and_upload_sclk_mclk_dpm_levels(hwmgr, input);
3757 PP_ASSERT_WITH_CODE(!tmp_result,
3758 "Failed to populate and upload SCLK MCLK DPM levels!",
3759 result = tmp_result);
3760
3761 tmp_result = vega10_generate_dpm_level_enable_mask(hwmgr, input);
3762 PP_ASSERT_WITH_CODE(!tmp_result,
3763 "Failed to generate DPM level enabled mask!",
3764 result = tmp_result);
3765
3766 tmp_result = vega10_update_sclk_threshold(hwmgr);
3767 PP_ASSERT_WITH_CODE(!tmp_result,
3768 "Failed to update SCLK threshold!",
3769 result = tmp_result);
3770
3771 result = vega10_copy_table_to_smc(hwmgr->smumgr,
3772 (uint8_t *)pp_table, PPTABLE);
3773 PP_ASSERT_WITH_CODE(!result,
3774 "Failed to upload PPtable!", return result);
3775
3776 data->apply_optimized_settings = false;
3777 data->apply_overdrive_next_settings_mask = 0;
3778
3779 return 0;
3780}
3781
3782static int vega10_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
3783{
3784 struct pp_power_state *ps;
3785 struct vega10_power_state *vega10_ps;
3786
3787 if (hwmgr == NULL)
3788 return -EINVAL;
3789
3790 ps = hwmgr->request_ps;
3791
3792 if (ps == NULL)
3793 return -EINVAL;
3794
3795 vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
3796
3797 if (low)
3798 return vega10_ps->performance_levels[0].gfx_clock;
3799 else
3800 return vega10_ps->performance_levels
3801 [vega10_ps->performance_level_count - 1].gfx_clock;
3802}
3803
3804static int vega10_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
3805{
3806 struct pp_power_state *ps;
3807 struct vega10_power_state *vega10_ps;
3808
3809 if (hwmgr == NULL)
3810 return -EINVAL;
3811
3812 ps = hwmgr->request_ps;
3813
3814 if (ps == NULL)
3815 return -EINVAL;
3816
3817 vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
3818
3819 if (low)
3820 return vega10_ps->performance_levels[0].mem_clock;
3821 else
3822 return vega10_ps->performance_levels
3823 [vega10_ps->performance_level_count-1].mem_clock;
3824}
3825
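/* Query the current package power from the SMC; the raw watt value is
 * shifted into 8.8 fixed-point format for the caller.
 */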
3826static int vega10_get_gpu_power(struct pp_hwmgr *hwmgr,
3827 struct pp_gpu_power *query)
3828{
3829 uint32_t value;
3830
3831 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr->smumgr,
3832 PPSMC_MSG_GetCurrPkgPwr),
3833 "Failed to get current package power!",
3834 return -EINVAL);
3835
3836 vega10_read_arg_from_smc(hwmgr->smumgr, &value);
3837	/* SMC reports power as an integer number of watts; shift into 8.8 fixed point */
3838 query->average_gpu_power = value << 8;
3839
3840 return 0;
3841}
3842
3843static int vega10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
3844 void *value, int *size)
3845{
3846 uint32_t sclk_idx, mclk_idx, activity_percent = 0;
3847 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
3848 struct vega10_dpm_table *dpm_table = &data->dpm_table;
3849 int ret = 0;
3850
3851 switch (idx) {
3852 case AMDGPU_PP_SENSOR_GFX_SCLK:
3853 ret = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetCurrentGfxclkIndex);
3854 if (!ret) {
3855 vega10_read_arg_from_smc(hwmgr->smumgr, &sclk_idx);
3856 *((uint32_t *)value) = dpm_table->gfx_table.dpm_levels[sclk_idx].value;
3857 *size = 4;
3858 }
3859 break;
3860 case AMDGPU_PP_SENSOR_GFX_MCLK:
3861 ret = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetCurrentUclkIndex);
3862 if (!ret) {
3863 vega10_read_arg_from_smc(hwmgr->smumgr, &mclk_idx);
3864 *((uint32_t *)value) = dpm_table->mem_table.dpm_levels[mclk_idx].value;
3865 *size = 4;
3866 }
3867 break;
3868 case AMDGPU_PP_SENSOR_GPU_LOAD:
3869 ret = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_GetAverageGfxActivity, 0);
3870 if (!ret) {
3871 vega10_read_arg_from_smc(hwmgr->smumgr, &activity_percent);
3872 *((uint32_t *)value) = activity_percent > 100 ? 100 : activity_percent;
3873 *size = 4;
3874 }
3875 break;
3876 case AMDGPU_PP_SENSOR_GPU_TEMP:
3877 *((uint32_t *)value) = vega10_thermal_get_temperature(hwmgr);
3878 *size = 4;
3879 break;
3880 case AMDGPU_PP_SENSOR_UVD_POWER:
3881 *((uint32_t *)value) = data->uvd_power_gated ? 0 : 1;
3882 *size = 4;
3883 break;
3884 case AMDGPU_PP_SENSOR_VCE_POWER:
3885 *((uint32_t *)value) = data->vce_power_gated ? 0 : 1;
3886 *size = 4;
3887 break;
3888 case AMDGPU_PP_SENSOR_GPU_POWER:
3889 if (*size < sizeof(struct pp_gpu_power))
3890 ret = -EINVAL;
3891 else {
3892 *size = sizeof(struct pp_gpu_power);
3893 ret = vega10_get_gpu_power(hwmgr, (struct pp_gpu_power *)value);
3894 }
3895 break;
3896 default:
3897 ret = -EINVAL;
3898 break;
3899 }
3900 return ret;
3901}
3902
3903static int vega10_notify_smc_display_change(struct pp_hwmgr *hwmgr,
3904 bool has_disp)
3905{
3906 return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
3907 PPSMC_MSG_SetUclkFastSwitch,
3908 has_disp ? 0 : 1);
3909}
3910
3911int vega10_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
3912 struct pp_display_clock_request *clock_req)
3913{
3914 int result = 0;
3915 enum amd_pp_clock_type clk_type = clock_req->clock_type;
3916	uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;
3917 DSPCLK_e clk_select = 0;
3918 uint32_t clk_request = 0;
3919
3920 switch (clk_type) {
3921 case amd_pp_dcef_clock:
3922 clk_select = DSPCLK_DCEFCLK;
3923 break;
3924 case amd_pp_disp_clock:
3925 clk_select = DSPCLK_DISPCLK;
3926 break;
3927 case amd_pp_pixel_clock:
3928 clk_select = DSPCLK_PIXCLK;
3929 break;
3930 case amd_pp_phy_clock:
3931 clk_select = DSPCLK_PHYCLK;
3932 break;
3933 default:
3934 pr_info("[DisplayClockVoltageRequest]Invalid Clock Type!");
3935 result = -1;
3936 break;
3937 }
3938
3939 if (!result) {
3940 clk_request = (clk_freq << 16) | clk_select;
3941 result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
3942 PPSMC_MSG_RequestDisplayClockByFreq,
3943 clk_request);
3944 }
3945
3946 return result;
3947}
3948
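/* Return the index of the first MCLK dependency-table entry whose clock is
 * at or above the requested frequency, or the last index if none qualifies.
 */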
3949static uint8_t vega10_get_uclk_index(struct pp_hwmgr *hwmgr,
3950 struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table,
3951 uint32_t frequency)
3952{
3953 uint8_t count;
3954 uint8_t i;
3955
3956 if (mclk_table == NULL || mclk_table->count == 0)
3957 return 0;
3958
3959 count = (uint8_t)(mclk_table->count);
3960
3961	for (i = 0; i < count; i++) {
3962		if (mclk_table->entries[i].clk >= frequency)
3963 return i;
3964 }
3965
3966 return i-1;
3967}
3968
3969static int vega10_notify_smc_display_config_after_ps_adjustment(
3970 struct pp_hwmgr *hwmgr)
3971{
3972 struct vega10_hwmgr *data =
3973 (struct vega10_hwmgr *)(hwmgr->backend);
3974 struct vega10_single_dpm_table *dpm_table =
3975 &data->dpm_table.dcef_table;
3976 struct phm_ppt_v2_information *table_info =
3977 (struct phm_ppt_v2_information *)hwmgr->pptable;
3978 struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table = table_info->vdd_dep_on_mclk;
3979 uint32_t idx;
3980 uint32_t num_active_disps = 0;
3981 struct cgs_display_info info = {0};
3982 struct PP_Clocks min_clocks = {0};
3983 uint32_t i;
3984 struct pp_display_clock_request clock_req;
3985
3986 info.mode_info = NULL;
3987
3988 cgs_get_active_displays_info(hwmgr->device, &info);
3989
3990 num_active_disps = info.display_count;
3991
3992 if (num_active_disps > 1)
3993 vega10_notify_smc_display_change(hwmgr, false);
3994 else
3995 vega10_notify_smc_display_change(hwmgr, true);
3996
3997 min_clocks.dcefClock = hwmgr->display_config.min_dcef_set_clk;
3998 min_clocks.dcefClockInSR = hwmgr->display_config.min_dcef_deep_sleep_set_clk;
3999	min_clocks.memoryClock = hwmgr->display_config.min_mem_set_clock;
4000
4001 for (i = 0; i < dpm_table->count; i++) {
4002 if (dpm_table->dpm_levels[i].value == min_clocks.dcefClock)
4003 break;
4004 }
4005
4006 if (i < dpm_table->count) {
4007 clock_req.clock_type = amd_pp_dcef_clock;
4008 clock_req.clock_freq_in_khz = dpm_table->dpm_levels[i].value;
4009 if (!vega10_display_clock_voltage_request(hwmgr, &clock_req)) {
4010 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
4011 hwmgr->smumgr, PPSMC_MSG_SetMinDeepSleepDcefclk,
4012				min_clocks.dcefClockInSR /100),
4013				"Attempt to set divider for DCEFCLK Failed!",);
4014		} else {
4015			pr_info("Attempt to set Hard Min for DCEFCLK Failed!");
4016		}
4017	} else {
4018		pr_info("Cannot find requested DCEFCLK!");
4019 }
4020
4021 if (min_clocks.memoryClock != 0) {
4022 idx = vega10_get_uclk_index(hwmgr, mclk_table, min_clocks.memoryClock);
4023 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetSoftMinUclkByIndex, idx);
4024		data->dpm_table.mem_table.dpm_state.soft_min_level = idx;
4025 }
4026
4027 return 0;
4028}
4029
4030static int vega10_force_dpm_highest(struct pp_hwmgr *hwmgr)
4031{
4032 struct vega10_hwmgr *data =
4033 (struct vega10_hwmgr *)(hwmgr->backend);
4034
4035 data->smc_state_table.gfx_boot_level =
4036 data->smc_state_table.gfx_max_level =
4037 vega10_find_highest_dpm_level(&(data->dpm_table.gfx_table));
4038 data->smc_state_table.mem_boot_level =
4039 data->smc_state_table.mem_max_level =
4040 vega10_find_highest_dpm_level(&(data->dpm_table.mem_table));
4041
4042 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
4043 "Failed to upload boot level to highest!",
4044 return -1);
4045
4046 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
4047 "Failed to upload dpm max level to highest!",
4048 return -1);
4049
4050 return 0;
4051}
4052
4053static int vega10_force_dpm_lowest(struct pp_hwmgr *hwmgr)
4054{
4055 struct vega10_hwmgr *data =
4056 (struct vega10_hwmgr *)(hwmgr->backend);
4057
4058 data->smc_state_table.gfx_boot_level =
4059 data->smc_state_table.gfx_max_level =
4060 vega10_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
4061 data->smc_state_table.mem_boot_level =
4062 data->smc_state_table.mem_max_level =
4063 vega10_find_lowest_dpm_level(&(data->dpm_table.mem_table));
4064
4065 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
4066			"Failed to upload boot level to lowest!",
4067 return -1);
4068
4069 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
4070			"Failed to upload dpm max level to lowest!",
4071 return -1);
4072
4073 return 0;
4074
4075}
4076
4077static int vega10_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
4078{
4079 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4080
4081 data->smc_state_table.gfx_boot_level =
4082 vega10_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
4083 data->smc_state_table.gfx_max_level =
4084 vega10_find_highest_dpm_level(&(data->dpm_table.gfx_table));
4085 data->smc_state_table.mem_boot_level =
4086 vega10_find_lowest_dpm_level(&(data->dpm_table.mem_table));
4087 data->smc_state_table.mem_max_level =
4088 vega10_find_highest_dpm_level(&(data->dpm_table.mem_table));
4089
4090 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
4091 "Failed to upload DPM Bootup Levels!",
4092 return -1);
4093
4094 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
4095 "Failed to upload DPM Max Levels!",
4096 return -1);
4097 return 0;
4098}
4099
4100static int vega10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
4101 enum amd_dpm_forced_level level)
4102{
4103 int ret = 0;
4104
4105 switch (level) {
4106 case AMD_DPM_FORCED_LEVEL_HIGH:
4107 ret = vega10_force_dpm_highest(hwmgr);
4108 if (ret)
4109 return ret;
4110 break;
4111 case AMD_DPM_FORCED_LEVEL_LOW:
4112 ret = vega10_force_dpm_lowest(hwmgr);
4113 if (ret)
4114 return ret;
4115 break;
4116 case AMD_DPM_FORCED_LEVEL_AUTO:
4117 ret = vega10_unforce_dpm_levels(hwmgr);
4118 if (ret)
4119 return ret;
4120 break;
4121 default:
4122 break;
4123 }
4124
4125 hwmgr->dpm_level = level;
4126
4127 return ret;
4128}
4129
4130static int vega10_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
4131{
4132	int result = 0;
4133
4134 switch (mode) {
4135 case AMD_FAN_CTRL_NONE:
4136 result = vega10_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
4137 break;
4138 case AMD_FAN_CTRL_MANUAL:
4139 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
4140 PHM_PlatformCaps_MicrocodeFanControl))
4141 result = vega10_fan_ctrl_stop_smc_fan_control(hwmgr);
4142 break;
4143 case AMD_FAN_CTRL_AUTO:
4144 result = vega10_fan_ctrl_set_static_mode(hwmgr, mode);
4145 if (!result)
4146 result = vega10_fan_ctrl_start_smc_fan_control(hwmgr);
4147 break;
4148 default:
4149 break;
4150 }
4151 return result;
4152}
4153
4154static int vega10_get_fan_control_mode(struct pp_hwmgr *hwmgr)
4155{
4156	struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4157
4158 if (data->smu_features[GNLD_FAN_CONTROL].enabled == false)
4159 return AMD_FAN_CTRL_MANUAL;
4160 else
4161 return AMD_FAN_CTRL_AUTO;
4162}
4163
4164static int vega10_get_dal_power_level(struct pp_hwmgr *hwmgr,
4165 struct amd_pp_simple_clock_info *info)
4166{
4167 struct phm_ppt_v2_information *table_info =
4168 (struct phm_ppt_v2_information *)hwmgr->pptable;
4169 struct phm_clock_and_voltage_limits *max_limits =
4170 &table_info->max_clock_voltage_on_ac;
4171
4172 info->engine_max_clock = max_limits->sclk;
4173 info->memory_max_clock = max_limits->mclk;
4174
4175 return 0;
4176}
4177
4178static void vega10_get_sclks(struct pp_hwmgr *hwmgr,
4179 struct pp_clock_levels_with_latency *clocks)
4180{
4181 struct phm_ppt_v2_information *table_info =
4182 (struct phm_ppt_v2_information *)hwmgr->pptable;
4183 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
4184 table_info->vdd_dep_on_sclk;
4185 uint32_t i;
4186
4187 for (i = 0; i < dep_table->count; i++) {
4188 if (dep_table->entries[i].clk) {
4189 clocks->data[clocks->num_levels].clocks_in_khz =
4190 dep_table->entries[i].clk;
4191 clocks->num_levels++;
4192 }
4193 }
4194
4195}
4196
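/* Map a memory clock to its DAL latency class: below the low-latency
 * threshold is treated as an error, mid-range clocks report the high
 * latency, and clocks at or above the high-frequency threshold report
 * the low latency.
 */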
4197static uint32_t vega10_get_mem_latency(struct pp_hwmgr *hwmgr,
4198 uint32_t clock)
4199{
4200 if (clock >= MEM_FREQ_LOW_LATENCY &&
4201 clock < MEM_FREQ_HIGH_LATENCY)
4202 return MEM_LATENCY_HIGH;
4203 else if (clock >= MEM_FREQ_HIGH_LATENCY)
4204 return MEM_LATENCY_LOW;
4205 else
4206 return MEM_LATENCY_ERR;
4207}
4208
4209static void vega10_get_memclocks(struct pp_hwmgr *hwmgr,
4210 struct pp_clock_levels_with_latency *clocks)
4211{
4212 struct phm_ppt_v2_information *table_info =
4213 (struct phm_ppt_v2_information *)hwmgr->pptable;
4214 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
4215 table_info->vdd_dep_on_mclk;
4216 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4217 uint32_t i;
4218
4219 clocks->num_levels = 0;
4220 data->mclk_latency_table.count = 0;
4221
4222 for (i = 0; i < dep_table->count; i++) {
4223 if (dep_table->entries[i].clk) {
4224 clocks->data[clocks->num_levels].clocks_in_khz =
4225 data->mclk_latency_table.entries
4226 [data->mclk_latency_table.count].frequency =
4227 dep_table->entries[i].clk;
4228 clocks->data[clocks->num_levels].latency_in_us =
4229 data->mclk_latency_table.entries
4230 [data->mclk_latency_table.count].latency =
4231 vega10_get_mem_latency(hwmgr,
4232 dep_table->entries[i].clk);
4233 clocks->num_levels++;
4234 data->mclk_latency_table.count++;
4235 }
4236 }
4237}
4238
4239static void vega10_get_dcefclocks(struct pp_hwmgr *hwmgr,
4240 struct pp_clock_levels_with_latency *clocks)
4241{
4242 struct phm_ppt_v2_information *table_info =
4243 (struct phm_ppt_v2_information *)hwmgr->pptable;
4244 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
4245 table_info->vdd_dep_on_dcefclk;
4246 uint32_t i;
4247
4248 for (i = 0; i < dep_table->count; i++) {
4249 clocks->data[i].clocks_in_khz = dep_table->entries[i].clk;
4250 clocks->data[i].latency_in_us = 0;
4251 clocks->num_levels++;
4252 }
4253}
4254
4255static void vega10_get_socclocks(struct pp_hwmgr *hwmgr,
4256 struct pp_clock_levels_with_latency *clocks)
4257{
4258 struct phm_ppt_v2_information *table_info =
4259 (struct phm_ppt_v2_information *)hwmgr->pptable;
4260 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
4261 table_info->vdd_dep_on_socclk;
4262 uint32_t i;
4263
4264 for (i = 0; i < dep_table->count; i++) {
4265 clocks->data[i].clocks_in_khz = dep_table->entries[i].clk;
4266 clocks->data[i].latency_in_us = 0;
4267 clocks->num_levels++;
4268 }
4269}
4270
4271static int vega10_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
4272 enum amd_pp_clock_type type,
4273 struct pp_clock_levels_with_latency *clocks)
4274{
4275 switch (type) {
4276 case amd_pp_sys_clock:
4277 vega10_get_sclks(hwmgr, clocks);
4278 break;
4279 case amd_pp_mem_clock:
4280 vega10_get_memclocks(hwmgr, clocks);
4281 break;
4282 case amd_pp_dcef_clock:
4283 vega10_get_dcefclocks(hwmgr, clocks);
4284 break;
4285 case amd_pp_soc_clock:
4286 vega10_get_socclocks(hwmgr, clocks);
4287 break;
4288 default:
4289 return -1;
4290 }
4291
4292 return 0;
4293}
4294
4295static int vega10_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
4296 enum amd_pp_clock_type type,
4297 struct pp_clock_levels_with_voltage *clocks)
4298{
4299 struct phm_ppt_v2_information *table_info =
4300 (struct phm_ppt_v2_information *)hwmgr->pptable;
4301 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
4302 uint32_t i;
4303
4304 switch (type) {
4305 case amd_pp_mem_clock:
4306 dep_table = table_info->vdd_dep_on_mclk;
4307 break;
4308 case amd_pp_dcef_clock:
4309 dep_table = table_info->vdd_dep_on_dcefclk;
4310 break;
4311 case amd_pp_disp_clock:
4312 dep_table = table_info->vdd_dep_on_dispclk;
4313 break;
4314 case amd_pp_pixel_clock:
4315 dep_table = table_info->vdd_dep_on_pixclk;
4316 break;
4317 case amd_pp_phy_clock:
4318 dep_table = table_info->vdd_dep_on_phyclk;
4319 break;
4320 default:
4321 return -1;
4322 }
4323
4324 for (i = 0; i < dep_table->count; i++) {
4325 clocks->data[i].clocks_in_khz = dep_table->entries[i].clk;
4326 clocks->data[i].voltage_in_mv = (uint32_t)(table_info->vddc_lookup_table->
4327 entries[dep_table->entries[i].vddInd].us_vdd);
4328 clocks->num_levels++;
4329 }
4330
4331 if (i < dep_table->count)
4332 return -1;
4333
4334 return 0;
4335}
4336
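/* Translate DAL's DMIF/MCIF watermark clock ranges (given in kHz) into the
 * SMC Watermarks_t table, scaling each bound down by a factor of 100.
 */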
4337static int vega10_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
4338 struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
4339{
4340 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4341 Watermarks_t *table = &(data->smc_state_table.water_marks_table);
4342 int result = 0;
4343 uint32_t i;
4344
4345 if (!data->registry_data.disable_water_mark) {
4346 for (i = 0; i < wm_with_clock_ranges->num_wm_sets_dmif; i++) {
4347 table->WatermarkRow[WM_DCEFCLK][i].MinClock =
4348 cpu_to_le16((uint16_t)
4349 (wm_with_clock_ranges->wm_sets_dmif[i].wm_min_dcefclk_in_khz) /
4350 100);
4351 table->WatermarkRow[WM_DCEFCLK][i].MaxClock =
4352 cpu_to_le16((uint16_t)
4353 (wm_with_clock_ranges->wm_sets_dmif[i].wm_max_dcefclk_in_khz) /
4354 100);
4355 table->WatermarkRow[WM_DCEFCLK][i].MinUclk =
4356 cpu_to_le16((uint16_t)
4357 (wm_with_clock_ranges->wm_sets_dmif[i].wm_min_memclk_in_khz) /
4358 100);
4359 table->WatermarkRow[WM_DCEFCLK][i].MaxUclk =
4360 cpu_to_le16((uint16_t)
4361 (wm_with_clock_ranges->wm_sets_dmif[i].wm_max_memclk_in_khz) /
4362 100);
4363 table->WatermarkRow[WM_DCEFCLK][i].WmSetting = (uint8_t)
4364 wm_with_clock_ranges->wm_sets_dmif[i].wm_set_id;
4365 }
4366
4367 for (i = 0; i < wm_with_clock_ranges->num_wm_sets_mcif; i++) {
4368 table->WatermarkRow[WM_SOCCLK][i].MinClock =
4369 cpu_to_le16((uint16_t)
4370 (wm_with_clock_ranges->wm_sets_mcif[i].wm_min_socclk_in_khz) /
4371 100);
4372 table->WatermarkRow[WM_SOCCLK][i].MaxClock =
4373 cpu_to_le16((uint16_t)
4374 (wm_with_clock_ranges->wm_sets_mcif[i].wm_max_socclk_in_khz) /
4375 100);
4376 table->WatermarkRow[WM_SOCCLK][i].MinUclk =
4377 cpu_to_le16((uint16_t)
4378 (wm_with_clock_ranges->wm_sets_mcif[i].wm_min_memclk_in_khz) /
4379 100);
4380 table->WatermarkRow[WM_SOCCLK][i].MaxUclk =
4381 cpu_to_le16((uint16_t)
4382 (wm_with_clock_ranges->wm_sets_mcif[i].wm_max_memclk_in_khz) /
4383 100);
4384 table->WatermarkRow[WM_SOCCLK][i].WmSetting = (uint8_t)
4385 wm_with_clock_ranges->wm_sets_mcif[i].wm_set_id;
4386 }
4387 data->water_marks_bitmap = WaterMarksExist;
4388 }
4389
4390 return result;
4391}
4392
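/* In manual DPM mode, use the lowest and highest set bits of the level mask
 * as the soft minimum and maximum DPM level indices for the given clock type.
 */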
4393static int vega10_force_clock_level(struct pp_hwmgr *hwmgr,
4394 enum pp_clock_type type, uint32_t mask)
4395{
4396 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4397	int i;
4398
4399 if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
4400 return -EINVAL;
4401
4402 switch (type) {
4403 case PP_SCLK:
4404 for (i = 0; i < 32; i++) {
4405 if (mask & (1 << i))
4406 break;
4407 }
4408	data->smc_state_table.gfx_boot_level = i;
4409
4410 for (i = 31; i >= 0; i--) {
4411 if (mask & (1 << i))
4412 break;
4413 }
4414 data->smc_state_table.gfx_max_level = i;
4415
4416 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
4417 "Failed to upload boot level to lowest!",
4418 return -EINVAL);
4419
4420 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
4421 "Failed to upload dpm max level to highest!",
4422 return -EINVAL);
4423 break;
4424
4425 case PP_MCLK:
4426 for (i = 0; i < 32; i++) {
4427 if (mask & (1 << i))
4428 break;
4429 }
4430 data->smc_state_table.mem_boot_level = i;
4431
4432 for (i = 31; i >= 0; i--) {
4433 if (mask & (1 << i))
4434 break;
4435 }
4436 data->smc_state_table.mem_max_level = i;
4437
4438 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
4439 "Failed to upload boot level to lowest!",
4440 return -EINVAL);
4441
4442 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
4443 "Failed to upload dpm max level to highest!",
4444 return -EINVAL);
4445
4446		break;
4447
4448	case PP_PCIE:
4449 default:
4450 break;
4451 }
4452
4453 return 0;
4454}
4455
4456static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
4457 enum pp_clock_type type, char *buf)
4458{
4459 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4460 struct vega10_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table);
4461 struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table);
4462 struct vega10_pcie_table *pcie_table = &(data->dpm_table.pcie_table);
4463 int i, now, size = 0;
4464
4465 switch (type) {
4466 case PP_SCLK:
4467 if (data->registry_data.sclk_dpm_key_disabled)
4468 break;
4469
4470 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr->smumgr,
4471 PPSMC_MSG_GetCurrentGfxclkIndex),
4472 "Attempt to get current sclk index Failed!",
4473 return -1);
4474 PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(hwmgr->smumgr,
4475 &now),
4476 "Attempt to read sclk index Failed!",
4477 return -1);
4478
4479 for (i = 0; i < sclk_table->count; i++)
4480 size += sprintf(buf + size, "%d: %uMhz %s\n",
4481 i, sclk_table->dpm_levels[i].value / 100,
4482 (i == now) ? "*" : "");
4483 break;
4484 case PP_MCLK:
4485 if (data->registry_data.mclk_dpm_key_disabled)
4486 break;
4487
4488 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr->smumgr,
4489 PPSMC_MSG_GetCurrentUclkIndex),
4490 "Attempt to get current mclk index Failed!",
4491 return -1);
4492 PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(hwmgr->smumgr,
4493 &now),
4494 "Attempt to read mclk index Failed!",
4495 return -1);
4496
4497 for (i = 0; i < mclk_table->count; i++)
4498 size += sprintf(buf + size, "%d: %uMhz %s\n",
4499 i, mclk_table->dpm_levels[i].value / 100,
4500 (i == now) ? "*" : "");
4501 break;
4502 case PP_PCIE:
4503 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr->smumgr,
4504 PPSMC_MSG_GetCurrentLinkIndex),
4505				"Attempt to get current PCIe index Failed!",
4506 return -1);
4507 PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(hwmgr->smumgr,
4508 &now),
4509				"Attempt to read PCIe index Failed!",
4510 return -1);
4511
4512 for (i = 0; i < pcie_table->count; i++)
4513 size += sprintf(buf + size, "%d: %s %s\n", i,
4514 (pcie_table->pcie_gen[i] == 0) ? "2.5GB, x1" :
4515 (pcie_table->pcie_gen[i] == 1) ? "5.0GB, x16" :
4516 (pcie_table->pcie_gen[i] == 2) ? "8.0GB, x16" : "",
4517 (i == now) ? "*" : "");
4518 break;
4519 default:
4520 break;
4521 }
4522 return size;
4523}
4524
4525static int vega10_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
4526{
4527 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4528 int result = 0;
4529 uint32_t num_turned_on_displays = 1;
4530 Watermarks_t *wm_table = &(data->smc_state_table.water_marks_table);
4531 struct cgs_display_info info = {0};
4532
4533 if ((data->water_marks_bitmap & WaterMarksExist) &&
4534 !(data->water_marks_bitmap & WaterMarksLoaded)) {
4535 result = vega10_copy_table_to_smc(hwmgr->smumgr,
4536 (uint8_t *)wm_table, WMTABLE);
4537		PP_ASSERT_WITH_CODE(!result, "Failed to update WMTABLE!", return -EINVAL);
4538 data->water_marks_bitmap |= WaterMarksLoaded;
4539 }
4540
4541 if (data->water_marks_bitmap & WaterMarksLoaded) {
4542 cgs_get_active_displays_info(hwmgr->device, &info);
4543 num_turned_on_displays = info.display_count;
4544 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
4545 PPSMC_MSG_NumOfDisplays, num_turned_on_displays);
4546 }
4547
4548 return result;
4549}
4550
4551int vega10_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
4552{
4553 struct vega10_hwmgr *data =
4554 (struct vega10_hwmgr *)(hwmgr->backend);
4555
4556 if (data->smu_features[GNLD_DPM_UVD].supported) {
4557 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
4558 enable,
4559 data->smu_features[GNLD_DPM_UVD].smu_feature_bitmap),
4560 "Attempt to Enable/Disable DPM UVD Failed!",
4561 return -1);
4562 data->smu_features[GNLD_DPM_UVD].enabled = enable;
4563 }
4564 return 0;
4565}
4566
4567static int vega10_power_gate_vce(struct pp_hwmgr *hwmgr, bool bgate)
4568{
4569 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4570
4571 data->vce_power_gated = bgate;
4572 return vega10_enable_disable_vce_dpm(hwmgr, !bgate);
4573}
4574
4575static int vega10_power_gate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
4576{
4577 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4578
4579 data->uvd_power_gated = bgate;
4580 return vega10_enable_disable_uvd_dpm(hwmgr, !bgate);
4581}
4582
4583static inline bool vega10_are_power_levels_equal(
4584 const struct vega10_performance_level *pl1,
4585 const struct vega10_performance_level *pl2)
4586{
4587 return ((pl1->soc_clock == pl2->soc_clock) &&
4588 (pl1->gfx_clock == pl2->gfx_clock) &&
4589 (pl1->mem_clock == pl2->mem_clock));
4590}
4591
4592static int vega10_check_states_equal(struct pp_hwmgr *hwmgr,
4593 const struct pp_hw_power_state *pstate1,
4594 const struct pp_hw_power_state *pstate2, bool *equal)
4595{
4596 const struct vega10_power_state *psa;
4597 const struct vega10_power_state *psb;
4598 int i;
4599
4600 if (pstate1 == NULL || pstate2 == NULL || equal == NULL)
4601 return -EINVAL;
4602
4603 psa = cast_const_phw_vega10_power_state(pstate1);
4604 psb = cast_const_phw_vega10_power_state(pstate2);
4605 /* If the two states don't even have the same number of performance levels they cannot be the same state. */
4606 if (psa->performance_level_count != psb->performance_level_count) {
4607 *equal = false;
4608 return 0;
4609 }
4610
4611 for (i = 0; i < psa->performance_level_count; i++) {
4612 if (!vega10_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) {
4613 /* If we have found even one performance level pair that is different the states are different. */
4614 *equal = false;
4615 return 0;
4616 }
4617 }
4618
4619 /* If all performance levels are the same try to use the UVD clocks to break the tie.*/
4620 *equal = ((psa->uvd_clks.vclk == psb->uvd_clks.vclk) && (psa->uvd_clks.dclk == psb->uvd_clks.dclk));
4621 *equal &= ((psa->vce_clks.evclk == psb->vce_clks.evclk) && (psa->vce_clks.ecclk == psb->vce_clks.ecclk));
4622 *equal &= (psa->sclk_threshold == psb->sclk_threshold);
4623
4624 return 0;
4625}
4626
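/* An SMC update is required when the number of active displays has changed,
 * or (with SclkDeepSleep enabled) the minimum engine clock in self-refresh
 * differs from the current display configuration.
 */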
4627static bool
4628vega10_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
4629{
4630 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4631 bool is_update_required = false;
4632 struct cgs_display_info info = {0, 0, NULL};
4633
4634 cgs_get_active_displays_info(hwmgr->device, &info);
4635
4636 if (data->display_timing.num_existing_displays != info.display_count)
4637 is_update_required = true;
4638
4639 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
4640 if (data->display_timing.min_clock_in_sr != hwmgr->display_config.min_core_set_clock_in_sr)
4641 is_update_required = true;
4642 }
4643
4644 return is_update_required;
4645}
4646
4647static int vega10_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
4648{
4649 int tmp_result, result = 0;
4650
4651 tmp_result = (vega10_is_dpm_running(hwmgr)) ? 0 : -1;
4652 PP_ASSERT_WITH_CODE(tmp_result == 0,
4653 "DPM is not running right now, no need to disable DPM!",
4654 return 0);
4655
4656 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
4657 PHM_PlatformCaps_ThermalController))
4658 vega10_disable_thermal_protection(hwmgr);
4659
4660 tmp_result = vega10_disable_power_containment(hwmgr);
4661 PP_ASSERT_WITH_CODE((tmp_result == 0),
4662 "Failed to disable power containment!", result = tmp_result);
4663
4664 tmp_result = vega10_avfs_enable(hwmgr, false);
4665 PP_ASSERT_WITH_CODE((tmp_result == 0),
4666 "Failed to disable AVFS!", result = tmp_result);
4667
4668 tmp_result = vega10_stop_dpm(hwmgr, SMC_DPM_FEATURES);
4669 PP_ASSERT_WITH_CODE((tmp_result == 0),
4670 "Failed to stop DPM!", result = tmp_result);
4671
4672 tmp_result = vega10_disable_deep_sleep_master_switch(hwmgr);
4673 PP_ASSERT_WITH_CODE((tmp_result == 0),
4674 "Failed to disable deep sleep!", result = tmp_result);
4675
4676 tmp_result = vega10_disable_ulv(hwmgr);
4677 PP_ASSERT_WITH_CODE((tmp_result == 0),
4678 "Failed to disable ulv!", result = tmp_result);
4679
4680 return result;
4681}
4682
4683static int vega10_power_off_asic(struct pp_hwmgr *hwmgr)
4684{
4685 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4686 int result;
4687
4688 result = vega10_disable_dpm_tasks(hwmgr);
4689 PP_ASSERT_WITH_CODE((0 == result),
4690 "[disable_dpm_tasks] Failed to disable DPM!",
4691 );
4692 data->water_marks_bitmap &= ~(WaterMarksLoaded);
4693
4694 return result;
4695}
4696
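/* Find the lowest enabled GFXCLK/UCLK DPM levels that satisfy the requested
 * minimum clocks; the output indices are left untouched if no level qualifies.
 */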
4697static void vega10_find_min_clock_index(struct pp_hwmgr *hwmgr,
4698 uint32_t *sclk_idx, uint32_t *mclk_idx,
4699 uint32_t min_sclk, uint32_t min_mclk)
4700{
4701 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4702 struct vega10_dpm_table *dpm_table = &(data->dpm_table);
4703 uint32_t i;
4704
4705 for (i = 0; i < dpm_table->gfx_table.count; i++) {
4706 if (dpm_table->gfx_table.dpm_levels[i].enabled &&
4707 dpm_table->gfx_table.dpm_levels[i].value >= min_sclk) {
4708 *sclk_idx = i;
4709 break;
4710 }
4711 }
4712
4713 for (i = 0; i < dpm_table->mem_table.count; i++) {
4714 if (dpm_table->mem_table.dpm_levels[i].enabled &&
4715 dpm_table->mem_table.dpm_levels[i].value >= min_mclk) {
4716 *mclk_idx = i;
4717 break;
4718 }
4719 }
4720}
4721
4722static int vega10_set_power_profile_state(struct pp_hwmgr *hwmgr,
4723 struct amd_pp_profile *request)
4724{
4725 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4726	uint32_t sclk_idx = ~0, mclk_idx = ~0;
4727
4728 if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_AUTO)
4729 return -EINVAL;
4730
4731 vega10_find_min_clock_index(hwmgr, &sclk_idx, &mclk_idx,
4732 request->min_sclk, request->min_mclk);
4733
4734	if (sclk_idx != ~0) {
4735 if (!data->registry_data.sclk_dpm_key_disabled)
4736 PP_ASSERT_WITH_CODE(
4737 !smum_send_msg_to_smc_with_parameter(
4738 hwmgr->smumgr,
4739 PPSMC_MSG_SetSoftMinGfxclkByIndex,
4740 sclk_idx),
4741 "Failed to set soft min sclk index!",
4742 return -EINVAL);
4743 }
4744
4745	if (mclk_idx != ~0) {
4746 if (!data->registry_data.mclk_dpm_key_disabled)
4747 PP_ASSERT_WITH_CODE(
4748 !smum_send_msg_to_smc_with_parameter(
4749 hwmgr->smumgr,
4750 PPSMC_MSG_SetSoftMinUclkByIndex,
4751 mclk_idx),
4752 "Failed to set soft min mclk index!",
4753 return -EINVAL);
4754 }
4755
4756 return 0;
4757}
4758
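/* Report the current sclk overdrive as a percentage delta relative to the
 * highest level of the golden (default) GFXCLK DPM table.
 */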
4759static int vega10_get_sclk_od(struct pp_hwmgr *hwmgr)
4760{
4761 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4762 struct vega10_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table);
4763 struct vega10_single_dpm_table *golden_sclk_table =
4764 &(data->golden_dpm_table.gfx_table);
4765 int value;
4766
4767 value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
4768 golden_sclk_table->dpm_levels
4769 [golden_sclk_table->count - 1].value) *
4770 100 /
4771 golden_sclk_table->dpm_levels
4772 [golden_sclk_table->count - 1].value;
4773
4774 return value;
4775}
4776
4777static int vega10_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
4778{
4779 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4780 struct vega10_single_dpm_table *golden_sclk_table =
4781 &(data->golden_dpm_table.gfx_table);
4782 struct pp_power_state *ps;
4783 struct vega10_power_state *vega10_ps;
4784
4785 ps = hwmgr->request_ps;
4786
4787 if (ps == NULL)
4788 return -EINVAL;
4789
4790 vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
4791
4792 vega10_ps->performance_levels
4793 [vega10_ps->performance_level_count - 1].gfx_clock =
4794 golden_sclk_table->dpm_levels
4795 [golden_sclk_table->count - 1].value *
4796 value / 100 +
4797 golden_sclk_table->dpm_levels
4798 [golden_sclk_table->count - 1].value;
4799
4800 if (vega10_ps->performance_levels
4801 [vega10_ps->performance_level_count - 1].gfx_clock >
4802 hwmgr->platform_descriptor.overdriveLimit.engineClock)
4803 vega10_ps->performance_levels
4804 [vega10_ps->performance_level_count - 1].gfx_clock =
4805 hwmgr->platform_descriptor.overdriveLimit.engineClock;
4806
4807 return 0;
4808}
4809
4810static int vega10_get_mclk_od(struct pp_hwmgr *hwmgr)
4811{
4812 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4813 struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table);
4814 struct vega10_single_dpm_table *golden_mclk_table =
4815 &(data->golden_dpm_table.mem_table);
4816 int value;
4817
4818 value = (mclk_table->dpm_levels
4819 [mclk_table->count - 1].value -
4820 golden_mclk_table->dpm_levels
4821 [golden_mclk_table->count - 1].value) *
4822 100 /
4823 golden_mclk_table->dpm_levels
4824 [golden_mclk_table->count - 1].value;
4825
4826 return value;
4827}
4828
4829static int vega10_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
4830{
4831 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4832 struct vega10_single_dpm_table *golden_mclk_table =
4833 &(data->golden_dpm_table.mem_table);
4834 struct pp_power_state *ps;
4835 struct vega10_power_state *vega10_ps;
4836
4837 ps = hwmgr->request_ps;
4838
4839 if (ps == NULL)
4840 return -EINVAL;
4841
4842 vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
4843
4844 vega10_ps->performance_levels
4845 [vega10_ps->performance_level_count - 1].mem_clock =
4846 golden_mclk_table->dpm_levels
4847 [golden_mclk_table->count - 1].value *
4848 value / 100 +
4849 golden_mclk_table->dpm_levels
4850 [golden_mclk_table->count - 1].value;
4851
4852 if (vega10_ps->performance_levels
4853 [vega10_ps->performance_level_count - 1].mem_clock >
4854 hwmgr->platform_descriptor.overdriveLimit.memoryClock)
4855 vega10_ps->performance_levels
4856 [vega10_ps->performance_level_count - 1].mem_clock =
4857 hwmgr->platform_descriptor.overdriveLimit.memoryClock;
4858
4859 return 0;
4860}
4861
4862static const struct pp_hwmgr_func vega10_hwmgr_funcs = {
4863 .backend_init = vega10_hwmgr_backend_init,
4864 .backend_fini = vega10_hwmgr_backend_fini,
4865 .asic_setup = vega10_setup_asic_task,
4866 .dynamic_state_management_enable = vega10_enable_dpm_tasks,
4867	.dynamic_state_management_disable = vega10_disable_dpm_tasks,
4868 .get_num_of_pp_table_entries =
4869 vega10_get_number_of_powerplay_table_entries,
4870 .get_power_state_size = vega10_get_power_state_size,
4871 .get_pp_table_entry = vega10_get_pp_table_entry,
4872 .patch_boot_state = vega10_patch_boot_state,
4873 .apply_state_adjust_rules = vega10_apply_state_adjust_rules,
4874 .power_state_set = vega10_set_power_state_tasks,
4875 .get_sclk = vega10_dpm_get_sclk,
4876 .get_mclk = vega10_dpm_get_mclk,
4877 .notify_smc_display_config_after_ps_adjustment =
4878 vega10_notify_smc_display_config_after_ps_adjustment,
4879 .force_dpm_level = vega10_dpm_force_dpm_level,
4880 .get_temperature = vega10_thermal_get_temperature,
4881 .stop_thermal_controller = vega10_thermal_stop_thermal_controller,
4882 .get_fan_speed_info = vega10_fan_ctrl_get_fan_speed_info,
4883 .get_fan_speed_percent = vega10_fan_ctrl_get_fan_speed_percent,
4884 .set_fan_speed_percent = vega10_fan_ctrl_set_fan_speed_percent,
4885 .reset_fan_speed_to_default =
4886 vega10_fan_ctrl_reset_fan_speed_to_default,
4887 .get_fan_speed_rpm = vega10_fan_ctrl_get_fan_speed_rpm,
4888 .set_fan_speed_rpm = vega10_fan_ctrl_set_fan_speed_rpm,
4889 .uninitialize_thermal_controller =
4890 vega10_thermal_ctrl_uninitialize_thermal_controller,
4891 .set_fan_control_mode = vega10_set_fan_control_mode,
4892 .get_fan_control_mode = vega10_get_fan_control_mode,
4893 .read_sensor = vega10_read_sensor,
4894 .get_dal_power_level = vega10_get_dal_power_level,
4895 .get_clock_by_type_with_latency = vega10_get_clock_by_type_with_latency,
4896 .get_clock_by_type_with_voltage = vega10_get_clock_by_type_with_voltage,
4897 .set_watermarks_for_clocks_ranges = vega10_set_watermarks_for_clocks_ranges,
4898 .display_clock_voltage_request = vega10_display_clock_voltage_request,
4899 .force_clock_level = vega10_force_clock_level,
4900 .print_clock_levels = vega10_print_clock_levels,
4901 .display_config_changed = vega10_display_configuration_changed_task,
4902 .powergate_uvd = vega10_power_gate_uvd,
4903 .powergate_vce = vega10_power_gate_vce,
4904 .check_states_equal = vega10_check_states_equal,
4905 .check_smc_update_required_for_display_configuration =
4906 vega10_check_smc_update_required_for_display_configuration,
4907 .power_off_asic = vega10_power_off_asic,
4908 .disable_smc_firmware_ctf = vega10_thermal_disable_alert,
4909	.set_power_profile_state = vega10_set_power_profile_state,
4910 .get_sclk_od = vega10_get_sclk_od,
4911 .set_sclk_od = vega10_set_sclk_od,
4912 .get_mclk_od = vega10_get_mclk_od,
4913 .set_mclk_od = vega10_set_mclk_od,
4914	.avfs_control = vega10_avfs_enable,
4915};
4916
4917int vega10_hwmgr_init(struct pp_hwmgr *hwmgr)
4918{
4919 hwmgr->hwmgr_func = &vega10_hwmgr_funcs;
4920 hwmgr->pptable_func = &vega10_pptable_funcs;
4921 pp_vega10_thermal_initialize(hwmgr);
4922 return 0;
4923}