1/*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#include <linux/module.h>
24#include <linux/slab.h>
25#include <linux/fb.h>
26#include "linux/delay.h"
27
28#include "hwmgr.h"
29#include "amd_powerplay.h"
30#include "vega10_smumgr.h"
31#include "hardwaremanager.h"
32#include "ppatomfwctrl.h"
33#include "atomfirmware.h"
34#include "cgs_common.h"
35#include "vega10_powertune.h"
36#include "smu9.h"
37#include "smu9_driver_if.h"
38#include "vega10_inc.h"
39#include "pp_soc15.h"
40#include "pppcielanes.h"
41#include "vega10_hwmgr.h"
42#include "vega10_processpptables.h"
43#include "vega10_pptable.h"
44#include "vega10_thermal.h"
45#include "pp_debug.h"
46#include "pp_acpi.h"
47#include "amd_pcie_helpers.h"
48#include "cgs_linux.h"
49#include "ppinterrupt.h"
 50#include "pp_overdriver.h"
51
52#define VOLTAGE_SCALE 4
53#define VOLTAGE_VID_OFFSET_SCALE1 625
54#define VOLTAGE_VID_OFFSET_SCALE2 100
55
56#define HBM_MEMORY_CHANNEL_WIDTH 128
57
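/*
 * Number of HBM channels for each DF DramBaseAddress0.IntLvNumChan
 * encoding; the zero entries correspond to reserved encodings.
 */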
58uint32_t channel_number[] = {1, 2, 0, 4, 0, 8, 0, 16, 2};
59
60#define MEM_FREQ_LOW_LATENCY 25000
61#define MEM_FREQ_HIGH_LATENCY 80000
62#define MEM_LATENCY_HIGH 245
63#define MEM_LATENCY_LOW 35
64#define MEM_LATENCY_ERR 0xFFFF
65
66#define mmDF_CS_AON0_DramBaseAddress0 0x0044
67#define mmDF_CS_AON0_DramBaseAddress0_BASE_IDX 0
68
69//DF_CS_AON0_DramBaseAddress0
70#define DF_CS_AON0_DramBaseAddress0__AddrRngVal__SHIFT 0x0
71#define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn__SHIFT 0x1
72#define DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT 0x4
73#define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel__SHIFT 0x8
74#define DF_CS_AON0_DramBaseAddress0__DramBaseAddr__SHIFT 0xc
75#define DF_CS_AON0_DramBaseAddress0__AddrRngVal_MASK 0x00000001L
76#define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn_MASK 0x00000002L
77#define DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK 0x000000F0L
78#define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel_MASK 0x00000700L
79#define DF_CS_AON0_DramBaseAddress0__DramBaseAddr_MASK 0xFFFFF000L
80
81const ULONG PhwVega10_Magic = (ULONG)(PHM_VIslands_Magic);
82
83struct vega10_power_state *cast_phw_vega10_power_state(
84 struct pp_hw_power_state *hw_ps)
85{
86 PP_ASSERT_WITH_CODE((PhwVega10_Magic == hw_ps->magic),
87 "Invalid Powerstate Type!",
88 return NULL;);
89
90 return (struct vega10_power_state *)hw_ps;
91}
92
93const struct vega10_power_state *cast_const_phw_vega10_power_state(
94 const struct pp_hw_power_state *hw_ps)
95{
96 PP_ASSERT_WITH_CODE((PhwVega10_Magic == hw_ps->magic),
97 "Invalid Powerstate Type!",
98 return NULL;);
99
100 return (const struct vega10_power_state *)hw_ps;
101}
102
103static void vega10_set_default_registry_data(struct pp_hwmgr *hwmgr)
104{
105 struct vega10_hwmgr *data =
106 (struct vega10_hwmgr *)(hwmgr->backend);
107
108 data->registry_data.sclk_dpm_key_disabled =
109 hwmgr->feature_mask & PP_SCLK_DPM_MASK ? false : true;
110 data->registry_data.socclk_dpm_key_disabled =
111 hwmgr->feature_mask & PP_SOCCLK_DPM_MASK ? false : true;
112 data->registry_data.mclk_dpm_key_disabled =
113 hwmgr->feature_mask & PP_MCLK_DPM_MASK ? false : true;
114 data->registry_data.pcie_dpm_key_disabled =
115 hwmgr->feature_mask & PP_PCIE_DPM_MASK ? false : true;
116
117 data->registry_data.dcefclk_dpm_key_disabled =
118 hwmgr->feature_mask & PP_DCEFCLK_DPM_MASK ? false : true;
119
120 if (hwmgr->feature_mask & PP_POWER_CONTAINMENT_MASK) {
121 data->registry_data.power_containment_support = 1;
122 data->registry_data.enable_pkg_pwr_tracking_feature = 1;
123 data->registry_data.enable_tdc_limit_feature = 1;
124 }
125
 126 data->registry_data.clock_stretcher_support =
 127 hwmgr->feature_mask & PP_CLOCK_STRETCH_MASK ? false : true;
 128
129 data->registry_data.ulv_support =
130 hwmgr->feature_mask & PP_ULV_MASK ? true : false;
131
132 data->registry_data.disable_water_mark = 0;
133
134 data->registry_data.fan_control_support = 1;
135 data->registry_data.thermal_support = 1;
136 data->registry_data.fw_ctf_enabled = 1;
137
138 data->registry_data.avfs_support = 1;
139 data->registry_data.led_dpm_enabled = 1;
140
141 data->registry_data.vr0hot_enabled = 1;
142 data->registry_data.vr1hot_enabled = 1;
143 data->registry_data.regulator_hot_gpio_support = 1;
144
145 data->display_voltage_mode = PPVEGA10_VEGA10DISPLAYVOLTAGEMODE_DFLT;
146 data->dcef_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
147 data->dcef_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
148 data->dcef_clk_quad_eqn_c = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
149 data->disp_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
150 data->disp_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
151 data->disp_clk_quad_eqn_c = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
152 data->pixel_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
153 data->pixel_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
154 data->pixel_clk_quad_eqn_c = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
155 data->phy_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
156 data->phy_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
157 data->phy_clk_quad_eqn_c = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
158
159 data->gfxclk_average_alpha = PPVEGA10_VEGA10GFXCLKAVERAGEALPHA_DFLT;
160 data->socclk_average_alpha = PPVEGA10_VEGA10SOCCLKAVERAGEALPHA_DFLT;
161 data->uclk_average_alpha = PPVEGA10_VEGA10UCLKCLKAVERAGEALPHA_DFLT;
162 data->gfx_activity_average_alpha = PPVEGA10_VEGA10GFXACTIVITYAVERAGEALPHA_DFLT;
163}
164
165static int vega10_set_features_platform_caps(struct pp_hwmgr *hwmgr)
166{
167 struct vega10_hwmgr *data =
168 (struct vega10_hwmgr *)(hwmgr->backend);
169 struct phm_ppt_v2_information *table_info =
170 (struct phm_ppt_v2_information *)hwmgr->pptable;
171 struct cgs_system_info sys_info = {0};
172 int result;
173
174 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
175 PHM_PlatformCaps_SclkDeepSleep);
176
177 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
178 PHM_PlatformCaps_DynamicPatchPowerState);
179
180 if (data->vddci_control == VEGA10_VOLTAGE_CONTROL_NONE)
181 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
182 PHM_PlatformCaps_ControlVDDCI);
183
184 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
185 PHM_PlatformCaps_TablelessHardwareInterface);
186
187 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
188 PHM_PlatformCaps_EnableSMU7ThermalManagement);
189
190 sys_info.size = sizeof(struct cgs_system_info);
191 sys_info.info_id = CGS_SYSTEM_INFO_PG_FLAGS;
192 result = cgs_query_system_info(hwmgr->device, &sys_info);
193
194 if (!result && (sys_info.value & AMD_PG_SUPPORT_UVD))
195 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
196 PHM_PlatformCaps_UVDPowerGating);
197
198 if (!result && (sys_info.value & AMD_PG_SUPPORT_VCE))
199 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
200 PHM_PlatformCaps_VCEPowerGating);
201
202 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
203 PHM_PlatformCaps_UnTabledHardwareInterface);
204
205 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
206 PHM_PlatformCaps_FanSpeedInTableIsRPM);
207
208 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
209 PHM_PlatformCaps_ODFuzzyFanControlSupport);
210
211 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
212 PHM_PlatformCaps_DynamicPowerManagement);
213
214 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
215 PHM_PlatformCaps_SMC);
216
217 /* power tune caps */
218 /* assume disabled */
219 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
220 PHM_PlatformCaps_PowerContainment);
221 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
222 PHM_PlatformCaps_SQRamping);
223 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
224 PHM_PlatformCaps_DBRamping);
225 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
226 PHM_PlatformCaps_TDRamping);
227 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
228 PHM_PlatformCaps_TCPRamping);
229
230 if (data->registry_data.power_containment_support)
231 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
232 PHM_PlatformCaps_PowerContainment);
233 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
234 PHM_PlatformCaps_CAC);
235
236 if (table_info->tdp_table->usClockStretchAmount &&
237 data->registry_data.clock_stretcher_support)
238 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
239 PHM_PlatformCaps_ClockStretcher);
240
241 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
242 PHM_PlatformCaps_RegulatorHot);
243 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
244 PHM_PlatformCaps_AutomaticDCTransition);
245
246 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
247 PHM_PlatformCaps_UVDDPM);
248 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
249 PHM_PlatformCaps_VCEDPM);
250
251 return 0;
252}
253
254static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
255{
256 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
257 int i;
258
259 vega10_initialize_power_tune_defaults(hwmgr);
260
261 for (i = 0; i < GNLD_FEATURES_MAX; i++) {
262 data->smu_features[i].smu_feature_id = 0xffff;
263 data->smu_features[i].smu_feature_bitmap = 1 << i;
264 data->smu_features[i].enabled = false;
265 data->smu_features[i].supported = false;
266 }
267
268 data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_id =
269 FEATURE_DPM_PREFETCHER_BIT;
270 data->smu_features[GNLD_DPM_GFXCLK].smu_feature_id =
271 FEATURE_DPM_GFXCLK_BIT;
272 data->smu_features[GNLD_DPM_UCLK].smu_feature_id =
273 FEATURE_DPM_UCLK_BIT;
274 data->smu_features[GNLD_DPM_SOCCLK].smu_feature_id =
275 FEATURE_DPM_SOCCLK_BIT;
276 data->smu_features[GNLD_DPM_UVD].smu_feature_id =
277 FEATURE_DPM_UVD_BIT;
278 data->smu_features[GNLD_DPM_VCE].smu_feature_id =
279 FEATURE_DPM_VCE_BIT;
280 data->smu_features[GNLD_DPM_MP0CLK].smu_feature_id =
281 FEATURE_DPM_MP0CLK_BIT;
282 data->smu_features[GNLD_DPM_LINK].smu_feature_id =
283 FEATURE_DPM_LINK_BIT;
284 data->smu_features[GNLD_DPM_DCEFCLK].smu_feature_id =
285 FEATURE_DPM_DCEFCLK_BIT;
286 data->smu_features[GNLD_ULV].smu_feature_id =
287 FEATURE_ULV_BIT;
288 data->smu_features[GNLD_AVFS].smu_feature_id =
289 FEATURE_AVFS_BIT;
290 data->smu_features[GNLD_DS_GFXCLK].smu_feature_id =
291 FEATURE_DS_GFXCLK_BIT;
292 data->smu_features[GNLD_DS_SOCCLK].smu_feature_id =
293 FEATURE_DS_SOCCLK_BIT;
294 data->smu_features[GNLD_DS_LCLK].smu_feature_id =
295 FEATURE_DS_LCLK_BIT;
296 data->smu_features[GNLD_PPT].smu_feature_id =
297 FEATURE_PPT_BIT;
298 data->smu_features[GNLD_TDC].smu_feature_id =
299 FEATURE_TDC_BIT;
300 data->smu_features[GNLD_THERMAL].smu_feature_id =
301 FEATURE_THERMAL_BIT;
302 data->smu_features[GNLD_GFX_PER_CU_CG].smu_feature_id =
303 FEATURE_GFX_PER_CU_CG_BIT;
304 data->smu_features[GNLD_RM].smu_feature_id =
305 FEATURE_RM_BIT;
306 data->smu_features[GNLD_DS_DCEFCLK].smu_feature_id =
307 FEATURE_DS_DCEFCLK_BIT;
308 data->smu_features[GNLD_ACDC].smu_feature_id =
309 FEATURE_ACDC_BIT;
310 data->smu_features[GNLD_VR0HOT].smu_feature_id =
311 FEATURE_VR0HOT_BIT;
312 data->smu_features[GNLD_VR1HOT].smu_feature_id =
313 FEATURE_VR1HOT_BIT;
314 data->smu_features[GNLD_FW_CTF].smu_feature_id =
315 FEATURE_FW_CTF_BIT;
316 data->smu_features[GNLD_LED_DISPLAY].smu_feature_id =
317 FEATURE_LED_DISPLAY_BIT;
318 data->smu_features[GNLD_FAN_CONTROL].smu_feature_id =
319 FEATURE_FAN_CONTROL_BIT;
320 data->smu_features[GNLD_VOLTAGE_CONTROLLER].smu_feature_id =
321 FEATURE_VOLTAGE_CONTROLLER_BIT;
322
323 if (!data->registry_data.prefetcher_dpm_key_disabled)
324 data->smu_features[GNLD_DPM_PREFETCHER].supported = true;
325
326 if (!data->registry_data.sclk_dpm_key_disabled)
327 data->smu_features[GNLD_DPM_GFXCLK].supported = true;
328
329 if (!data->registry_data.mclk_dpm_key_disabled)
330 data->smu_features[GNLD_DPM_UCLK].supported = true;
331
332 if (!data->registry_data.socclk_dpm_key_disabled)
333 data->smu_features[GNLD_DPM_SOCCLK].supported = true;
334
335 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
336 PHM_PlatformCaps_UVDDPM))
337 data->smu_features[GNLD_DPM_UVD].supported = true;
338
339 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
340 PHM_PlatformCaps_VCEDPM))
341 data->smu_features[GNLD_DPM_VCE].supported = true;
342
343 if (!data->registry_data.pcie_dpm_key_disabled)
344 data->smu_features[GNLD_DPM_LINK].supported = true;
345
346 if (!data->registry_data.dcefclk_dpm_key_disabled)
347 data->smu_features[GNLD_DPM_DCEFCLK].supported = true;
348
349 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
350 PHM_PlatformCaps_SclkDeepSleep) &&
351 data->registry_data.sclk_deep_sleep_support) {
352 data->smu_features[GNLD_DS_GFXCLK].supported = true;
353 data->smu_features[GNLD_DS_SOCCLK].supported = true;
354 data->smu_features[GNLD_DS_LCLK].supported = true;
355 }
356
357 if (data->registry_data.enable_pkg_pwr_tracking_feature)
358 data->smu_features[GNLD_PPT].supported = true;
359
360 if (data->registry_data.enable_tdc_limit_feature)
361 data->smu_features[GNLD_TDC].supported = true;
362
363 if (data->registry_data.thermal_support)
364 data->smu_features[GNLD_THERMAL].supported = true;
365
366 if (data->registry_data.fan_control_support)
367 data->smu_features[GNLD_FAN_CONTROL].supported = true;
368
369 if (data->registry_data.fw_ctf_enabled)
370 data->smu_features[GNLD_FW_CTF].supported = true;
371
372 if (data->registry_data.avfs_support)
373 data->smu_features[GNLD_AVFS].supported = true;
374
375 if (data->registry_data.led_dpm_enabled)
376 data->smu_features[GNLD_LED_DISPLAY].supported = true;
377
378 if (data->registry_data.vr1hot_enabled)
379 data->smu_features[GNLD_VR1HOT].supported = true;
380
381 if (data->registry_data.vr0hot_enabled)
382 data->smu_features[GNLD_VR0HOT].supported = true;
383
384}
385
386#ifdef PPLIB_VEGA10_EVV_SUPPORT
387static int vega10_get_socclk_for_voltage_evv(struct pp_hwmgr *hwmgr,
388 phm_ppt_v1_voltage_lookup_table *lookup_table,
389 uint16_t virtual_voltage_id, int32_t *socclk)
390{
391 uint8_t entry_id;
392 uint8_t voltage_id;
393 struct phm_ppt_v2_information *table_info =
394 (struct phm_ppt_v2_information *)(hwmgr->pptable);
395
396 PP_ASSERT_WITH_CODE(lookup_table->count != 0,
397 "Lookup table is empty",
398 return -EINVAL);
399
400 /* search for leakage voltage ID 0xff01 ~ 0xff08 and sclk */
401 for (entry_id = 0; entry_id < table_info->vdd_dep_on_sclk->count; entry_id++) {
402 voltage_id = table_info->vdd_dep_on_socclk->entries[entry_id].vddInd;
403 if (lookup_table->entries[voltage_id].us_vdd == virtual_voltage_id)
404 break;
405 }
406
407 PP_ASSERT_WITH_CODE(entry_id < table_info->vdd_dep_on_socclk->count,
408 "Can't find requested voltage id in vdd_dep_on_socclk table!",
409 return -EINVAL);
410
411 *socclk = table_info->vdd_dep_on_socclk->entries[entry_id].clk;
412
413 return 0;
414}
415
416#define ATOM_VIRTUAL_VOLTAGE_ID0 0xff01
417/**
418* Get Leakage VDDC based on leakage ID.
419*
420* @param hwmgr the address of the powerplay hardware manager.
421* @return always 0.
422*/
423static int vega10_get_evv_voltages(struct pp_hwmgr *hwmgr)
424{
425 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
426 uint16_t vv_id;
427 uint32_t vddc = 0;
428 uint16_t i, j;
429 uint32_t sclk = 0;
430 struct phm_ppt_v2_information *table_info =
431 (struct phm_ppt_v2_information *)hwmgr->pptable;
432 struct phm_ppt_v1_clock_voltage_dependency_table *socclk_table =
433 table_info->vdd_dep_on_socclk;
434 int result;
435
436 for (i = 0; i < VEGA10_MAX_LEAKAGE_COUNT; i++) {
437 vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
438
439 if (!vega10_get_socclk_for_voltage_evv(hwmgr,
440 table_info->vddc_lookup_table, vv_id, &sclk)) {
441 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
442 PHM_PlatformCaps_ClockStretcher)) {
443 for (j = 1; j < socclk_table->count; j++) {
444 if (socclk_table->entries[j].clk == sclk &&
445 socclk_table->entries[j].cks_enable == 0) {
446 sclk += 5000;
447 break;
448 }
449 }
450 }
451
452 PP_ASSERT_WITH_CODE(!atomctrl_get_voltage_evv_on_sclk_ai(hwmgr,
453 VOLTAGE_TYPE_VDDC, sclk, vv_id, &vddc),
454 "Error retrieving EVV voltage value!",
455 continue);
456
457
 458 /* need to make sure vddc is less than 2V, or else it could burn the ASIC. */
459 PP_ASSERT_WITH_CODE((vddc < 2000 && vddc != 0),
460 "Invalid VDDC value", result = -EINVAL;);
461
462 /* the voltage should not be zero nor equal to leakage ID */
463 if (vddc != 0 && vddc != vv_id) {
464 data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = (uint16_t)(vddc/100);
465 data->vddc_leakage.leakage_id[data->vddc_leakage.count] = vv_id;
466 data->vddc_leakage.count++;
467 }
468 }
469 }
470
471 return 0;
472}
473
474/**
475 * Change virtual leakage voltage to actual value.
476 *
477 * @param hwmgr the address of the powerplay hardware manager.
478 * @param pointer to changing voltage
479 * @param pointer to leakage table
480 */
481static void vega10_patch_with_vdd_leakage(struct pp_hwmgr *hwmgr,
482 uint16_t *voltage, struct vega10_leakage_voltage *leakage_table)
483{
484 uint32_t index;
485
486 /* search for leakage voltage ID 0xff01 ~ 0xff08 */
487 for (index = 0; index < leakage_table->count; index++) {
488 /* if this voltage matches a leakage voltage ID */
489 /* patch with actual leakage voltage */
490 if (leakage_table->leakage_id[index] == *voltage) {
491 *voltage = leakage_table->actual_voltage[index];
492 break;
493 }
494 }
495
496 if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
497 pr_info("Voltage value looks like a Leakage ID \
498 but it's not patched\n");
499}
500
501/**
502* Patch voltage lookup table by EVV leakages.
503*
504* @param hwmgr the address of the powerplay hardware manager.
505* @param pointer to voltage lookup table
506* @param pointer to leakage table
507* @return always 0
508*/
509static int vega10_patch_lookup_table_with_leakage(struct pp_hwmgr *hwmgr,
510 phm_ppt_v1_voltage_lookup_table *lookup_table,
511 struct vega10_leakage_voltage *leakage_table)
512{
513 uint32_t i;
514
515 for (i = 0; i < lookup_table->count; i++)
516 vega10_patch_with_vdd_leakage(hwmgr,
517 &lookup_table->entries[i].us_vdd, leakage_table);
518
519 return 0;
520}
521
522static int vega10_patch_clock_voltage_limits_with_vddc_leakage(
523 struct pp_hwmgr *hwmgr, struct vega10_leakage_voltage *leakage_table,
524 uint16_t *vddc)
525{
526 vega10_patch_with_vdd_leakage(hwmgr, (uint16_t *)vddc, leakage_table);
527
528 return 0;
529}
530#endif
531
532static int vega10_patch_voltage_dependency_tables_with_lookup_table(
533 struct pp_hwmgr *hwmgr)
534{
535 uint8_t entry_id;
536 uint8_t voltage_id;
537 struct phm_ppt_v2_information *table_info =
538 (struct phm_ppt_v2_information *)(hwmgr->pptable);
539 struct phm_ppt_v1_clock_voltage_dependency_table *socclk_table =
540 table_info->vdd_dep_on_socclk;
541 struct phm_ppt_v1_clock_voltage_dependency_table *gfxclk_table =
542 table_info->vdd_dep_on_sclk;
543 struct phm_ppt_v1_clock_voltage_dependency_table *dcefclk_table =
544 table_info->vdd_dep_on_dcefclk;
545 struct phm_ppt_v1_clock_voltage_dependency_table *pixclk_table =
546 table_info->vdd_dep_on_pixclk;
547 struct phm_ppt_v1_clock_voltage_dependency_table *dspclk_table =
548 table_info->vdd_dep_on_dispclk;
549 struct phm_ppt_v1_clock_voltage_dependency_table *phyclk_table =
550 table_info->vdd_dep_on_phyclk;
551 struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table =
552 table_info->vdd_dep_on_mclk;
553 struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
554 table_info->mm_dep_table;
555
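	/*
	 * Resolve each dependency entry's lookup-table index (vddInd, vddciInd,
	 * mvddInd) into an absolute voltage from the corresponding voltage
	 * lookup table.
	 */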
556 for (entry_id = 0; entry_id < socclk_table->count; entry_id++) {
557 voltage_id = socclk_table->entries[entry_id].vddInd;
558 socclk_table->entries[entry_id].vddc =
559 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
560 }
561
562 for (entry_id = 0; entry_id < gfxclk_table->count; entry_id++) {
563 voltage_id = gfxclk_table->entries[entry_id].vddInd;
564 gfxclk_table->entries[entry_id].vddc =
565 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
566 }
567
568 for (entry_id = 0; entry_id < dcefclk_table->count; entry_id++) {
569 voltage_id = dcefclk_table->entries[entry_id].vddInd;
570 dcefclk_table->entries[entry_id].vddc =
571 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
572 }
573
574 for (entry_id = 0; entry_id < pixclk_table->count; entry_id++) {
575 voltage_id = pixclk_table->entries[entry_id].vddInd;
576 pixclk_table->entries[entry_id].vddc =
577 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
578 }
579
580 for (entry_id = 0; entry_id < dspclk_table->count; entry_id++) {
581 voltage_id = dspclk_table->entries[entry_id].vddInd;
582 dspclk_table->entries[entry_id].vddc =
583 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
584 }
585
586 for (entry_id = 0; entry_id < phyclk_table->count; entry_id++) {
587 voltage_id = phyclk_table->entries[entry_id].vddInd;
588 phyclk_table->entries[entry_id].vddc =
589 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
590 }
591
592 for (entry_id = 0; entry_id < mclk_table->count; ++entry_id) {
593 voltage_id = mclk_table->entries[entry_id].vddInd;
594 mclk_table->entries[entry_id].vddc =
595 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
596 voltage_id = mclk_table->entries[entry_id].vddciInd;
597 mclk_table->entries[entry_id].vddci =
598 table_info->vddci_lookup_table->entries[voltage_id].us_vdd;
599 voltage_id = mclk_table->entries[entry_id].mvddInd;
600 mclk_table->entries[entry_id].mvdd =
601 table_info->vddmem_lookup_table->entries[voltage_id].us_vdd;
602 }
603
604 for (entry_id = 0; entry_id < mm_table->count; ++entry_id) {
605 voltage_id = mm_table->entries[entry_id].vddcInd;
606 mm_table->entries[entry_id].vddc =
607 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
608 }
609
610 return 0;
611
612}
613
614static int vega10_sort_lookup_table(struct pp_hwmgr *hwmgr,
615 struct phm_ppt_v1_voltage_lookup_table *lookup_table)
616{
617 uint32_t table_size, i, j;
618 struct phm_ppt_v1_voltage_lookup_record tmp_voltage_lookup_record;
619
620 PP_ASSERT_WITH_CODE(lookup_table && lookup_table->count,
621 "Lookup table is empty", return -EINVAL);
622
623 table_size = lookup_table->count;
624
 625 /* Sort entries in ascending order of voltage */
626 for (i = 0; i < table_size - 1; i++) {
627 for (j = i + 1; j > 0; j--) {
628 if (lookup_table->entries[j].us_vdd <
629 lookup_table->entries[j - 1].us_vdd) {
630 tmp_voltage_lookup_record = lookup_table->entries[j - 1];
631 lookup_table->entries[j - 1] = lookup_table->entries[j];
632 lookup_table->entries[j] = tmp_voltage_lookup_record;
633 }
634 }
635 }
636
637 return 0;
638}
639
640static int vega10_complete_dependency_tables(struct pp_hwmgr *hwmgr)
641{
642 int result = 0;
643 int tmp_result;
644 struct phm_ppt_v2_information *table_info =
645 (struct phm_ppt_v2_information *)(hwmgr->pptable);
646#ifdef PPLIB_VEGA10_EVV_SUPPORT
647 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
648
649 tmp_result = vega10_patch_lookup_table_with_leakage(hwmgr,
650 table_info->vddc_lookup_table, &(data->vddc_leakage));
651 if (tmp_result)
652 result = tmp_result;
653
654 tmp_result = vega10_patch_clock_voltage_limits_with_vddc_leakage(hwmgr,
655 &(data->vddc_leakage), &table_info->max_clock_voltage_on_dc.vddc);
656 if (tmp_result)
657 result = tmp_result;
658#endif
659
660 tmp_result = vega10_patch_voltage_dependency_tables_with_lookup_table(hwmgr);
661 if (tmp_result)
662 result = tmp_result;
663
664 tmp_result = vega10_sort_lookup_table(hwmgr, table_info->vddc_lookup_table);
665 if (tmp_result)
666 result = tmp_result;
667
668 return result;
669}
670
671static int vega10_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr)
672{
673 struct phm_ppt_v2_information *table_info =
674 (struct phm_ppt_v2_information *)(hwmgr->pptable);
675 struct phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table =
676 table_info->vdd_dep_on_socclk;
677 struct phm_ppt_v1_clock_voltage_dependency_table *allowed_mclk_vdd_table =
678 table_info->vdd_dep_on_mclk;
679
680 PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table,
681 "VDD dependency on SCLK table is missing. \
682 This table is mandatory", return -EINVAL);
683 PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1,
684 "VDD dependency on SCLK table is empty. \
685 This table is mandatory", return -EINVAL);
686
687 PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table,
688 "VDD dependency on MCLK table is missing. \
689 This table is mandatory", return -EINVAL);
690 PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1,
691 "VDD dependency on MCLK table is empty. \
692 This table is mandatory", return -EINVAL);
693
694 table_info->max_clock_voltage_on_ac.sclk =
695 allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].clk;
696 table_info->max_clock_voltage_on_ac.mclk =
697 allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].clk;
698 table_info->max_clock_voltage_on_ac.vddc =
699 allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc;
700 table_info->max_clock_voltage_on_ac.vddci =
701 allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].vddci;
702
703 hwmgr->dyn_state.max_clock_voltage_on_ac.sclk =
704 table_info->max_clock_voltage_on_ac.sclk;
705 hwmgr->dyn_state.max_clock_voltage_on_ac.mclk =
706 table_info->max_clock_voltage_on_ac.mclk;
707 hwmgr->dyn_state.max_clock_voltage_on_ac.vddc =
708 table_info->max_clock_voltage_on_ac.vddc;
709 hwmgr->dyn_state.max_clock_voltage_on_ac.vddci =
710 table_info->max_clock_voltage_on_ac.vddci;
711
712 return 0;
713}
714
715static int vega10_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
716{
717 kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
718 hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL;
719
720 kfree(hwmgr->backend);
721 hwmgr->backend = NULL;
722
723 return 0;
724}
725
726static int vega10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
727{
728 int result = 0;
729 struct vega10_hwmgr *data;
730 uint32_t config_telemetry = 0;
731 struct pp_atomfwctrl_voltage_table vol_table;
732 struct cgs_system_info sys_info = {0};
733
734 data = kzalloc(sizeof(struct vega10_hwmgr), GFP_KERNEL);
735 if (data == NULL)
736 return -ENOMEM;
737
738 hwmgr->backend = data;
739
740 vega10_set_default_registry_data(hwmgr);
741
742 data->disable_dpm_mask = 0xff;
743 data->workload_mask = 0xff;
744
745 /* need to set voltage control types before EVV patching */
746 data->vddc_control = VEGA10_VOLTAGE_CONTROL_NONE;
747 data->mvdd_control = VEGA10_VOLTAGE_CONTROL_NONE;
748 data->vddci_control = VEGA10_VOLTAGE_CONTROL_NONE;
749
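	/*
	 * config_telemetry packs the SVID2 telemetry parameters:
	 * bits 7:0 VDDCR_SOC offset, bits 15:8 VDDCR_SOC slope,
	 * bits 23:16 MVDDC offset, bits 31:24 MVDDC slope.
	 */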
750 /* VDDCR_SOC */
751 if (pp_atomfwctrl_is_voltage_controlled_by_gpio_v4(hwmgr,
752 VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2)) {
753 if (!pp_atomfwctrl_get_voltage_table_v4(hwmgr,
754 VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2,
755 &vol_table)) {
756 config_telemetry = ((vol_table.telemetry_slope << 8) & 0xff00) |
757 (vol_table.telemetry_offset & 0xff);
758 data->vddc_control = VEGA10_VOLTAGE_CONTROL_BY_SVID2;
759 }
760 } else {
761 kfree(hwmgr->backend);
762 hwmgr->backend = NULL;
763 PP_ASSERT_WITH_CODE(false,
764 "VDDCR_SOC is not SVID2!",
765 return -1);
766 }
767
768 /* MVDDC */
769 if (pp_atomfwctrl_is_voltage_controlled_by_gpio_v4(hwmgr,
770 VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2)) {
771 if (!pp_atomfwctrl_get_voltage_table_v4(hwmgr,
772 VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2,
773 &vol_table)) {
774 config_telemetry |=
775 ((vol_table.telemetry_slope << 24) & 0xff000000) |
776 ((vol_table.telemetry_offset << 16) & 0xff0000);
777 data->mvdd_control = VEGA10_VOLTAGE_CONTROL_BY_SVID2;
778 }
779 }
780
781 /* VDDCI_MEM */
782 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
783 PHM_PlatformCaps_ControlVDDCI)) {
784 if (pp_atomfwctrl_is_voltage_controlled_by_gpio_v4(hwmgr,
785 VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
786 data->vddci_control = VEGA10_VOLTAGE_CONTROL_BY_GPIO;
787 }
788
789 data->config_telemetry = config_telemetry;
790
791 vega10_set_features_platform_caps(hwmgr);
792
793 vega10_init_dpm_defaults(hwmgr);
794
795#ifdef PPLIB_VEGA10_EVV_SUPPORT
796 /* Get leakage voltage based on leakage ID. */
797 PP_ASSERT_WITH_CODE(!vega10_get_evv_voltages(hwmgr),
798 "Get EVV Voltage Failed. Abort Driver loading!",
799 return -1);
800#endif
801
802 /* Patch our voltage dependency table with actual leakage voltage
803 * We need to perform leakage translation before it's used by other functions
804 */
805 vega10_complete_dependency_tables(hwmgr);
806
807 /* Parse pptable data read from VBIOS */
808 vega10_set_private_data_based_on_pptable(hwmgr);
809
810 data->is_tlu_enabled = false;
811
812 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
813 VEGA10_MAX_HARDWARE_POWERLEVELS;
814 hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
815 hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
816
817 hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */
818 /* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. */
819 hwmgr->platform_descriptor.clockStep.engineClock = 500;
820 hwmgr->platform_descriptor.clockStep.memoryClock = 500;
821
822 sys_info.size = sizeof(struct cgs_system_info);
823 sys_info.info_id = CGS_SYSTEM_INFO_GFX_CU_INFO;
824 result = cgs_query_system_info(hwmgr->device, &sys_info);
825 data->total_active_cus = sys_info.value;
826 /* Setup default Overdrive Fan control settings */
827 data->odn_fan_table.target_fan_speed =
828 hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM;
829 data->odn_fan_table.target_temperature =
830 hwmgr->thermal_controller.
831 advanceFanControlParameters.ucTargetTemperature;
832 data->odn_fan_table.min_performance_clock =
833 hwmgr->thermal_controller.advanceFanControlParameters.
834 ulMinFanSCLKAcousticLimit;
835 data->odn_fan_table.min_fan_limit =
836 hwmgr->thermal_controller.
837 advanceFanControlParameters.usFanPWMMinLimit *
838 hwmgr->thermal_controller.fanInfo.ulMaxRPM / 100;
839
840 return result;
841}
842
843static int vega10_init_sclk_threshold(struct pp_hwmgr *hwmgr)
844{
845 struct vega10_hwmgr *data =
846 (struct vega10_hwmgr *)(hwmgr->backend);
847
848 data->low_sclk_interrupt_threshold = 0;
849
850 return 0;
851}
852
853static int vega10_setup_dpm_led_config(struct pp_hwmgr *hwmgr)
854{
855 struct vega10_hwmgr *data =
856 (struct vega10_hwmgr *)(hwmgr->backend);
857 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
858
859 struct pp_atomfwctrl_voltage_table table;
860 uint8_t i, j;
861 uint32_t mask = 0;
862 uint32_t tmp;
863 int32_t ret = 0;
864
865 ret = pp_atomfwctrl_get_voltage_table_v4(hwmgr, VOLTAGE_TYPE_LEDDPM,
866 VOLTAGE_OBJ_GPIO_LUT, &table);
867
868 if (!ret) {
869 tmp = table.mask_low;
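		/*
		 * Record the bit positions of the first three set bits of
		 * mask_low, one per byte of 'mask'; they become the LED pin
		 * numbers written to LedPin0..LedPin2 below.
		 */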
870 for (i = 0, j = 0; i < 32; i++) {
871 if (tmp & 1) {
872 mask |= (uint32_t)(i << (8 * j));
873 if (++j >= 3)
874 break;
875 }
876 tmp >>= 1;
877 }
878 }
879
880 pp_table->LedPin0 = (uint8_t)(mask & 0xff);
881 pp_table->LedPin1 = (uint8_t)((mask >> 8) & 0xff);
882 pp_table->LedPin2 = (uint8_t)((mask >> 16) & 0xff);
883 return 0;
884}
885
886static int vega10_setup_asic_task(struct pp_hwmgr *hwmgr)
887{
888 PP_ASSERT_WITH_CODE(!vega10_init_sclk_threshold(hwmgr),
889 "Failed to init sclk threshold!",
890 return -EINVAL);
891
892 PP_ASSERT_WITH_CODE(!vega10_setup_dpm_led_config(hwmgr),
893 "Failed to set up led dpm config!",
894 return -EINVAL);
895
896 return 0;
897}
898
899static bool vega10_is_dpm_running(struct pp_hwmgr *hwmgr)
900{
901 uint32_t features_enabled;
902
903 if (!vega10_get_smc_features(hwmgr->smumgr, &features_enabled)) {
904 if (features_enabled & SMC_DPM_FEATURES)
905 return true;
906 }
907 return false;
908}
909
910/**
911* Remove repeated voltage values and create table with unique values.
912*
913* @param hwmgr the address of the powerplay hardware manager.
914* @param vol_table the pointer to changing voltage table
 915* @return 0 on success
916*/
917
918static int vega10_trim_voltage_table(struct pp_hwmgr *hwmgr,
919 struct pp_atomfwctrl_voltage_table *vol_table)
920{
921 uint32_t i, j;
922 uint16_t vvalue;
923 bool found = false;
924 struct pp_atomfwctrl_voltage_table *table;
925
926 PP_ASSERT_WITH_CODE(vol_table,
927 "Voltage Table empty.", return -EINVAL);
928 table = kzalloc(sizeof(struct pp_atomfwctrl_voltage_table),
929 GFP_KERNEL);
930
931 if (!table)
932 return -ENOMEM;
933
934 table->mask_low = vol_table->mask_low;
935 table->phase_delay = vol_table->phase_delay;
936
937 for (i = 0; i < vol_table->count; i++) {
938 vvalue = vol_table->entries[i].value;
939 found = false;
940
941 for (j = 0; j < table->count; j++) {
942 if (vvalue == table->entries[j].value) {
943 found = true;
944 break;
945 }
946 }
947
948 if (!found) {
949 table->entries[table->count].value = vvalue;
950 table->entries[table->count].smio_low =
951 vol_table->entries[i].smio_low;
952 table->count++;
953 }
954 }
955
956 memcpy(vol_table, table, sizeof(struct pp_atomfwctrl_voltage_table));
957 kfree(table);
958
959 return 0;
960}
961
962static int vega10_get_mvdd_voltage_table(struct pp_hwmgr *hwmgr,
963 phm_ppt_v1_clock_voltage_dependency_table *dep_table,
964 struct pp_atomfwctrl_voltage_table *vol_table)
965{
966 int i;
967
968 PP_ASSERT_WITH_CODE(dep_table->count,
969 "Voltage Dependency Table empty.",
970 return -EINVAL);
971
972 vol_table->mask_low = 0;
973 vol_table->phase_delay = 0;
974 vol_table->count = dep_table->count;
975
976 for (i = 0; i < vol_table->count; i++) {
977 vol_table->entries[i].value = dep_table->entries[i].mvdd;
978 vol_table->entries[i].smio_low = 0;
979 }
980
981 PP_ASSERT_WITH_CODE(!vega10_trim_voltage_table(hwmgr,
982 vol_table),
983 "Failed to trim MVDD Table!",
984 return -1);
985
986 return 0;
987}
988
989static int vega10_get_vddci_voltage_table(struct pp_hwmgr *hwmgr,
990 phm_ppt_v1_clock_voltage_dependency_table *dep_table,
991 struct pp_atomfwctrl_voltage_table *vol_table)
992{
993 uint32_t i;
994
995 PP_ASSERT_WITH_CODE(dep_table->count,
996 "Voltage Dependency Table empty.",
997 return -EINVAL);
998
999 vol_table->mask_low = 0;
1000 vol_table->phase_delay = 0;
1001 vol_table->count = dep_table->count;
1002
1003 for (i = 0; i < dep_table->count; i++) {
1004 vol_table->entries[i].value = dep_table->entries[i].vddci;
1005 vol_table->entries[i].smio_low = 0;
1006 }
1007
1008 PP_ASSERT_WITH_CODE(!vega10_trim_voltage_table(hwmgr, vol_table),
1009 "Failed to trim VDDCI table.",
1010 return -1);
1011
1012 return 0;
1013}
1014
1015static int vega10_get_vdd_voltage_table(struct pp_hwmgr *hwmgr,
1016 phm_ppt_v1_clock_voltage_dependency_table *dep_table,
1017 struct pp_atomfwctrl_voltage_table *vol_table)
1018{
1019 int i;
1020
1021 PP_ASSERT_WITH_CODE(dep_table->count,
1022 "Voltage Dependency Table empty.",
1023 return -EINVAL);
1024
1025 vol_table->mask_low = 0;
1026 vol_table->phase_delay = 0;
1027 vol_table->count = dep_table->count;
1028
1029 for (i = 0; i < vol_table->count; i++) {
1030 vol_table->entries[i].value = dep_table->entries[i].vddc;
1031 vol_table->entries[i].smio_low = 0;
1032 }
1033
1034 return 0;
1035}
1036
1037/* ---- Voltage Tables ----
1038 * If the voltage table would be bigger than
1039 * what will fit into the state table on
1040 * the SMC keep only the higher entries.
1041 */
1042static void vega10_trim_voltage_table_to_fit_state_table(
1043 struct pp_hwmgr *hwmgr,
1044 uint32_t max_vol_steps,
1045 struct pp_atomfwctrl_voltage_table *vol_table)
1046{
1047 unsigned int i, diff;
1048
1049 if (vol_table->count <= max_vol_steps)
1050 return;
1051
1052 diff = vol_table->count - max_vol_steps;
1053
1054 for (i = 0; i < max_vol_steps; i++)
1055 vol_table->entries[i] = vol_table->entries[i + diff];
1056
1057 vol_table->count = max_vol_steps;
1058}
1059
1060/**
1061* Create Voltage Tables.
1062*
1063* @param hwmgr the address of the powerplay hardware manager.
1064* @return always 0
1065*/
1066static int vega10_construct_voltage_tables(struct pp_hwmgr *hwmgr)
1067{
1068 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
1069 struct phm_ppt_v2_information *table_info =
1070 (struct phm_ppt_v2_information *)hwmgr->pptable;
1071 int result;
1072
1073 if (data->mvdd_control == VEGA10_VOLTAGE_CONTROL_BY_SVID2 ||
1074 data->mvdd_control == VEGA10_VOLTAGE_CONTROL_NONE) {
1075 result = vega10_get_mvdd_voltage_table(hwmgr,
1076 table_info->vdd_dep_on_mclk,
1077 &(data->mvdd_voltage_table));
1078 PP_ASSERT_WITH_CODE(!result,
1079 "Failed to retrieve MVDDC table!",
1080 return result);
1081 }
1082
1083 if (data->vddci_control == VEGA10_VOLTAGE_CONTROL_NONE) {
1084 result = vega10_get_vddci_voltage_table(hwmgr,
1085 table_info->vdd_dep_on_mclk,
1086 &(data->vddci_voltage_table));
1087 PP_ASSERT_WITH_CODE(!result,
1088 "Failed to retrieve VDDCI_MEM table!",
1089 return result);
1090 }
1091
1092 if (data->vddc_control == VEGA10_VOLTAGE_CONTROL_BY_SVID2 ||
1093 data->vddc_control == VEGA10_VOLTAGE_CONTROL_NONE) {
1094 result = vega10_get_vdd_voltage_table(hwmgr,
1095 table_info->vdd_dep_on_sclk,
1096 &(data->vddc_voltage_table));
1097 PP_ASSERT_WITH_CODE(!result,
1098 "Failed to retrieve VDDCR_SOC table!",
1099 return result);
1100 }
1101
1102 PP_ASSERT_WITH_CODE(data->vddc_voltage_table.count <= 16,
1103 "Too many voltage values for VDDC. Trimming to fit state table.",
1104 vega10_trim_voltage_table_to_fit_state_table(hwmgr,
1105 16, &(data->vddc_voltage_table)));
1106
1107 PP_ASSERT_WITH_CODE(data->vddci_voltage_table.count <= 16,
1108 "Too many voltage values for VDDCI. Trimming to fit state table.",
1109 vega10_trim_voltage_table_to_fit_state_table(hwmgr,
1110 16, &(data->vddci_voltage_table)));
1111
1112 PP_ASSERT_WITH_CODE(data->mvdd_voltage_table.count <= 16,
1113 "Too many voltage values for MVDD. Trimming to fit state table.",
1114 vega10_trim_voltage_table_to_fit_state_table(hwmgr,
1115 16, &(data->mvdd_voltage_table)));
1116
1117
1118 return 0;
1119}
1120
1121/*
1122 * @fn vega10_init_dpm_state
1123 * @brief Function to initialize all Soft Min/Max and Hard Min/Max to 0xff.
1124 *
 1125 * @param dpm_state - the address of the DPM Table to initialize.
1126 * @return None.
1127 */
1128static void vega10_init_dpm_state(struct vega10_dpm_state *dpm_state)
1129{
1130 dpm_state->soft_min_level = 0xff;
1131 dpm_state->soft_max_level = 0xff;
1132 dpm_state->hard_min_level = 0xff;
1133 dpm_state->hard_max_level = 0xff;
1134}
1135
1136static void vega10_setup_default_single_dpm_table(struct pp_hwmgr *hwmgr,
1137 struct vega10_single_dpm_table *dpm_table,
1138 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table)
1139{
1140 int i;
1141
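	/*
	 * Add one DPM level per dependency entry, skipping entries whose clock
	 * is lower than the previously added level so the table stays
	 * monotonically non-decreasing.
	 */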
1142 for (i = 0; i < dep_table->count; i++) {
 1143 if (i == 0 || dpm_table->dpm_levels[dpm_table->count - 1].value <=
1144 dep_table->entries[i].clk) {
1145 dpm_table->dpm_levels[dpm_table->count].value =
1146 dep_table->entries[i].clk;
1147 dpm_table->dpm_levels[dpm_table->count].enabled = true;
1148 dpm_table->count++;
1149 }
1150 }
1151}
1152static int vega10_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
1153{
1154 struct vega10_hwmgr *data =
1155 (struct vega10_hwmgr *)(hwmgr->backend);
1156 struct vega10_pcie_table *pcie_table = &(data->dpm_table.pcie_table);
1157 struct phm_ppt_v2_information *table_info =
1158 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1159 struct phm_ppt_v1_pcie_table *bios_pcie_table =
1160 table_info->pcie_table;
1161 uint32_t i;
1162
1163 PP_ASSERT_WITH_CODE(bios_pcie_table->count,
1164 "Incorrect number of PCIE States from VBIOS!",
1165 return -1);
1166
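	/*
	 * For each link level prefer the registry (driver parameter) override
	 * when set, otherwise use the VBIOS PCIe table entry.
	 */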
 1167 for (i = 0; i < NUM_LINK_LEVELS; i++) {
1168 if (data->registry_data.pcieSpeedOverride)
1169 pcie_table->pcie_gen[i] =
1170 data->registry_data.pcieSpeedOverride;
1171 else
1172 pcie_table->pcie_gen[i] =
1173 bios_pcie_table->entries[i].gen_speed;
1174
1175 if (data->registry_data.pcieLaneOverride)
1176 pcie_table->pcie_lane[i] = (uint8_t)encode_pcie_lane_width(
1177 data->registry_data.pcieLaneOverride);
 1178 else
1179 pcie_table->pcie_lane[i] = (uint8_t)encode_pcie_lane_width(
1180 bios_pcie_table->entries[i].lane_width);
1181 printk("pcie_table->pcie_lane[%d] is %d %d\n", i, pcie_table->pcie_lane[i], bios_pcie_table->entries[i].lane_width);
1182 if (data->registry_data.pcieClockOverride)
1183 pcie_table->lclk[i] =
1184 data->registry_data.pcieClockOverride;
1185 else
1186 pcie_table->lclk[i] =
1187 bios_pcie_table->entries[i].pcie_sclk;
1188 }
1189
 1190 pcie_table->count = NUM_LINK_LEVELS;
1191
1192 return 0;
1193}
1194
1195/*
1196 * This function is to initialize all DPM state tables
1197 * for SMU based on the dependency table.
1198 * Dynamic state patching function will then trim these
1199 * state tables to the allowed range based
1200 * on the power policy or external client requests,
1201 * such as UVD request, etc.
1202 */
1203static int vega10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
1204{
1205 struct vega10_hwmgr *data =
1206 (struct vega10_hwmgr *)(hwmgr->backend);
1207 struct phm_ppt_v2_information *table_info =
1208 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1209 struct vega10_single_dpm_table *dpm_table;
1210 uint32_t i;
1211
1212 struct phm_ppt_v1_clock_voltage_dependency_table *dep_soc_table =
1213 table_info->vdd_dep_on_socclk;
1214 struct phm_ppt_v1_clock_voltage_dependency_table *dep_gfx_table =
1215 table_info->vdd_dep_on_sclk;
1216 struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
1217 table_info->vdd_dep_on_mclk;
1218 struct phm_ppt_v1_mm_clock_voltage_dependency_table *dep_mm_table =
1219 table_info->mm_dep_table;
1220 struct phm_ppt_v1_clock_voltage_dependency_table *dep_dcef_table =
1221 table_info->vdd_dep_on_dcefclk;
1222 struct phm_ppt_v1_clock_voltage_dependency_table *dep_pix_table =
1223 table_info->vdd_dep_on_pixclk;
1224 struct phm_ppt_v1_clock_voltage_dependency_table *dep_disp_table =
1225 table_info->vdd_dep_on_dispclk;
1226 struct phm_ppt_v1_clock_voltage_dependency_table *dep_phy_table =
1227 table_info->vdd_dep_on_phyclk;
1228
1229 PP_ASSERT_WITH_CODE(dep_soc_table,
1230 "SOCCLK dependency table is missing. This table is mandatory",
1231 return -EINVAL);
1232 PP_ASSERT_WITH_CODE(dep_soc_table->count >= 1,
1233 "SOCCLK dependency table is empty. This table is mandatory",
1234 return -EINVAL);
1235
1236 PP_ASSERT_WITH_CODE(dep_gfx_table,
1237 "GFXCLK dependency table is missing. This table is mandatory",
1238 return -EINVAL);
1239 PP_ASSERT_WITH_CODE(dep_gfx_table->count >= 1,
1240 "GFXCLK dependency table is empty. This table is mandatory",
1241 return -EINVAL);
1242
1243 PP_ASSERT_WITH_CODE(dep_mclk_table,
1244 "MCLK dependency table is missing. This table is mandatory",
1245 return -EINVAL);
1246 PP_ASSERT_WITH_CODE(dep_mclk_table->count >= 1,
1247 "MCLK dependency table has to have is missing. This table is mandatory",
1248 return -EINVAL);
1249
1250 /* Initialize Sclk DPM table based on allow Sclk values */
1251 data->dpm_table.soc_table.count = 0;
1252 data->dpm_table.gfx_table.count = 0;
1253 data->dpm_table.dcef_table.count = 0;
1254
1255 dpm_table = &(data->dpm_table.soc_table);
1256 vega10_setup_default_single_dpm_table(hwmgr,
1257 dpm_table,
1258 dep_soc_table);
1259
1260 vega10_init_dpm_state(&(dpm_table->dpm_state));
1261
1262 dpm_table = &(data->dpm_table.gfx_table);
1263 vega10_setup_default_single_dpm_table(hwmgr,
1264 dpm_table,
1265 dep_gfx_table);
1266 vega10_init_dpm_state(&(dpm_table->dpm_state));
1267
1268 /* Initialize Mclk DPM table based on allow Mclk values */
1269 data->dpm_table.mem_table.count = 0;
1270 dpm_table = &(data->dpm_table.mem_table);
1271 vega10_setup_default_single_dpm_table(hwmgr,
1272 dpm_table,
1273 dep_mclk_table);
1274 vega10_init_dpm_state(&(dpm_table->dpm_state));
1275
1276 data->dpm_table.eclk_table.count = 0;
1277 dpm_table = &(data->dpm_table.eclk_table);
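	/*
	 * For the multimedia clocks (ECLK/VCLK/DCLK) only the first DPM level
	 * is enabled by default; the remaining levels are populated but left
	 * disabled.
	 */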
1278 for (i = 0; i < dep_mm_table->count; i++) {
1279 if (i == 0 || dpm_table->dpm_levels
 1280 [dpm_table->count - 1].value <=
1281 dep_mm_table->entries[i].eclk) {
1282 dpm_table->dpm_levels[dpm_table->count].value =
1283 dep_mm_table->entries[i].eclk;
1284 dpm_table->dpm_levels[dpm_table->count].enabled =
1285 (i == 0) ? true : false;
1286 dpm_table->count++;
1287 }
1288 }
1289 vega10_init_dpm_state(&(dpm_table->dpm_state));
1290
1291 data->dpm_table.vclk_table.count = 0;
1292 data->dpm_table.dclk_table.count = 0;
1293 dpm_table = &(data->dpm_table.vclk_table);
1294 for (i = 0; i < dep_mm_table->count; i++) {
1295 if (i == 0 || dpm_table->dpm_levels
 1296 [dpm_table->count - 1].value <=
1297 dep_mm_table->entries[i].vclk) {
1298 dpm_table->dpm_levels[dpm_table->count].value =
1299 dep_mm_table->entries[i].vclk;
1300 dpm_table->dpm_levels[dpm_table->count].enabled =
1301 (i == 0) ? true : false;
1302 dpm_table->count++;
1303 }
1304 }
1305 vega10_init_dpm_state(&(dpm_table->dpm_state));
1306
1307 dpm_table = &(data->dpm_table.dclk_table);
1308 for (i = 0; i < dep_mm_table->count; i++) {
1309 if (i == 0 || dpm_table->dpm_levels
 1310 [dpm_table->count - 1].value <=
1311 dep_mm_table->entries[i].dclk) {
1312 dpm_table->dpm_levels[dpm_table->count].value =
1313 dep_mm_table->entries[i].dclk;
1314 dpm_table->dpm_levels[dpm_table->count].enabled =
1315 (i == 0) ? true : false;
1316 dpm_table->count++;
1317 }
1318 }
1319 vega10_init_dpm_state(&(dpm_table->dpm_state));
1320
1321 /* Assume there is no headless Vega10 for now */
1322 dpm_table = &(data->dpm_table.dcef_table);
1323 vega10_setup_default_single_dpm_table(hwmgr,
1324 dpm_table,
1325 dep_dcef_table);
1326
1327 vega10_init_dpm_state(&(dpm_table->dpm_state));
1328
1329 dpm_table = &(data->dpm_table.pixel_table);
1330 vega10_setup_default_single_dpm_table(hwmgr,
1331 dpm_table,
1332 dep_pix_table);
1333
1334 vega10_init_dpm_state(&(dpm_table->dpm_state));
1335
1336 dpm_table = &(data->dpm_table.display_table);
1337 vega10_setup_default_single_dpm_table(hwmgr,
1338 dpm_table,
1339 dep_disp_table);
1340
1341 vega10_init_dpm_state(&(dpm_table->dpm_state));
1342
1343 dpm_table = &(data->dpm_table.phy_table);
1344 vega10_setup_default_single_dpm_table(hwmgr,
1345 dpm_table,
1346 dep_phy_table);
1347
1348 vega10_init_dpm_state(&(dpm_table->dpm_state));
1349
1350 vega10_setup_default_pcie_table(hwmgr);
1351
1352 /* save a copy of the default DPM table */
1353 memcpy(&(data->golden_dpm_table), &(data->dpm_table),
1354 sizeof(struct vega10_dpm_table));
1355
1356 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1357 PHM_PlatformCaps_ODNinACSupport) ||
1358 phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1359 PHM_PlatformCaps_ODNinDCSupport)) {
1360 data->odn_dpm_table.odn_core_clock_dpm_levels.
1361 number_of_performance_levels = data->dpm_table.gfx_table.count;
1362 for (i = 0; i < data->dpm_table.gfx_table.count; i++) {
1363 data->odn_dpm_table.odn_core_clock_dpm_levels.
1364 performance_level_entries[i].clock =
1365 data->dpm_table.gfx_table.dpm_levels[i].value;
1366 data->odn_dpm_table.odn_core_clock_dpm_levels.
1367 performance_level_entries[i].enabled = true;
1368 }
1369
1370 data->odn_dpm_table.vdd_dependency_on_sclk.count =
1371 dep_gfx_table->count;
1372 for (i = 0; i < dep_gfx_table->count; i++) {
1373 data->odn_dpm_table.vdd_dependency_on_sclk.entries[i].clk =
1374 dep_gfx_table->entries[i].clk;
1375 data->odn_dpm_table.vdd_dependency_on_sclk.entries[i].vddInd =
1376 dep_gfx_table->entries[i].vddInd;
1377 data->odn_dpm_table.vdd_dependency_on_sclk.entries[i].cks_enable =
1378 dep_gfx_table->entries[i].cks_enable;
1379 data->odn_dpm_table.vdd_dependency_on_sclk.entries[i].cks_voffset =
1380 dep_gfx_table->entries[i].cks_voffset;
1381 }
1382
1383 data->odn_dpm_table.odn_memory_clock_dpm_levels.
1384 number_of_performance_levels = data->dpm_table.mem_table.count;
1385 for (i = 0; i < data->dpm_table.mem_table.count; i++) {
1386 data->odn_dpm_table.odn_memory_clock_dpm_levels.
1387 performance_level_entries[i].clock =
1388 data->dpm_table.mem_table.dpm_levels[i].value;
1389 data->odn_dpm_table.odn_memory_clock_dpm_levels.
1390 performance_level_entries[i].enabled = true;
1391 }
1392
1393 data->odn_dpm_table.vdd_dependency_on_mclk.count = dep_mclk_table->count;
1394 for (i = 0; i < dep_mclk_table->count; i++) {
1395 data->odn_dpm_table.vdd_dependency_on_mclk.entries[i].clk =
1396 dep_mclk_table->entries[i].clk;
1397 data->odn_dpm_table.vdd_dependency_on_mclk.entries[i].vddInd =
1398 dep_mclk_table->entries[i].vddInd;
1399 data->odn_dpm_table.vdd_dependency_on_mclk.entries[i].vddci =
1400 dep_mclk_table->entries[i].vddci;
1401 }
1402 }
1403
1404 return 0;
1405}
1406
1407/*
1408 * @fn vega10_populate_ulv_state
 1409 * @brief Function to provide parameters for Ultra Low Voltage state to SMC.
1410 *
1411 * @param hwmgr - the address of the hardware manager.
1412 * @return Always 0.
1413 */
1414static int vega10_populate_ulv_state(struct pp_hwmgr *hwmgr)
1415{
1416 struct vega10_hwmgr *data =
1417 (struct vega10_hwmgr *)(hwmgr->backend);
1418 struct phm_ppt_v2_information *table_info =
1419 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1420
1421 data->smc_state_table.pp_table.UlvOffsetVid =
 1422 (uint8_t)table_info->us_ulv_voltage_offset;
1423
1424 data->smc_state_table.pp_table.UlvSmnclkDid =
1425 (uint8_t)(table_info->us_ulv_smnclk_did);
1426 data->smc_state_table.pp_table.UlvMp1clkDid =
1427 (uint8_t)(table_info->us_ulv_mp1clk_did);
1428 data->smc_state_table.pp_table.UlvGfxclkBypass =
1429 (uint8_t)(table_info->us_ulv_gfxclk_bypass);
1430 data->smc_state_table.pp_table.UlvPhaseSheddingPsi0 =
1431 (uint8_t)(data->vddc_voltage_table.psi0_enable);
1432 data->smc_state_table.pp_table.UlvPhaseSheddingPsi1 =
1433 (uint8_t)(data->vddc_voltage_table.psi1_enable);
1434
1435 return 0;
1436}
1437
1438static int vega10_populate_single_lclk_level(struct pp_hwmgr *hwmgr,
1439 uint32_t lclock, uint8_t *curr_lclk_did)
1440{
1441 struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1442
1443 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(
1444 hwmgr,
1445 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
1446 lclock, &dividers),
1447 "Failed to get LCLK clock settings from VBIOS!",
1448 return -1);
1449
1450 *curr_lclk_did = dividers.ulDid;
1451
1452 return 0;
1453}
1454
1455static int vega10_populate_smc_link_levels(struct pp_hwmgr *hwmgr)
1456{
1457 int result = -1;
1458 struct vega10_hwmgr *data =
1459 (struct vega10_hwmgr *)(hwmgr->backend);
1460 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
1461 struct vega10_pcie_table *pcie_table =
1462 &(data->dpm_table.pcie_table);
1463 uint32_t i, j;
1464
1465 for (i = 0; i < pcie_table->count; i++) {
1466 pp_table->PcieGenSpeed[i] = pcie_table->pcie_gen[i];
1467 pp_table->PcieLaneCount[i] = pcie_table->pcie_lane[i];
1468
1469 result = vega10_populate_single_lclk_level(hwmgr,
1470 pcie_table->lclk[i], &(pp_table->LclkDid[i]));
1471 if (result) {
1472 pr_info("Populate LClock Level %d Failed!\n", i);
1473 return result;
1474 }
1475 }
1476
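	/*
	 * Pad any remaining SMC link levels with a copy of the highest valid
	 * PCIe level so that all NUM_LINK_LEVELS entries are initialized.
	 */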
1477 j = i - 1;
1478 while (i < NUM_LINK_LEVELS) {
1479 pp_table->PcieGenSpeed[i] = pcie_table->pcie_gen[j];
1480 pp_table->PcieLaneCount[i] = pcie_table->pcie_lane[j];
1481
1482 result = vega10_populate_single_lclk_level(hwmgr,
1483 pcie_table->lclk[j], &(pp_table->LclkDid[i]));
1484 if (result) {
1485 pr_info("Populate LClock Level %d Failed!\n", i);
1486 return result;
1487 }
1488 i++;
1489 }
1490
1491 return result;
1492}
1493
1494/**
 1495* Populates single SMC GFXCLK structure using the provided engine clock
1496*
1497* @param hwmgr the address of the hardware manager
1498* @param gfx_clock the GFX clock to use to populate the structure.
1499* @param current_gfxclk_level location in PPTable for the SMC GFXCLK structure.
1500*/
1501
1502static int vega10_populate_single_gfx_level(struct pp_hwmgr *hwmgr,
1503 uint32_t gfx_clock, PllSetting_t *current_gfxclk_level)
1504{
1505 struct phm_ppt_v2_information *table_info =
1506 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1507 struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_sclk =
1508 table_info->vdd_dep_on_sclk;
1509 struct vega10_hwmgr *data =
1510 (struct vega10_hwmgr *)(hwmgr->backend);
1511 struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1512 uint32_t i;
1513
1514 if (data->apply_overdrive_next_settings_mask &
1515 DPMTABLE_OD_UPDATE_VDDC)
1516 dep_on_sclk = (struct phm_ppt_v1_clock_voltage_dependency_table *)
1517 &(data->odn_dpm_table.vdd_dependency_on_sclk);
1518
1519 PP_ASSERT_WITH_CODE(dep_on_sclk,
1520 "Invalid SOC_VDD-GFX_CLK Dependency Table!",
1521 return -EINVAL);
1522
1523 for (i = 0; i < dep_on_sclk->count; i++) {
1524 if (dep_on_sclk->entries[i].clk == gfx_clock)
1525 break;
1526 }
1527
1528 PP_ASSERT_WITH_CODE(dep_on_sclk->count > i,
1529 "Cannot find gfx_clk in SOC_VDD-GFX_CLK!",
1530 return -EINVAL);
1531 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
1532 COMPUTE_GPUCLK_INPUT_FLAG_GFXCLK,
1533 gfx_clock, &dividers),
1534 "Failed to get GFX Clock settings from VBIOS!",
1535 return -EINVAL);
1536
1537 /* Feedback Multiplier: bit 0:8 int, bit 15:12 post_div, bit 31:16 frac */
1538 current_gfxclk_level->FbMult =
1539 cpu_to_le32(dividers.ulPll_fb_mult);
1540 /* Spread FB Multiplier bit: bit 0:8 int, bit 31:16 frac */
1541 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1542 PHM_PlatformCaps_EngineSpreadSpectrumSupport))
1543 current_gfxclk_level->SsOn = dividers.ucPll_ss_enable;
1544 else
1545 current_gfxclk_level->SsOn = 0;
1546 current_gfxclk_level->SsFbMult =
1547 cpu_to_le32(dividers.ulPll_ss_fbsmult);
1548 current_gfxclk_level->SsSlewFrac =
1549 cpu_to_le16(dividers.usPll_ss_slew_frac);
1550 current_gfxclk_level->Did = (uint8_t)(dividers.ulDid);
1551
1552 return 0;
1553}
1554
1555/**
1556 * @brief Populates single SMC SOCCLK structure using the provided clock.
1557 *
1558 * @param hwmgr - the address of the hardware manager.
1559 * @param soc_clock - the SOC clock to use to populate the structure.
1560 * @param current_socclk_level - location in PPTable for the SMC SOCCLK structure.
 1561 * @return 0 on success.
1562 */
1563static int vega10_populate_single_soc_level(struct pp_hwmgr *hwmgr,
1564 uint32_t soc_clock, uint8_t *current_soc_did,
1565 uint8_t *current_vol_index)
1566{
1567 struct phm_ppt_v2_information *table_info =
1568 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1569 struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_soc =
1570 table_info->vdd_dep_on_socclk;
1571 struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1572 uint32_t i;
1573
1574 PP_ASSERT_WITH_CODE(dep_on_soc,
1575 "Invalid SOC_VDD-SOC_CLK Dependency Table!",
1576 return -EINVAL);
1577 for (i = 0; i < dep_on_soc->count; i++) {
1578 if (dep_on_soc->entries[i].clk == soc_clock)
1579 break;
1580 }
1581 PP_ASSERT_WITH_CODE(dep_on_soc->count > i,
1582 "Cannot find SOC_CLK in SOC_VDD-SOC_CLK Dependency Table",
1583 return -EINVAL);
1584 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
1585 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
1586 soc_clock, &dividers),
1587 "Failed to get SOC Clock settings from VBIOS!",
1588 return -EINVAL);
1589
1590 *current_soc_did = (uint8_t)dividers.ulDid;
1591 *current_vol_index = (uint8_t)(dep_on_soc->entries[i].vddInd);
1592
1593 return 0;
1594}
1595
1596uint16_t vega10_locate_vddc_given_clock(struct pp_hwmgr *hwmgr,
1597 uint32_t clk,
1598 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table)
1599{
1600 uint16_t i;
1601
1602 for (i = 0; i < dep_table->count; i++) {
1603 if (dep_table->entries[i].clk == clk)
1604 return dep_table->entries[i].vddc;
1605 }
1606
1607 pr_info("[LocateVddcGivenClock] Cannot locate SOC Vddc for this clock!");
1608 return 0;
1609}
1610
1611/**
1612* Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states
1613*
1614* @param hwmgr the address of the hardware manager
1615*/
1616static int vega10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
1617{
1618 struct vega10_hwmgr *data =
1619 (struct vega10_hwmgr *)(hwmgr->backend);
1620 struct phm_ppt_v2_information *table_info =
1621 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1622 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
1623 table_info->vdd_dep_on_socclk;
1624 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
1625 struct vega10_single_dpm_table *dpm_table = &(data->dpm_table.gfx_table);
1626 int result = 0;
1627 uint32_t i, j;
1628
1629 for (i = 0; i < dpm_table->count; i++) {
1630 result = vega10_populate_single_gfx_level(hwmgr,
1631 dpm_table->dpm_levels[i].value,
1632 &(pp_table->GfxclkLevel[i]));
1633 if (result)
1634 return result;
1635 }
1636
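	/*
	 * Pad the remaining SMC GFXCLK slots by repeating the highest
	 * populated level so all NUM_GFXCLK_DPM_LEVELS entries are valid.
	 * The same padding pattern is used for the SOCCLK, UCLK, ECLK,
	 * VCLK and DCLK tables later in this file.
	 */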
1637 j = i - 1;
1638 while (i < NUM_GFXCLK_DPM_LEVELS) {
1639 result = vega10_populate_single_gfx_level(hwmgr,
1640 dpm_table->dpm_levels[j].value,
1641 &(pp_table->GfxclkLevel[i]));
1642 if (result)
1643 return result;
1644 i++;
1645 }
1646
1647 pp_table->GfxclkSlewRate =
1648 cpu_to_le16(table_info->us_gfxclk_slew_rate);
1649
1650 dpm_table = &(data->dpm_table.soc_table);
1651 for (i = 0; i < dpm_table->count; i++) {
1652 pp_table->SocVid[i] =
1653 (uint8_t)convert_to_vid(
1654 vega10_locate_vddc_given_clock(hwmgr,
1655 dpm_table->dpm_levels[i].value,
1656 dep_table));
1657 result = vega10_populate_single_soc_level(hwmgr,
1658 dpm_table->dpm_levels[i].value,
1659 &(pp_table->SocclkDid[i]),
1660 &(pp_table->SocDpmVoltageIndex[i]));
1661 if (result)
1662 return result;
1663 }
1664
1665 j = i - 1;
1666 while (i < NUM_SOCCLK_DPM_LEVELS) {
1667 pp_table->SocVid[i] = pp_table->SocVid[j];
1668 result = vega10_populate_single_soc_level(hwmgr,
1669 dpm_table->dpm_levels[j].value,
1670 &(pp_table->SocclkDid[i]),
1671 &(pp_table->SocDpmVoltageIndex[i]));
1672 if (result)
1673 return result;
1674 i++;
1675 }
1676
1677 return result;
1678}
1679
1680/**
1681 * @brief Populates a single SMC MCLK (UCLK) structure using the provided memory clock.
1682 *
1683 * @param hwmgr - the address of the hardware manager.
1684 * @param mem_clock - the memory clock to use to populate the structure.
1685 * @return 0 on success.
1686 */
1687static int vega10_populate_single_memory_level(struct pp_hwmgr *hwmgr,
1688 uint32_t mem_clock, uint8_t *current_mem_vid,
1689 PllSetting_t *current_memclk_level, uint8_t *current_mem_soc_vind)
1690{
1691 struct vega10_hwmgr *data =
1692 (struct vega10_hwmgr *)(hwmgr->backend);
1693 struct phm_ppt_v2_information *table_info =
1694 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1695 struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_mclk =
1696 table_info->vdd_dep_on_mclk;
1697 struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1698 uint32_t i;
1699
1700 if (data->apply_overdrive_next_settings_mask &
1701 DPMTABLE_OD_UPDATE_VDDC)
1702 dep_on_mclk = (struct phm_ppt_v1_clock_voltage_dependency_table *)
1703 &data->odn_dpm_table.vdd_dependency_on_mclk;
1704
1705 PP_ASSERT_WITH_CODE(dep_on_mclk,
1706 "Invalid SOC_VDD-UCLK Dependency Table!",
1707 return -EINVAL);
1708
1709 for (i = 0; i < dep_on_mclk->count; i++) {
1710 if (dep_on_mclk->entries[i].clk == mem_clock)
1711 break;
1712 }
1713
1714 PP_ASSERT_WITH_CODE(dep_on_mclk->count > i,
1715 "Cannot find UCLK in SOC_VDD-UCLK Dependency Table!",
1716 return -EINVAL);
1717
1718 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(
1719 hwmgr, COMPUTE_GPUCLK_INPUT_FLAG_UCLK, mem_clock, &dividers),
1720 "Failed to get UCLK settings from VBIOS!",
1721 return -1);
1722
1723 *current_mem_vid =
1724 (uint8_t)(convert_to_vid(dep_on_mclk->entries[i].mvdd));
1725 *current_mem_soc_vind =
1726 (uint8_t)(dep_on_mclk->entries[i].vddInd);
1727 current_memclk_level->FbMult = cpu_to_le32(dividers.ulPll_fb_mult);
1728 current_memclk_level->Did = (uint8_t)(dividers.ulDid);
1729
1730 PP_ASSERT_WITH_CODE(current_memclk_level->Did >= 1,
1731 "Invalid Divider ID!",
1732 return -EINVAL);
1733
1734 return 0;
1735}
1736
1737/**
1738 * @brief Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states.
1739 *
1740 * @param hwmgr - the address of the hardware manager.
1741 * @return 0 on success.
1742 */
1743static int vega10_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
1744{
1745 struct vega10_hwmgr *data =
1746 (struct vega10_hwmgr *)(hwmgr->backend);
1747 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
1748 struct vega10_single_dpm_table *dpm_table =
1749 &(data->dpm_table.mem_table);
1750 int result = 0;
1751 uint32_t i, j, reg, mem_channels;
1752
1753 for (i = 0; i < dpm_table->count; i++) {
1754 result = vega10_populate_single_memory_level(hwmgr,
1755 dpm_table->dpm_levels[i].value,
1756 &(pp_table->MemVid[i]),
1757 &(pp_table->UclkLevel[i]),
1758 &(pp_table->MemSocVoltageIndex[i]));
1759 if (result)
1760 return result;
1761 }
1762
1763 j = i - 1;
1764 while (i < NUM_UCLK_DPM_LEVELS) {
1765 result = vega10_populate_single_memory_level(hwmgr,
1766 dpm_table->dpm_levels[j].value,
1767 &(pp_table->MemVid[i]),
1768 &(pp_table->UclkLevel[i]),
1769 &(pp_table->MemSocVoltageIndex[i]));
1770 if (result)
1771 return result;
1772 i++;
1773 }
1774
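	/*
	 * Derive the memory channel configuration from the Data Fabric
	 * DramBaseAddress0.IntLvNumChan field; channel_number[] maps that
	 * interleave encoding to a channel count used to scale the 128-bit
	 * HBM channel width.
	 */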
1775 reg = soc15_get_register_offset(DF_HWID, 0,
1776 mmDF_CS_AON0_DramBaseAddress0_BASE_IDX,
1777 mmDF_CS_AON0_DramBaseAddress0);
1778 mem_channels = (cgs_read_register(hwmgr->device, reg) &
1779 DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK) >>
1780 DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;
1781 pp_table->NumMemoryChannels = cpu_to_le16(mem_channels);
1782 pp_table->MemoryChannelWidth =
1783 cpu_to_le16(HBM_MEMORY_CHANNEL_WIDTH *
1784 channel_number[mem_channels]);
1785
1786 pp_table->LowestUclkReservedForUlv =
1787 (uint8_t)(data->lowest_uclk_reserved_for_ulv);
1788
1789 return result;
1790}
1791
1792static int vega10_populate_single_display_type(struct pp_hwmgr *hwmgr,
1793 DSPCLK_e disp_clock)
1794{
1795 struct vega10_hwmgr *data =
1796 (struct vega10_hwmgr *)(hwmgr->backend);
1797 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
1798 struct phm_ppt_v2_information *table_info =
1799 (struct phm_ppt_v2_information *)
1800 (hwmgr->pptable);
1801 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
1802 uint32_t i;
1803 uint16_t clk = 0, vddc = 0;
1804 uint8_t vid = 0;
1805
1806 switch (disp_clock) {
1807 case DSPCLK_DCEFCLK:
1808 dep_table = table_info->vdd_dep_on_dcefclk;
1809 break;
1810 case DSPCLK_DISPCLK:
1811 dep_table = table_info->vdd_dep_on_dispclk;
1812 break;
1813 case DSPCLK_PIXCLK:
1814 dep_table = table_info->vdd_dep_on_pixclk;
1815 break;
1816 case DSPCLK_PHYCLK:
1817 dep_table = table_info->vdd_dep_on_phyclk;
1818 break;
1819 default:
1820 return -1;
1821 }
1822
1823 PP_ASSERT_WITH_CODE(dep_table->count <= NUM_DSPCLK_LEVELS,
1824 "Number Of Entries Exceeded maximum!",
1825 return -1);
1826
1827 for (i = 0; i < dep_table->count; i++) {
1828 clk = (uint16_t)(dep_table->entries[i].clk / 100);
1829 vddc = table_info->vddc_lookup_table->
1830 entries[dep_table->entries[i].vddInd].us_vdd;
1831 vid = (uint8_t)convert_to_vid(vddc);
1832 pp_table->DisplayClockTable[disp_clock][i].Freq =
1833 cpu_to_le16(clk);
1834 pp_table->DisplayClockTable[disp_clock][i].Vid =
1835 cpu_to_le16(vid);
1836 }
1837
1838 while (i < NUM_DSPCLK_LEVELS) {
1839 pp_table->DisplayClockTable[disp_clock][i].Freq =
1840 cpu_to_le16(clk);
1841 pp_table->DisplayClockTable[disp_clock][i].Vid =
1842 cpu_to_le16(vid);
1843 i++;
1844 }
1845
1846 return 0;
1847}
1848
1849static int vega10_populate_all_display_clock_levels(struct pp_hwmgr *hwmgr)
1850{
1851 uint32_t i;
1852
1853 for (i = 0; i < DSPCLK_COUNT; i++) {
1854 PP_ASSERT_WITH_CODE(!vega10_populate_single_display_type(hwmgr, i),
1855 "Failed to populate Clock in DisplayClockTable!",
1856 return -1);
1857 }
1858
1859 return 0;
1860}
1861
1862static int vega10_populate_single_eclock_level(struct pp_hwmgr *hwmgr,
1863 uint32_t eclock, uint8_t *current_eclk_did,
1864 uint8_t *current_soc_vol)
1865{
1866 struct phm_ppt_v2_information *table_info =
1867 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1868 struct phm_ppt_v1_mm_clock_voltage_dependency_table *dep_table =
1869 table_info->mm_dep_table;
1870 struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1871 uint32_t i;
1872
1873 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
1874 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
1875 eclock, &dividers),
1876 "Failed to get ECLK clock settings from VBIOS!",
1877 return -1);
1878
1879 *current_eclk_did = (uint8_t)dividers.ulDid;
1880
1881 for (i = 0; i < dep_table->count; i++) {
1882 if (dep_table->entries[i].eclk == eclock)
1883 *current_soc_vol = dep_table->entries[i].vddcInd;
1884 }
1885
1886 return 0;
1887}
1888
1889static int vega10_populate_smc_vce_levels(struct pp_hwmgr *hwmgr)
1890{
1891 struct vega10_hwmgr *data =
1892 (struct vega10_hwmgr *)(hwmgr->backend);
1893 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
1894 struct vega10_single_dpm_table *dpm_table = &(data->dpm_table.eclk_table);
1895 int result = -EINVAL;
1896 uint32_t i, j;
1897
1898 for (i = 0; i < dpm_table->count; i++) {
1899 result = vega10_populate_single_eclock_level(hwmgr,
1900 dpm_table->dpm_levels[i].value,
1901 &(pp_table->EclkDid[i]),
1902 &(pp_table->VceDpmVoltageIndex[i]));
1903 if (result)
1904 return result;
1905 }
1906
1907 j = i - 1;
1908 while (i < NUM_VCE_DPM_LEVELS) {
1909 result = vega10_populate_single_eclock_level(hwmgr,
1910 dpm_table->dpm_levels[j].value,
1911 &(pp_table->EclkDid[i]),
1912 &(pp_table->VceDpmVoltageIndex[i]));
1913 if (result)
1914 return result;
1915 i++;
1916 }
1917
1918 return result;
1919}
1920
1921static int vega10_populate_single_vclock_level(struct pp_hwmgr *hwmgr,
1922 uint32_t vclock, uint8_t *current_vclk_did)
1923{
1924 struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1925
1926 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
1927 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
1928 vclock, &dividers),
1929 "Failed to get VCLK clock settings from VBIOS!",
1930 return -EINVAL);
1931
1932 *current_vclk_did = (uint8_t)dividers.ulDid;
1933
1934 return 0;
1935}
1936
1937static int vega10_populate_single_dclock_level(struct pp_hwmgr *hwmgr,
1938 uint32_t dclock, uint8_t *current_dclk_did)
1939{
1940 struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1941
1942 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
1943 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
1944 dclock, &dividers),
1945 "Failed to get DCLK clock settings from VBIOS!",
1946 return -EINVAL);
1947
1948 *current_dclk_did = (uint8_t)dividers.ulDid;
1949
1950 return 0;
1951}
1952
1953static int vega10_populate_smc_uvd_levels(struct pp_hwmgr *hwmgr)
1954{
1955 struct vega10_hwmgr *data =
1956 (struct vega10_hwmgr *)(hwmgr->backend);
1957 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
1958 struct vega10_single_dpm_table *vclk_dpm_table =
1959 &(data->dpm_table.vclk_table);
1960 struct vega10_single_dpm_table *dclk_dpm_table =
1961 &(data->dpm_table.dclk_table);
1962 struct phm_ppt_v2_information *table_info =
1963 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1964 struct phm_ppt_v1_mm_clock_voltage_dependency_table *dep_table =
1965 table_info->mm_dep_table;
1966 int result = -EINVAL;
1967 uint32_t i, j;
1968
1969 for (i = 0; i < vclk_dpm_table->count; i++) {
1970 result = vega10_populate_single_vclock_level(hwmgr,
1971 vclk_dpm_table->dpm_levels[i].value,
1972 &(pp_table->VclkDid[i]));
1973 if (result)
1974 return result;
1975 }
1976
1977 j = i - 1;
1978 while (i < NUM_UVD_DPM_LEVELS) {
1979 result = vega10_populate_single_vclock_level(hwmgr,
1980 vclk_dpm_table->dpm_levels[j].value,
1981 &(pp_table->VclkDid[i]));
1982 if (result)
1983 return result;
1984 i++;
1985 }
1986
1987 for (i = 0; i < dclk_dpm_table->count; i++) {
1988 result = vega10_populate_single_dclock_level(hwmgr,
1989 dclk_dpm_table->dpm_levels[i].value,
1990 &(pp_table->DclkDid[i]));
1991 if (result)
1992 return result;
1993 }
1994
1995 j = i - 1;
1996 while (i < NUM_UVD_DPM_LEVELS) {
1997 result = vega10_populate_single_dclock_level(hwmgr,
1998 dclk_dpm_table->dpm_levels[j].value,
1999 &(pp_table->DclkDid[i]));
2000 if (result)
2001 return result;
2002 i++;
2003 }
2004
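	/*
	 * The MM dependency table is expected to line up index-for-index with
	 * the VCLK/DCLK DPM tables; fail if any entry disagrees.
	 */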
2005 for (i = 0; i < dep_table->count; i++) {
2006 if (dep_table->entries[i].vclk ==
2007 vclk_dpm_table->dpm_levels[i].value &&
2008 dep_table->entries[i].dclk ==
2009 dclk_dpm_table->dpm_levels[i].value)
2010 pp_table->UvdDpmVoltageIndex[i] =
2011 dep_table->entries[i].vddcInd;
2012 else
2013 return -1;
2014 }
2015
2016 j = i - 1;
2017 while (i < NUM_UVD_DPM_LEVELS) {
2018 pp_table->UvdDpmVoltageIndex[i] = dep_table->entries[j].vddcInd;
2019 i++;
2020 }
2021
2022 return 0;
2023}
2024
2025static int vega10_populate_clock_stretcher_table(struct pp_hwmgr *hwmgr)
2026{
2027 struct vega10_hwmgr *data =
2028 (struct vega10_hwmgr *)(hwmgr->backend);
2029 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
2030 struct phm_ppt_v2_information *table_info =
2031 (struct phm_ppt_v2_information *)(hwmgr->pptable);
2032 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
2033 table_info->vdd_dep_on_sclk;
2034 uint32_t i;
2035
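	/*
	 * Copy the per-level clock-stretcher enable flags and convert the
	 * voltage offsets to VID steps (scaled by 100/625, i.e. divided by
	 * 6.25, presumably matching the SVI2 VID step size).
	 */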
afc0255c 2036 for (i = 0; i < dep_table->count; i++) {
f83a9991 2037 pp_table->CksEnable[i] = dep_table->entries[i].cks_enable;
afc0255c
RZ
2038 pp_table->CksVidOffset[i] = (uint8_t)(dep_table->entries[i].cks_voffset
2039 * VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
f83a9991
EH
2040 }
2041
2042 return 0;
2043}
2044
2045static int vega10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
2046{
2047 struct vega10_hwmgr *data =
2048 (struct vega10_hwmgr *)(hwmgr->backend);
2049 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
2050 struct phm_ppt_v2_information *table_info =
2051 (struct phm_ppt_v2_information *)(hwmgr->pptable);
2052 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
2053 table_info->vdd_dep_on_sclk;
2054 struct pp_atomfwctrl_avfs_parameters avfs_params = {0};
2055 int result = 0;
2056 uint32_t i;
2057
2058 pp_table->MinVoltageVid = (uint8_t)0xff;
2059 pp_table->MaxVoltageVid = (uint8_t)0;
2060
2061 if (data->smu_features[GNLD_AVFS].supported) {
2062 result = pp_atomfwctrl_get_avfs_information(hwmgr, &avfs_params);
2063 if (!result) {
2064 pp_table->MinVoltageVid = (uint8_t)
f83a9991 2065 convert_to_vid((uint16_t)(avfs_params.ulMinVddc));
6524e494
RZ
2066 pp_table->MaxVoltageVid = (uint8_t)
2067 convert_to_vid((uint16_t)(avfs_params.ulMaxVddc));
2068
2069 pp_table->AConstant[0] = cpu_to_le32(avfs_params.ulMeanNsigmaAcontant0);
2070 pp_table->AConstant[1] = cpu_to_le32(avfs_params.ulMeanNsigmaAcontant1);
2071 pp_table->AConstant[2] = cpu_to_le32(avfs_params.ulMeanNsigmaAcontant2);
2072 pp_table->DC_tol_sigma = cpu_to_le16(avfs_params.usMeanNsigmaDcTolSigma);
2073 pp_table->Platform_mean = cpu_to_le16(avfs_params.usMeanNsigmaPlatformMean);
2074 pp_table->Platform_sigma = cpu_to_le16(avfs_params.usMeanNsigmaDcTolSigma);
2075 pp_table->PSM_Age_CompFactor = cpu_to_le16(avfs_params.usPsmAgeComfactor);
f83a9991
EH
2076
2077 pp_table->BtcGbVdroopTableCksOff.a0 =
2078 cpu_to_le32(avfs_params.ulGbVdroopTableCksoffA0);
6524e494 2079 pp_table->BtcGbVdroopTableCksOff.a0_shift = 20;
f83a9991
EH
2080 pp_table->BtcGbVdroopTableCksOff.a1 =
2081 cpu_to_le32(avfs_params.ulGbVdroopTableCksoffA1);
6524e494 2082 pp_table->BtcGbVdroopTableCksOff.a1_shift = 20;
f83a9991
EH
2083 pp_table->BtcGbVdroopTableCksOff.a2 =
2084 cpu_to_le32(avfs_params.ulGbVdroopTableCksoffA2);
6524e494
RZ
2085 pp_table->BtcGbVdroopTableCksOff.a2_shift = 20;
2086
2087 pp_table->OverrideBtcGbCksOn = avfs_params.ucEnableGbVdroopTableCkson;
2088 pp_table->BtcGbVdroopTableCksOn.a0 =
2089 cpu_to_le32(avfs_params.ulGbVdroopTableCksonA0);
2090 pp_table->BtcGbVdroopTableCksOn.a0_shift = 20;
2091 pp_table->BtcGbVdroopTableCksOn.a1 =
2092 cpu_to_le32(avfs_params.ulGbVdroopTableCksonA1);
2093 pp_table->BtcGbVdroopTableCksOn.a1_shift = 20;
2094 pp_table->BtcGbVdroopTableCksOn.a2 =
2095 cpu_to_le32(avfs_params.ulGbVdroopTableCksonA2);
2096 pp_table->BtcGbVdroopTableCksOn.a2_shift = 20;
f83a9991
EH
2097
2098 pp_table->AvfsGbCksOn.m1 =
2099 cpu_to_le32(avfs_params.ulGbFuseTableCksonM1);
2100 pp_table->AvfsGbCksOn.m2 =
6524e494 2101 cpu_to_le16(avfs_params.ulGbFuseTableCksonM2);
f83a9991
EH
2102 pp_table->AvfsGbCksOn.b =
2103 cpu_to_le32(avfs_params.ulGbFuseTableCksonB);
2104 pp_table->AvfsGbCksOn.m1_shift = 24;
2105 pp_table->AvfsGbCksOn.m2_shift = 12;
6524e494 2106 pp_table->AvfsGbCksOn.b_shift = 0;
f83a9991 2107
6524e494
RZ
2108 pp_table->OverrideAvfsGbCksOn =
2109 avfs_params.ucEnableGbFuseTableCkson;
f83a9991
EH
2110 pp_table->AvfsGbCksOff.m1 =
2111 cpu_to_le32(avfs_params.ulGbFuseTableCksoffM1);
2112 pp_table->AvfsGbCksOff.m2 =
6524e494 2113 cpu_to_le16(avfs_params.ulGbFuseTableCksoffM2);
f83a9991
EH
2114 pp_table->AvfsGbCksOff.b =
2115 cpu_to_le32(avfs_params.ulGbFuseTableCksoffB);
2116 pp_table->AvfsGbCksOff.m1_shift = 24;
2117 pp_table->AvfsGbCksOff.m2_shift = 12;
6524e494
RZ
2118 pp_table->AvfsGbCksOff.b_shift = 0;
2119
2120 for (i = 0; i < dep_table->count; i++) {
2121 if (dep_table->entries[i].sclk_offset == 0)
2122 pp_table->StaticVoltageOffsetVid[i] = 248;
2123 else
2124 pp_table->StaticVoltageOffsetVid[i] =
2125 (uint8_t)(dep_table->entries[i].sclk_offset *
f83a9991
EH
2126 VOLTAGE_VID_OFFSET_SCALE2 /
2127 VOLTAGE_VID_OFFSET_SCALE1);
6524e494 2128 }
f83a9991
EH
2129
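			/*
			 * Each DisplayClock2Gfxclk entry is a quadratic fit
			 * (m1, m2, b plus fixed-point shifts). Registry-supplied
			 * coefficients override the VBIOS AVFS defaults when
			 * both the a and b terms differ from the default key.
			 */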
2130 if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2131 data->disp_clk_quad_eqn_a) &&
2132 (PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2133 data->disp_clk_quad_eqn_b)) {
2134 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m1 =
2135 (int32_t)data->disp_clk_quad_eqn_a;
2136 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m2 =
6524e494 2137 (int32_t)data->disp_clk_quad_eqn_b;
f83a9991
EH
2138 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].b =
2139 (int32_t)data->disp_clk_quad_eqn_c;
2140 } else {
2141 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m1 =
2142 (int32_t)avfs_params.ulDispclk2GfxclkM1;
2143 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m2 =
6524e494 2144 (int32_t)avfs_params.ulDispclk2GfxclkM2;
f83a9991
EH
2145 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].b =
2146 (int32_t)avfs_params.ulDispclk2GfxclkB;
2147 }
2148
2149 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m1_shift = 24;
2150 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m2_shift = 12;
4bae05e1 2151 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].b_shift = 12;
f83a9991
EH
2152
2153 if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2154 data->dcef_clk_quad_eqn_a) &&
2155 (PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2156 data->dcef_clk_quad_eqn_b)) {
2157 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m1 =
2158 (int32_t)data->dcef_clk_quad_eqn_a;
2159 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m2 =
6524e494 2160 (int32_t)data->dcef_clk_quad_eqn_b;
f83a9991
EH
2161 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].b =
2162 (int32_t)data->dcef_clk_quad_eqn_c;
2163 } else {
2164 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m1 =
2165 (int32_t)avfs_params.ulDcefclk2GfxclkM1;
2166 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m2 =
6524e494 2167 (int32_t)avfs_params.ulDcefclk2GfxclkM2;
f83a9991
EH
2168 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].b =
2169 (int32_t)avfs_params.ulDcefclk2GfxclkB;
2170 }
2171
2172 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m1_shift = 24;
2173 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m2_shift = 12;
4bae05e1 2174 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].b_shift = 12;
f83a9991
EH
2175
2176 if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2177 data->pixel_clk_quad_eqn_a) &&
2178 (PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2179 data->pixel_clk_quad_eqn_b)) {
2180 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m1 =
2181 (int32_t)data->pixel_clk_quad_eqn_a;
2182 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m2 =
6524e494 2183 (int32_t)data->pixel_clk_quad_eqn_b;
f83a9991
EH
2184 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].b =
2185 (int32_t)data->pixel_clk_quad_eqn_c;
2186 } else {
2187 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m1 =
2188 (int32_t)avfs_params.ulPixelclk2GfxclkM1;
2189 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m2 =
6524e494 2190 (int32_t)avfs_params.ulPixelclk2GfxclkM2;
f83a9991
EH
2191 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].b =
2192 (int32_t)avfs_params.ulPixelclk2GfxclkB;
2193 }
2194
2195 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m1_shift = 24;
2196 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m2_shift = 12;
4bae05e1 2197 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].b_shift = 12;
f83a9991
EH
2198 if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2199 data->phy_clk_quad_eqn_a) &&
2200 (PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2201 data->phy_clk_quad_eqn_b)) {
2202 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m1 =
2203 (int32_t)data->phy_clk_quad_eqn_a;
2204 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m2 =
6524e494 2205 (int32_t)data->phy_clk_quad_eqn_b;
f83a9991
EH
2206 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].b =
2207 (int32_t)data->phy_clk_quad_eqn_c;
2208 } else {
2209 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m1 =
2210 (int32_t)avfs_params.ulPhyclk2GfxclkM1;
2211 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m2 =
6524e494 2212 (int32_t)avfs_params.ulPhyclk2GfxclkM2;
f83a9991
EH
2213 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].b =
2214 (int32_t)avfs_params.ulPhyclk2GfxclkB;
2215 }
2216
2217 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m1_shift = 24;
2218 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m2_shift = 12;
4bae05e1 2219 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].b_shift = 12;
f83a9991
EH
2220 } else {
2221 data->smu_features[GNLD_AVFS].supported = false;
2222 }
2223 }
2224
2225 return 0;
2226}
2227
2228static int vega10_populate_gpio_parameters(struct pp_hwmgr *hwmgr)
2229{
2230 struct vega10_hwmgr *data =
2231 (struct vega10_hwmgr *)(hwmgr->backend);
2232 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
2233 struct pp_atomfwctrl_gpio_parameters gpio_params = {0};
2234 int result;
2235
2236 result = pp_atomfwctrl_get_gpio_information(hwmgr, &gpio_params);
2237 if (!result) {
2238 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2239 PHM_PlatformCaps_RegulatorHot) &&
2240 (data->registry_data.regulator_hot_gpio_support)) {
2241 pp_table->VR0HotGpio = gpio_params.ucVR0HotGpio;
2242 pp_table->VR0HotPolarity = gpio_params.ucVR0HotPolarity;
2243 pp_table->VR1HotGpio = gpio_params.ucVR1HotGpio;
2244 pp_table->VR1HotPolarity = gpio_params.ucVR1HotPolarity;
2245 } else {
2246 pp_table->VR0HotGpio = 0;
2247 pp_table->VR0HotPolarity = 0;
2248 pp_table->VR1HotGpio = 0;
2249 pp_table->VR1HotPolarity = 0;
2250 }
2251
2252 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2253 PHM_PlatformCaps_AutomaticDCTransition) &&
2254 (data->registry_data.ac_dc_switch_gpio_support)) {
2255 pp_table->AcDcGpio = gpio_params.ucAcDcGpio;
2256 pp_table->AcDcPolarity = gpio_params.ucAcDcPolarity;
2257 } else {
2258 pp_table->AcDcGpio = 0;
2259 pp_table->AcDcPolarity = 0;
2260 }
2261 }
2262
2263 return result;
2264}
2265
2266static int vega10_avfs_enable(struct pp_hwmgr *hwmgr, bool enable)
2267{
2268 struct vega10_hwmgr *data =
2269 (struct vega10_hwmgr *)(hwmgr->backend);
2270
2271 if (data->smu_features[GNLD_AVFS].supported) {
2272 if (enable) {
2273 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
2274 true,
2275 data->smu_features[GNLD_AVFS].smu_feature_bitmap),
2276 "[avfs_control] Attempt to Enable AVFS feature Failed!",
2277 return -1);
2278 data->smu_features[GNLD_AVFS].enabled = true;
2279 } else {
2280 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
2281 false,
2282 data->smu_features[GNLD_AVFS].smu_feature_id),
2283 "[avfs_control] Attempt to Disable AVFS feature Failed!",
2284 return -1);
2285 data->smu_features[GNLD_AVFS].enabled = false;
2286 }
2287 }
2288
2289 return 0;
2290}
2291
ab5cf3a5
RZ
2292static int vega10_populate_and_upload_avfs_fuse_override(struct pp_hwmgr *hwmgr)
2293{
2294 int result = 0;
2295
2296 uint64_t serial_number = 0;
2297 uint32_t top32, bottom32;
2298 struct phm_fuses_default fuse;
2299
2300 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
2301 AvfsFuseOverride_t *avfs_fuse_table = &(data->smc_state_table.avfs_fuse_override_table);
2302
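	/*
	 * Read the chip serial number from the SMU in two 32-bit halves; it
	 * is used below to look up per-part AVFS fuse overrides in
	 * vega10_fuses_default.
	 */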
2303 smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_ReadSerialNumTop32);
2304 vega10_read_arg_from_smc(hwmgr->smumgr, &top32);
2305
2306 smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_ReadSerialNumBottom32);
2307 vega10_read_arg_from_smc(hwmgr->smumgr, &bottom32);
2308
2309 serial_number = ((uint64_t)bottom32 << 32) | top32;
2310
2311 if (pp_override_get_default_fuse_value(serial_number, vega10_fuses_default, &fuse) == 0) {
2312 avfs_fuse_table->VFT0_b = fuse.VFT0_b;
2313 avfs_fuse_table->VFT0_m1 = fuse.VFT0_m1;
2314 avfs_fuse_table->VFT0_m2 = fuse.VFT0_m2;
2315 avfs_fuse_table->VFT1_b = fuse.VFT1_b;
2316 avfs_fuse_table->VFT1_m1 = fuse.VFT1_m1;
2317 avfs_fuse_table->VFT1_m2 = fuse.VFT1_m2;
2318 avfs_fuse_table->VFT2_b = fuse.VFT2_b;
2319 avfs_fuse_table->VFT2_m1 = fuse.VFT2_m1;
2320 avfs_fuse_table->VFT2_m2 = fuse.VFT2_m2;
2321 result = vega10_copy_table_to_smc(hwmgr->smumgr,
2322 (uint8_t *)avfs_fuse_table, AVFSFUSETABLE);
2323 PP_ASSERT_WITH_CODE(!result,
2324 "Failed to upload FuseOVerride!",
2325 );
2326 }
2327
2328 return result;
2329}
2330
d6c025d2
EH
2331static int vega10_save_default_power_profile(struct pp_hwmgr *hwmgr)
2332{
2333 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
2334 struct vega10_single_dpm_table *dpm_table = &(data->dpm_table.gfx_table);
2335 uint32_t min_level;
2336
2337 hwmgr->default_gfx_power_profile.type = AMD_PP_GFX_PROFILE;
2338 hwmgr->default_compute_power_profile.type = AMD_PP_COMPUTE_PROFILE;
2339
2340 /* Optimize compute power profile: Use only highest
2341 * 2 power levels (if more than 2 are available)
2342 */
2343 if (dpm_table->count > 2)
2344 min_level = dpm_table->count - 2;
2345 else if (dpm_table->count == 2)
2346 min_level = 1;
2347 else
2348 min_level = 0;
2349
2350 hwmgr->default_compute_power_profile.min_sclk =
2351 dpm_table->dpm_levels[min_level].value;
2352
2353 hwmgr->gfx_power_profile = hwmgr->default_gfx_power_profile;
2354 hwmgr->compute_power_profile = hwmgr->default_compute_power_profile;
2355
2356 return 0;
2357}
2358
f83a9991
EH
2359/**
2360* Initializes the SMC table and uploads it
2361*
2362* @param hwmgr the address of the powerplay hardware manager.
2363*
2364* @return 0 on success, otherwise an error code
2365*/
2366static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
2367{
2368 int result;
2369 struct vega10_hwmgr *data =
2370 (struct vega10_hwmgr *)(hwmgr->backend);
2371 struct phm_ppt_v2_information *table_info =
2372 (struct phm_ppt_v2_information *)(hwmgr->pptable);
2373 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
2374 struct pp_atomfwctrl_voltage_table voltage_table;
05ee3215 2375 struct pp_atomfwctrl_bios_boot_up_values boot_up_values;
f83a9991
EH
2376
2377 result = vega10_setup_default_dpm_tables(hwmgr);
2378 PP_ASSERT_WITH_CODE(!result,
2379 "Failed to setup default DPM tables!",
2380 return result);
2381
2382 pp_atomfwctrl_get_voltage_table_v4(hwmgr, VOLTAGE_TYPE_VDDC,
2383 VOLTAGE_OBJ_SVID2, &voltage_table);
2384 pp_table->MaxVidStep = voltage_table.max_vid_step;
2385
2386 pp_table->GfxDpmVoltageMode =
2387 (uint8_t)(table_info->uc_gfx_dpm_voltage_mode);
2388 pp_table->SocDpmVoltageMode =
2389 (uint8_t)(table_info->uc_soc_dpm_voltage_mode);
2390 pp_table->UclkDpmVoltageMode =
2391 (uint8_t)(table_info->uc_uclk_dpm_voltage_mode);
2392 pp_table->UvdDpmVoltageMode =
2393 (uint8_t)(table_info->uc_uvd_dpm_voltage_mode);
2394 pp_table->VceDpmVoltageMode =
2395 (uint8_t)(table_info->uc_vce_dpm_voltage_mode);
2396 pp_table->Mp0DpmVoltageMode =
2397 (uint8_t)(table_info->uc_mp0_dpm_voltage_mode);
effa290c 2398
f83a9991
EH
2399 pp_table->DisplayDpmVoltageMode =
2400 (uint8_t)(table_info->uc_dcef_dpm_voltage_mode);
2401
2402 if (data->registry_data.ulv_support &&
2403 table_info->us_ulv_voltage_offset) {
2404 result = vega10_populate_ulv_state(hwmgr);
2405 PP_ASSERT_WITH_CODE(!result,
2406 "Failed to initialize ULV state!",
2407 return result);
2408 }
2409
2410 result = vega10_populate_smc_link_levels(hwmgr);
2411 PP_ASSERT_WITH_CODE(!result,
2412 "Failed to initialize Link Level!",
2413 return result);
2414
2415 result = vega10_populate_all_graphic_levels(hwmgr);
2416 PP_ASSERT_WITH_CODE(!result,
2417 "Failed to initialize Graphics Level!",
2418 return result);
2419
2420 result = vega10_populate_all_memory_levels(hwmgr);
2421 PP_ASSERT_WITH_CODE(!result,
2422 "Failed to initialize Memory Level!",
2423 return result);
2424
2425 result = vega10_populate_all_display_clock_levels(hwmgr);
2426 PP_ASSERT_WITH_CODE(!result,
2427 "Failed to initialize Display Level!",
2428 return result);
2429
2430 result = vega10_populate_smc_vce_levels(hwmgr);
2431 PP_ASSERT_WITH_CODE(!result,
2432 "Failed to initialize VCE Level!",
2433 return result);
2434
2435 result = vega10_populate_smc_uvd_levels(hwmgr);
2436 PP_ASSERT_WITH_CODE(!result,
2437 "Failed to initialize UVD Level!",
2438 return result);
2439
afc0255c 2440 if (data->registry_data.clock_stretcher_support) {
f83a9991
EH
2441 result = vega10_populate_clock_stretcher_table(hwmgr);
2442 PP_ASSERT_WITH_CODE(!result,
2443 "Failed to populate Clock Stretcher Table!",
2444 return result);
2445 }
2446
05ee3215
RZ
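	/*
	 * Cache the VBIOS boot-up voltages and clocks. If a boot VDDC is
	 * reported, ask the SMU to hold the SOC voltage floor at that level
	 * (usVddc * 4, presumably the SMU's 0.25 mV units) until DPM is
	 * started, where the floor is released again.
	 */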
2447 result = pp_atomfwctrl_get_vbios_bootup_values(hwmgr, &boot_up_values);
2448 if (!result) {
2449 data->vbios_boot_state.vddc = boot_up_values.usVddc;
2450 data->vbios_boot_state.vddci = boot_up_values.usVddci;
2451 data->vbios_boot_state.mvddc = boot_up_values.usMvddc;
2452 data->vbios_boot_state.gfx_clock = boot_up_values.ulGfxClk;
2453 data->vbios_boot_state.mem_clock = boot_up_values.ulUClk;
2454 data->vbios_boot_state.soc_clock = boot_up_values.ulSocClk;
2455 if (0 != boot_up_values.usVddc) {
2456 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
2457 PPSMC_MSG_SetFloorSocVoltage,
2458 (boot_up_values.usVddc * 4));
2459 data->vbios_boot_state.bsoc_vddc_lock = true;
2460 } else {
2461 data->vbios_boot_state.bsoc_vddc_lock = false;
2462 }
2463 }
2464
f83a9991
EH
2465 result = vega10_populate_avfs_parameters(hwmgr);
2466 PP_ASSERT_WITH_CODE(!result,
2467 "Failed to initialize AVFS Parameters!",
2468 return result);
2469
2470 result = vega10_populate_gpio_parameters(hwmgr);
2471 PP_ASSERT_WITH_CODE(!result,
2472 "Failed to initialize GPIO Parameters!",
2473 return result);
2474
2475 pp_table->GfxclkAverageAlpha = (uint8_t)
2476 (data->gfxclk_average_alpha);
2477 pp_table->SocclkAverageAlpha = (uint8_t)
2478 (data->socclk_average_alpha);
2479 pp_table->UclkAverageAlpha = (uint8_t)
2480 (data->uclk_average_alpha);
2481 pp_table->GfxActivityAverageAlpha = (uint8_t)
2482 (data->gfx_activity_average_alpha);
2483
ab5cf3a5
RZ
2484 vega10_populate_and_upload_avfs_fuse_override(hwmgr);
2485
f83a9991
EH
2486 result = vega10_copy_table_to_smc(hwmgr->smumgr,
2487 (uint8_t *)pp_table, PPTABLE);
2488 PP_ASSERT_WITH_CODE(!result,
2489 "Failed to upload PPtable!", return result);
2490
2211a787
RZ
2491 result = vega10_avfs_enable(hwmgr, true);
2492 PP_ASSERT_WITH_CODE(!result, "Attempt to enable AVFS feature Failed!",
f83a9991 2493 return result);
f83a9991 2494
d6c025d2
EH
2495 vega10_save_default_power_profile(hwmgr);
2496
f83a9991
EH
2497 return 0;
2498}
2499
2500static int vega10_enable_thermal_protection(struct pp_hwmgr *hwmgr)
2501{
2502 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
2503
2504 if (data->smu_features[GNLD_THERMAL].supported) {
2505 if (data->smu_features[GNLD_THERMAL].enabled)
2506 pr_info("THERMAL Feature Already enabled!");
2507
2508 PP_ASSERT_WITH_CODE(
2509 !vega10_enable_smc_features(hwmgr->smumgr,
2510 true,
2511 data->smu_features[GNLD_THERMAL].smu_feature_bitmap),
2512 "Enable THERMAL Feature Failed!",
2513 return -1);
2514 data->smu_features[GNLD_THERMAL].enabled = true;
2515 }
2516
2517 return 0;
2518}
2519
8b9242ed
RZ
2520static int vega10_disable_thermal_protection(struct pp_hwmgr *hwmgr)
2521{
2522 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
2523
2524 if (data->smu_features[GNLD_THERMAL].supported) {
2525 if (!data->smu_features[GNLD_THERMAL].enabled)
2526 pr_info("THERMAL Feature Already disabled!");
2527
2528 PP_ASSERT_WITH_CODE(
2529 !vega10_enable_smc_features(hwmgr->smumgr,
2530 false,
2531 data->smu_features[GNLD_THERMAL].smu_feature_bitmap),
2532 "disable THERMAL Feature Failed!",
2533 return -1);
2534 data->smu_features[GNLD_THERMAL].enabled = false;
2535 }
2536
2537 return 0;
2538}
2539
f83a9991
EH
2540static int vega10_enable_vrhot_feature(struct pp_hwmgr *hwmgr)
2541{
2542 struct vega10_hwmgr *data =
2543 (struct vega10_hwmgr *)(hwmgr->backend);
2544
2545 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2546 PHM_PlatformCaps_RegulatorHot)) {
2547 if (data->smu_features[GNLD_VR0HOT].supported) {
2548 PP_ASSERT_WITH_CODE(
2549 !vega10_enable_smc_features(hwmgr->smumgr,
2550 true,
2551 data->smu_features[GNLD_VR0HOT].smu_feature_bitmap),
2552 "Attempt to Enable VR0 Hot feature Failed!",
2553 return -1);
2554 data->smu_features[GNLD_VR0HOT].enabled = true;
2555 } else {
2556 if (data->smu_features[GNLD_VR1HOT].supported) {
2557 PP_ASSERT_WITH_CODE(
2558 !vega10_enable_smc_features(hwmgr->smumgr,
2559 true,
2560 data->smu_features[GNLD_VR1HOT].smu_feature_bitmap),
2561 "Attempt to Enable VR0 Hot feature Failed!",
2562 return -1);
2563 data->smu_features[GNLD_VR1HOT].enabled = true;
2564 }
2565 }
2566 }
2567 return 0;
2568}
2569
2570static int vega10_enable_ulv(struct pp_hwmgr *hwmgr)
2571{
2572 struct vega10_hwmgr *data =
2573 (struct vega10_hwmgr *)(hwmgr->backend);
2574
2575 if (data->registry_data.ulv_support) {
2576 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
2577 true, data->smu_features[GNLD_ULV].smu_feature_bitmap),
2578 "Enable ULV Feature Failed!",
2579 return -1);
2580 data->smu_features[GNLD_ULV].enabled = true;
2581 }
2582
2583 return 0;
2584}
2585
4022e4f2
RZ
2586static int vega10_disable_ulv(struct pp_hwmgr *hwmgr)
2587{
2588 struct vega10_hwmgr *data =
2589 (struct vega10_hwmgr *)(hwmgr->backend);
2590
2591 if (data->registry_data.ulv_support) {
2592 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
2593 false, data->smu_features[GNLD_ULV].smu_feature_bitmap),
2594 "disable ULV Feature Failed!",
2595 return -EINVAL);
2596 data->smu_features[GNLD_ULV].enabled = false;
2597 }
2598
2599 return 0;
2600}
2601
f83a9991
EH
2602static int vega10_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
2603{
2604 struct vega10_hwmgr *data =
2605 (struct vega10_hwmgr *)(hwmgr->backend);
2606
2607 if (data->smu_features[GNLD_DS_GFXCLK].supported) {
2608 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
2609 true, data->smu_features[GNLD_DS_GFXCLK].smu_feature_bitmap),
2610 "Attempt to Enable DS_GFXCLK Feature Failed!",
2611 return -1);
2612 data->smu_features[GNLD_DS_GFXCLK].enabled = true;
2613 }
2614
2615 if (data->smu_features[GNLD_DS_SOCCLK].supported) {
2616 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
2617 true, data->smu_features[GNLD_DS_SOCCLK].smu_feature_bitmap),
2618 "Attempt to Enable DS_GFXCLK Feature Failed!",
2619 return -1);
2620 data->smu_features[GNLD_DS_SOCCLK].enabled = true;
2621 }
2622
2623 if (data->smu_features[GNLD_DS_LCLK].supported) {
2624 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
2625 true, data->smu_features[GNLD_DS_LCLK].smu_feature_bitmap),
2626 "Attempt to Enable DS_GFXCLK Feature Failed!",
2627 return -1);
2628 data->smu_features[GNLD_DS_LCLK].enabled = true;
2629 }
2630
2631 return 0;
2632}
2633
8b9242ed
RZ
2634static int vega10_stop_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap)
2635{
2636 struct vega10_hwmgr *data =
2637 (struct vega10_hwmgr *)(hwmgr->backend);
2638 uint32_t i, feature_mask = 0;
2639
2640
2641 if(data->smu_features[GNLD_LED_DISPLAY].supported == true){
2642 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
2643 true, data->smu_features[GNLD_LED_DISPLAY].smu_feature_bitmap),
2644 "Attempt to Enable LED DPM feature Failed!", return -EINVAL);
2645 data->smu_features[GNLD_LED_DISPLAY].enabled = true;
2646 }
2647
2648 for (i = 0; i < GNLD_DPM_MAX; i++) {
2649 if (data->smu_features[i].smu_feature_bitmap & bitmap) {
2650 if (data->smu_features[i].supported) {
2651 if (data->smu_features[i].enabled) {
2652 feature_mask |= data->smu_features[i].
2653 smu_feature_bitmap;
2654 data->smu_features[i].enabled = false;
2655 }
2656 }
2657 }
2658 }
2659
2660 vega10_enable_smc_features(hwmgr->smumgr, false, feature_mask);
2661
2662 return 0;
2663}
2664
f83a9991
EH
2665/**
2666 * @brief Tell SMC to enable the supported DPMs.
2667 *
2668 * @param hwmgr - the address of the powerplay hardware manager.
2669 * @param bitmap - bitmap of the features to be enabled.
2670 * @return 0 if at least one DPM is successfully enabled.
2671 */
2672static int vega10_start_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap)
2673{
2674 struct vega10_hwmgr *data =
2675 (struct vega10_hwmgr *)(hwmgr->backend);
2676 uint32_t i, feature_mask = 0;
2677
2678 for (i = 0; i < GNLD_DPM_MAX; i++) {
2679 if (data->smu_features[i].smu_feature_bitmap & bitmap) {
2680 if (data->smu_features[i].supported) {
2681 if (!data->smu_features[i].enabled) {
2682 feature_mask |= data->smu_features[i].
2683 smu_feature_bitmap;
2684 data->smu_features[i].enabled = true;
2685 }
2686 }
2687 }
2688 }
2689
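	/*
	 * If the batched enable request fails, roll the bookkeeping for the
	 * requested features back to disabled.
	 */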
2690 if (vega10_enable_smc_features(hwmgr->smumgr,
2691 true, feature_mask)) {
2692 for (i = 0; i < GNLD_DPM_MAX; i++) {
2693 if (data->smu_features[i].smu_feature_bitmap &
2694 feature_mask)
2695 data->smu_features[i].enabled = false;
2696 }
2697 }
2698
2699 if(data->smu_features[GNLD_LED_DISPLAY].supported == true){
2700 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
2701 true, data->smu_features[GNLD_LED_DISPLAY].smu_feature_bitmap),
2702 "Attempt to Enable LED DPM feature Failed!", return -EINVAL);
2703 data->smu_features[GNLD_LED_DISPLAY].enabled = true;
2704 }
2705
05ee3215
RZ
2706 if (data->vbios_boot_state.bsoc_vddc_lock) {
2707 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
2708 PPSMC_MSG_SetFloorSocVoltage, 0);
2709 data->vbios_boot_state.bsoc_vddc_lock = false;
2710 }
2711
f83a9991
EH
2712 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2713 PHM_PlatformCaps_Falcon_QuickTransition)) {
2714 if (data->smu_features[GNLD_ACDC].supported) {
2715 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
2716 true, data->smu_features[GNLD_ACDC].smu_feature_bitmap),
2717 "Attempt to Enable DS_GFXCLK Feature Failed!",
2718 return -1);
2719 data->smu_features[GNLD_ACDC].enabled = true;
2720 }
2721 }
2722
2723 return 0;
2724}
2725
2726static int vega10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
2727{
2728 struct vega10_hwmgr *data =
2729 (struct vega10_hwmgr *)(hwmgr->backend);
2730 int tmp_result, result = 0;
2731
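	/*
	 * DPM bring-up order: configure telemetry, build the voltage and SMC
	 * tables, then enable thermal protection, VR hot, deep sleep, the DPM
	 * features themselves, power containment and finally ULV.
	 */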
2732 tmp_result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
2733 PPSMC_MSG_ConfigureTelemetry, data->config_telemetry);
2734 PP_ASSERT_WITH_CODE(!tmp_result,
2735 "Failed to configure telemetry!",
2736 return tmp_result);
2737
f83a9991
EH
2738 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
2739 PPSMC_MSG_NumOfDisplays, 0);
2740
2741 tmp_result = (!vega10_is_dpm_running(hwmgr)) ? 0 : -1;
2742 PP_ASSERT_WITH_CODE(!tmp_result,
2743 "DPM is already running right , skipping re-enablement!",
2744 return 0);
2745
2746 tmp_result = vega10_construct_voltage_tables(hwmgr);
2747 PP_ASSERT_WITH_CODE(!tmp_result,
2748 "Failed to contruct voltage tables!",
2749 result = tmp_result);
2750
2751 tmp_result = vega10_init_smc_table(hwmgr);
2752 PP_ASSERT_WITH_CODE(!tmp_result,
2753 "Failed to initialize SMC table!",
2754 result = tmp_result);
2755
2756 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2757 PHM_PlatformCaps_ThermalController)) {
2758 tmp_result = vega10_enable_thermal_protection(hwmgr);
2759 PP_ASSERT_WITH_CODE(!tmp_result,
2760 "Failed to enable thermal protection!",
2761 result = tmp_result);
2762 }
2763
2764 tmp_result = vega10_enable_vrhot_feature(hwmgr);
2765 PP_ASSERT_WITH_CODE(!tmp_result,
2766 "Failed to enable VR hot feature!",
2767 result = tmp_result);
2768
f83a9991
EH
2769 tmp_result = vega10_enable_deep_sleep_master_switch(hwmgr);
2770 PP_ASSERT_WITH_CODE(!tmp_result,
2771 "Failed to enable deep sleep master switch!",
2772 result = tmp_result);
2773
2774 tmp_result = vega10_start_dpm(hwmgr, SMC_DPM_FEATURES);
2775 PP_ASSERT_WITH_CODE(!tmp_result,
2776 "Failed to start DPM!", result = tmp_result);
2777
2778 tmp_result = vega10_enable_power_containment(hwmgr);
2779 PP_ASSERT_WITH_CODE(!tmp_result,
2780 "Failed to enable power containment!",
2781 result = tmp_result);
2782
2783 tmp_result = vega10_power_control_set_level(hwmgr);
2784 PP_ASSERT_WITH_CODE(!tmp_result,
2785 "Failed to power control set level!",
2786 result = tmp_result);
2787
4022e4f2
RZ
2788 tmp_result = vega10_enable_ulv(hwmgr);
2789 PP_ASSERT_WITH_CODE(!tmp_result,
2790 "Failed to enable ULV!",
2791 result = tmp_result);
2792
f83a9991
EH
2793 return result;
2794}
2795
2796static int vega10_get_power_state_size(struct pp_hwmgr *hwmgr)
2797{
2798 return sizeof(struct vega10_power_state);
2799}
2800
2801static int vega10_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr,
2802 void *state, struct pp_power_state *power_state,
2803 void *pp_table, uint32_t classification_flag)
2804{
2805 struct vega10_power_state *vega10_power_state =
2806 cast_phw_vega10_power_state(&(power_state->hardware));
2807 struct vega10_performance_level *performance_level;
2808 ATOM_Vega10_State *state_entry = (ATOM_Vega10_State *)state;
2809 ATOM_Vega10_POWERPLAYTABLE *powerplay_table =
2810 (ATOM_Vega10_POWERPLAYTABLE *)pp_table;
2811 ATOM_Vega10_SOCCLK_Dependency_Table *socclk_dep_table =
2812 (ATOM_Vega10_SOCCLK_Dependency_Table *)
2813 (((unsigned long)powerplay_table) +
2814 le16_to_cpu(powerplay_table->usSocclkDependencyTableOffset));
2815 ATOM_Vega10_GFXCLK_Dependency_Table *gfxclk_dep_table =
2816 (ATOM_Vega10_GFXCLK_Dependency_Table *)
2817 (((unsigned long)powerplay_table) +
2818 le16_to_cpu(powerplay_table->usGfxclkDependencyTableOffset));
2819 ATOM_Vega10_MCLK_Dependency_Table *mclk_dep_table =
2820 (ATOM_Vega10_MCLK_Dependency_Table *)
2821 (((unsigned long)powerplay_table) +
2822 le16_to_cpu(powerplay_table->usMclkDependencyTableOffset));
2823
2824
2825 /* The following fields are not initialized here:
2826 * id orderedList allStatesList
2827 */
2828 power_state->classification.ui_label =
2829 (le16_to_cpu(state_entry->usClassification) &
2830 ATOM_PPLIB_CLASSIFICATION_UI_MASK) >>
2831 ATOM_PPLIB_CLASSIFICATION_UI_SHIFT;
2832 power_state->classification.flags = classification_flag;
2833 /* NOTE: There is a classification2 flag in BIOS
2834 * that is not being used right now
2835 */
2836 power_state->classification.temporary_state = false;
2837 power_state->classification.to_be_deleted = false;
2838
2839 power_state->validation.disallowOnDC =
2840 ((le32_to_cpu(state_entry->ulCapsAndSettings) &
2841 ATOM_Vega10_DISALLOW_ON_DC) != 0);
2842
2843 power_state->display.disableFrameModulation = false;
2844 power_state->display.limitRefreshrate = false;
2845 power_state->display.enableVariBright =
2846 ((le32_to_cpu(state_entry->ulCapsAndSettings) &
2847 ATOM_Vega10_ENABLE_VARIBRIGHT) != 0);
2848
2849 power_state->validation.supportedPowerLevels = 0;
2850 power_state->uvd_clocks.VCLK = 0;
2851 power_state->uvd_clocks.DCLK = 0;
2852 power_state->temperatures.min = 0;
2853 power_state->temperatures.max = 0;
2854
2855 performance_level = &(vega10_power_state->performance_levels
2856 [vega10_power_state->performance_level_count++]);
2857
2858 PP_ASSERT_WITH_CODE(
2859 (vega10_power_state->performance_level_count <
2860 NUM_GFXCLK_DPM_LEVELS),
2861 "Performance levels exceeds SMC limit!",
2862 return -1);
2863
2864 PP_ASSERT_WITH_CODE(
2865 (vega10_power_state->performance_level_count <=
2866 hwmgr->platform_descriptor.
2867 hardwareActivityPerformanceLevels),
2868 "Performance levels exceeds Driver limit!",
2869 return -1);
2870
2871 /* Performance levels are arranged from low to high. */
2872 performance_level->soc_clock = socclk_dep_table->entries
2873 [state_entry->ucSocClockIndexLow].ulClk;
2874 performance_level->gfx_clock = gfxclk_dep_table->entries
2875 [state_entry->ucGfxClockIndexLow].ulClk;
2876 performance_level->mem_clock = mclk_dep_table->entries
2877 [state_entry->ucMemClockIndexLow].ulMemClk;
2878
2879 performance_level = &(vega10_power_state->performance_levels
2880 [vega10_power_state->performance_level_count++]);
2881
2882 performance_level->soc_clock = socclk_dep_table->entries
2883 [state_entry->ucSocClockIndexHigh].ulClk;
2884 performance_level->gfx_clock = gfxclk_dep_table->entries
2885 [state_entry->ucGfxClockIndexHigh].ulClk;
2886 performance_level->mem_clock = mclk_dep_table->entries
2887 [state_entry->ucMemClockIndexHigh].ulMemClk;
2888 return 0;
2889}
2890
2891static int vega10_get_pp_table_entry(struct pp_hwmgr *hwmgr,
2892 unsigned long entry_index, struct pp_power_state *state)
2893{
2894 int result;
2895 struct vega10_power_state *ps;
2896
2897 state->hardware.magic = PhwVega10_Magic;
2898
2899 ps = cast_phw_vega10_power_state(&state->hardware);
2900
2901 result = vega10_get_powerplay_table_entry(hwmgr, entry_index, state,
2902 vega10_get_pp_table_entry_callback_func);
2903
2904 /*
2905 * This is the earliest time we have all the dependency table
2906 * and the VBIOS boot state
2907 */
2908 /* set DC compatible flag if this state supports DC */
2909 if (!state->validation.disallowOnDC)
2910 ps->dc_compatible = true;
2911
2912 ps->uvd_clks.vclk = state->uvd_clocks.VCLK;
2913 ps->uvd_clks.dclk = state->uvd_clocks.DCLK;
2914
2915 return 0;
2916}
2917
2918static int vega10_patch_boot_state(struct pp_hwmgr *hwmgr,
2919 struct pp_hw_power_state *hw_ps)
2920{
2921 return 0;
2922}
2923
2924static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
2925 struct pp_power_state *request_ps,
2926 const struct pp_power_state *current_ps)
2927{
2928 struct vega10_power_state *vega10_ps =
2929 cast_phw_vega10_power_state(&request_ps->hardware);
2930 uint32_t sclk;
2931 uint32_t mclk;
2932 struct PP_Clocks minimum_clocks = {0};
2933 bool disable_mclk_switching;
2934 bool disable_mclk_switching_for_frame_lock;
2935 bool disable_mclk_switching_for_vr;
2936 bool force_mclk_high;
2937 struct cgs_display_info info = {0};
2938 const struct phm_clock_and_voltage_limits *max_limits;
2939 uint32_t i;
2940 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
2941 struct phm_ppt_v2_information *table_info =
2942 (struct phm_ppt_v2_information *)(hwmgr->pptable);
2943 int32_t count;
2944 uint32_t stable_pstate_sclk_dpm_percentage;
2945 uint32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0;
2946 uint32_t latency;
2947
2948 data->battery_state = (PP_StateUILabel_Battery ==
2949 request_ps->classification.ui_label);
2950
2951 if (vega10_ps->performance_level_count != 2)
2952 pr_info("VI should always have 2 performance levels");
2953
2954 max_limits = (PP_PowerSource_AC == hwmgr->power_source) ?
2955 &(hwmgr->dyn_state.max_clock_voltage_on_ac) :
2956 &(hwmgr->dyn_state.max_clock_voltage_on_dc);
2957
2958 /* Cap clock DPM tables at DC MAX if it is in DC. */
2959 if (PP_PowerSource_DC == hwmgr->power_source) {
2960 for (i = 0; i < vega10_ps->performance_level_count; i++) {
2961 if (vega10_ps->performance_levels[i].mem_clock >
2962 max_limits->mclk)
2963 vega10_ps->performance_levels[i].mem_clock =
2964 max_limits->mclk;
2965 if (vega10_ps->performance_levels[i].gfx_clock >
2966 max_limits->sclk)
2967 vega10_ps->performance_levels[i].gfx_clock =
2968 max_limits->sclk;
2969 }
2970 }
2971
2972 vega10_ps->vce_clks.evclk = hwmgr->vce_arbiter.evclk;
2973 vega10_ps->vce_clks.ecclk = hwmgr->vce_arbiter.ecclk;
2974
2975 cgs_get_active_displays_info(hwmgr->device, &info);
2976
2977 /* result = PHM_CheckVBlankTime(hwmgr, &vblankTooShort);*/
2978 minimum_clocks.engineClock = hwmgr->display_config.min_core_set_clock;
2979 /* minimum_clocks.memoryClock = hwmgr->display_config.min_mem_set_clock; */
2980
2981 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2982 PHM_PlatformCaps_StablePState)) {
2983 PP_ASSERT_WITH_CODE(
2984 data->registry_data.stable_pstate_sclk_dpm_percentage >= 1 &&
2985 data->registry_data.stable_pstate_sclk_dpm_percentage <= 100,
2986 "percent sclk value must range from 1% to 100%, setting default value",
2987 stable_pstate_sclk_dpm_percentage = 75);
2988
2989 max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac);
2990 stable_pstate_sclk = (max_limits->sclk *
2991 stable_pstate_sclk_dpm_percentage) / 100;
2992
2993 for (count = table_info->vdd_dep_on_sclk->count - 1;
2994 count >= 0; count--) {
2995 if (stable_pstate_sclk >=
2996 table_info->vdd_dep_on_sclk->entries[count].clk) {
2997 stable_pstate_sclk =
2998 table_info->vdd_dep_on_sclk->entries[count].clk;
2999 break;
3000 }
3001 }
3002
3003 if (count < 0)
3004 stable_pstate_sclk = table_info->vdd_dep_on_sclk->entries[0].clk;
3005
3006 stable_pstate_mclk = max_limits->mclk;
3007
3008 minimum_clocks.engineClock = stable_pstate_sclk;
3009 minimum_clocks.memoryClock = stable_pstate_mclk;
3010 }
3011
3012 if (minimum_clocks.engineClock < hwmgr->gfx_arbiter.sclk)
3013 minimum_clocks.engineClock = hwmgr->gfx_arbiter.sclk;
3014
3015 if (minimum_clocks.memoryClock < hwmgr->gfx_arbiter.mclk)
3016 minimum_clocks.memoryClock = hwmgr->gfx_arbiter.mclk;
3017
3018 vega10_ps->sclk_threshold = hwmgr->gfx_arbiter.sclk_threshold;
3019
3020 if (hwmgr->gfx_arbiter.sclk_over_drive) {
3021 PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.sclk_over_drive <=
3022 hwmgr->platform_descriptor.overdriveLimit.engineClock),
3023 "Overdrive sclk exceeds limit",
3024 hwmgr->gfx_arbiter.sclk_over_drive =
3025 hwmgr->platform_descriptor.overdriveLimit.engineClock);
3026
3027 if (hwmgr->gfx_arbiter.sclk_over_drive >= hwmgr->gfx_arbiter.sclk)
3028 vega10_ps->performance_levels[1].gfx_clock =
3029 hwmgr->gfx_arbiter.sclk_over_drive;
3030 }
3031
3032 if (hwmgr->gfx_arbiter.mclk_over_drive) {
3033 PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.mclk_over_drive <=
3034 hwmgr->platform_descriptor.overdriveLimit.memoryClock),
3035 "Overdrive mclk exceeds limit",
3036 hwmgr->gfx_arbiter.mclk_over_drive =
3037 hwmgr->platform_descriptor.overdriveLimit.memoryClock);
3038
3039 if (hwmgr->gfx_arbiter.mclk_over_drive >= hwmgr->gfx_arbiter.mclk)
3040 vega10_ps->performance_levels[1].mem_clock =
3041 hwmgr->gfx_arbiter.mclk_over_drive;
3042 }
3043
3044 disable_mclk_switching_for_frame_lock = phm_cap_enabled(
3045 hwmgr->platform_descriptor.platformCaps,
3046 PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
3047 disable_mclk_switching_for_vr = phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3048 PHM_PlatformCaps_DisableMclkSwitchForVR);
3049 force_mclk_high = phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3050 PHM_PlatformCaps_ForceMclkHigh);
3051
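	/*
	 * MCLK switching is disabled when more than one display is active or
	 * when any of the frame-lock, VR, or force-mclk-high workarounds is
	 * requested.
	 */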
3052 disable_mclk_switching = (info.display_count > 1) ||
3053 disable_mclk_switching_for_frame_lock ||
3054 disable_mclk_switching_for_vr ||
3055 force_mclk_high;
3056
3057 sclk = vega10_ps->performance_levels[0].gfx_clock;
3058 mclk = vega10_ps->performance_levels[0].mem_clock;
3059
3060 if (sclk < minimum_clocks.engineClock)
3061 sclk = (minimum_clocks.engineClock > max_limits->sclk) ?
3062 max_limits->sclk : minimum_clocks.engineClock;
3063
3064 if (mclk < minimum_clocks.memoryClock)
3065 mclk = (minimum_clocks.memoryClock > max_limits->mclk) ?
3066 max_limits->mclk : minimum_clocks.memoryClock;
3067
3068 vega10_ps->performance_levels[0].gfx_clock = sclk;
3069 vega10_ps->performance_levels[0].mem_clock = mclk;
3070
3071 vega10_ps->performance_levels[1].gfx_clock =
3072 (vega10_ps->performance_levels[1].gfx_clock >=
3073 vega10_ps->performance_levels[0].gfx_clock) ?
3074 vega10_ps->performance_levels[1].gfx_clock :
3075 vega10_ps->performance_levels[0].gfx_clock;
3076
3077 if (disable_mclk_switching) {
3078 /* Set Mclk the max of level 0 and level 1 */
3079 if (mclk < vega10_ps->performance_levels[1].mem_clock)
3080 mclk = vega10_ps->performance_levels[1].mem_clock;
3081
3082 /* Find the lowest MCLK frequency that is within
3083 * the tolerable latency defined in DAL
3084 */
3085 latency = 0;
3086 for (i = 0; i < data->mclk_latency_table.count; i++) {
3087 if ((data->mclk_latency_table.entries[i].latency <= latency) &&
3088 (data->mclk_latency_table.entries[i].frequency >=
3089 vega10_ps->performance_levels[0].mem_clock) &&
3090 (data->mclk_latency_table.entries[i].frequency <=
3091 vega10_ps->performance_levels[1].mem_clock))
3092 mclk = data->mclk_latency_table.entries[i].frequency;
3093 }
3094 vega10_ps->performance_levels[0].mem_clock = mclk;
3095 } else {
3096 if (vega10_ps->performance_levels[1].mem_clock <
3097 vega10_ps->performance_levels[0].mem_clock)
3098 vega10_ps->performance_levels[1].mem_clock =
3099 vega10_ps->performance_levels[0].mem_clock;
3100 }
3101
3102 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3103 PHM_PlatformCaps_StablePState)) {
3104 for (i = 0; i < vega10_ps->performance_level_count; i++) {
3105 vega10_ps->performance_levels[i].gfx_clock = stable_pstate_sclk;
3106 vega10_ps->performance_levels[i].mem_clock = stable_pstate_mclk;
3107 }
3108 }
3109
3110 return 0;
3111}
3112
3113static int vega10_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input)
3114{
3115 const struct phm_set_power_state_input *states =
3116 (const struct phm_set_power_state_input *)input;
3117 const struct vega10_power_state *vega10_ps =
3118 cast_const_phw_vega10_power_state(states->pnew_state);
3119 struct vega10_hwmgr *data =
3120 (struct vega10_hwmgr *)(hwmgr->backend);
3121 struct vega10_single_dpm_table *sclk_table =
3122 &(data->dpm_table.gfx_table);
3123 uint32_t sclk = vega10_ps->performance_levels
3124 [vega10_ps->performance_level_count - 1].gfx_clock;
3125 struct vega10_single_dpm_table *mclk_table =
3126 &(data->dpm_table.mem_table);
3127 uint32_t mclk = vega10_ps->performance_levels
3128 [vega10_ps->performance_level_count - 1].mem_clock;
3129 struct PP_Clocks min_clocks = {0};
3130 uint32_t i;
3131 struct cgs_display_info info = {0};
3132
3133 data->need_update_dpm_table = 0;
3134
3135 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3136 PHM_PlatformCaps_ODNinACSupport) ||
3137 phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3138 PHM_PlatformCaps_ODNinDCSupport)) {
3139 for (i = 0; i < sclk_table->count; i++) {
3140 if (sclk == sclk_table->dpm_levels[i].value)
3141 break;
3142 }
3143
3144 if (!(data->apply_overdrive_next_settings_mask &
3145 DPMTABLE_OD_UPDATE_SCLK) && i >= sclk_table->count) {
3146 /* Check SCLK in DAL's minimum clocks
3147 * in case DeepSleep divider update is required.
3148 */
3149 if (data->display_timing.min_clock_in_sr !=
3150 min_clocks.engineClockInSR &&
3151 (min_clocks.engineClockInSR >=
3152 VEGA10_MINIMUM_ENGINE_CLOCK ||
3153 data->display_timing.min_clock_in_sr >=
3154 VEGA10_MINIMUM_ENGINE_CLOCK))
3155 data->need_update_dpm_table |= DPMTABLE_UPDATE_SCLK;
3156 }
3157
3158 cgs_get_active_displays_info(hwmgr->device, &info);
3159
3160 if (data->display_timing.num_existing_displays !=
3161 info.display_count)
3162 data->need_update_dpm_table |= DPMTABLE_UPDATE_MCLK;
3163 } else {
3164 for (i = 0; i < sclk_table->count; i++) {
3165 if (sclk == sclk_table->dpm_levels[i].value)
3166 break;
3167 }
3168
3169 if (i >= sclk_table->count)
3170 data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
3171 else {
3172 /* Check SCLK in DAL's minimum clocks
3173 * in case DeepSleep divider update is required.
3174 */
3175 if (data->display_timing.min_clock_in_sr !=
3176 min_clocks.engineClockInSR &&
3177 (min_clocks.engineClockInSR >=
3178 VEGA10_MINIMUM_ENGINE_CLOCK ||
3179 data->display_timing.min_clock_in_sr >=
3180 VEGA10_MINIMUM_ENGINE_CLOCK))
3181 data->need_update_dpm_table |= DPMTABLE_UPDATE_SCLK;
3182 }
3183
3184 for (i = 0; i < mclk_table->count; i++) {
3185 if (mclk == mclk_table->dpm_levels[i].value)
3186 break;
3187 }
3188
3189 cgs_get_active_displays_info(hwmgr->device, &info);
3190
3191 if (i >= mclk_table->count)
3192 data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
3193
3194 if (data->display_timing.num_existing_displays !=
3195 info.display_count ||
3196 i >= mclk_table->count)
3197 data->need_update_dpm_table |= DPMTABLE_UPDATE_MCLK;
3198 }
3199 return 0;
3200}
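/*
 * A note on the function above: it only decides what has to be rebuilt and
 * records the answer in data->need_update_dpm_table using the DPMTABLE_*
 * flags (OD_UPDATE_* when the requested clock no longer matches any entry
 * in the current table, UPDATE_* when only the deep-sleep divider or the
 * number of active displays changed).
 * vega10_populate_and_upload_sclk_mclk_dpm_levels() below consumes these
 * flags.
 */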
3201
3202static int vega10_populate_and_upload_sclk_mclk_dpm_levels(
3203 struct pp_hwmgr *hwmgr, const void *input)
3204{
3205 int result = 0;
3206 const struct phm_set_power_state_input *states =
3207 (const struct phm_set_power_state_input *)input;
3208 const struct vega10_power_state *vega10_ps =
3209 cast_const_phw_vega10_power_state(states->pnew_state);
3210 struct vega10_hwmgr *data =
3211 (struct vega10_hwmgr *)(hwmgr->backend);
3212 uint32_t sclk = vega10_ps->performance_levels
3213 [vega10_ps->performance_level_count - 1].gfx_clock;
3214 uint32_t mclk = vega10_ps->performance_levels
3215 [vega10_ps->performance_level_count - 1].mem_clock;
3216 struct vega10_dpm_table *dpm_table = &data->dpm_table;
3217 struct vega10_dpm_table *golden_dpm_table =
3218 &data->golden_dpm_table;
3219 uint32_t dpm_count, clock_percent;
3220 uint32_t i;
3221
3222 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3223 PHM_PlatformCaps_ODNinACSupport) ||
3224 phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3225 PHM_PlatformCaps_ODNinDCSupport)) {
3226
3227 if (!data->need_update_dpm_table &&
3228 !data->apply_optimized_settings &&
3229 !data->apply_overdrive_next_settings_mask)
3230 return 0;
3231
3232 if (data->apply_overdrive_next_settings_mask &
3233 DPMTABLE_OD_UPDATE_SCLK) {
3234 for (dpm_count = 0;
3235 dpm_count < dpm_table->gfx_table.count;
3236 dpm_count++) {
3237 dpm_table->gfx_table.dpm_levels[dpm_count].enabled =
3238 data->odn_dpm_table.odn_core_clock_dpm_levels.
3239 performance_level_entries[dpm_count].enabled;
3240 dpm_table->gfx_table.dpm_levels[dpm_count].value =
3241 data->odn_dpm_table.odn_core_clock_dpm_levels.
3242 performance_level_entries[dpm_count].clock;
3243 }
3244 }
3245
3246 if (data->apply_overdrive_next_settings_mask &
3247 DPMTABLE_OD_UPDATE_MCLK) {
3248 for (dpm_count = 0;
3249 dpm_count < dpm_table->mem_table.count;
3250 dpm_count++) {
3251 dpm_table->mem_table.dpm_levels[dpm_count].enabled =
3252 data->odn_dpm_table.odn_memory_clock_dpm_levels.
3253 performance_level_entries[dpm_count].enabled;
3254 dpm_table->mem_table.dpm_levels[dpm_count].value =
3255 data->odn_dpm_table.odn_memory_clock_dpm_levels.
3256 performance_level_entries[dpm_count].clock;
3257 }
3258 }
3259
3260 if ((data->need_update_dpm_table & DPMTABLE_UPDATE_SCLK) ||
3261 data->apply_optimized_settings ||
3262 (data->apply_overdrive_next_settings_mask &
3263 DPMTABLE_OD_UPDATE_SCLK)) {
3264 result = vega10_populate_all_graphic_levels(hwmgr);
3265 PP_ASSERT_WITH_CODE(!result,
3266 "Failed to populate SCLK during \
3267 PopulateNewDPMClocksStates Function!",
3268 return result);
3269 }
3270
3271 if ((data->need_update_dpm_table & DPMTABLE_UPDATE_MCLK) ||
3272 (data->apply_overdrive_next_settings_mask &
3273 DPMTABLE_OD_UPDATE_MCLK)){
3274 result = vega10_populate_all_memory_levels(hwmgr);
3275 PP_ASSERT_WITH_CODE(!result,
3276 "Failed to populate MCLK during \
3277 PopulateNewDPMClocksStates Function!",
3278 return result);
3279 }
3280 } else {
3281 if (!data->need_update_dpm_table &&
3282 !data->apply_optimized_settings)
3283 return 0;
3284
3285 if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_SCLK &&
3286 data->smu_features[GNLD_DPM_GFXCLK].supported) {
3287 dpm_table->
3288 gfx_table.dpm_levels[dpm_table->gfx_table.count - 1].
3289 value = sclk;
3290
3291 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3292 PHM_PlatformCaps_OD6PlusinACSupport) ||
3293 phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3294 PHM_PlatformCaps_OD6PlusinDCSupport)) {
3295 /* Need to do calculation based on the golden DPM table
3296 * as the Heatmap GPU Clock axis is also based on
3297 * the default values
3298 */
3299 PP_ASSERT_WITH_CODE(
3300 golden_dpm_table->gfx_table.dpm_levels
3301 [golden_dpm_table->gfx_table.count - 1].value,
3302 "Divide by 0!",
3303 return -1);
3304
3305 dpm_count = dpm_table->gfx_table.count < 2 ?
3306 0 : dpm_table->gfx_table.count - 2;
3307 for (i = dpm_count; i > 1; i--) {
3308 if (sclk > golden_dpm_table->gfx_table.dpm_levels
3309 [golden_dpm_table->gfx_table.count - 1].value) {
3310 clock_percent =
3311 ((sclk - golden_dpm_table->gfx_table.dpm_levels
3312 [golden_dpm_table->gfx_table.count - 1].value) *
3313 100) /
3314 golden_dpm_table->gfx_table.dpm_levels
3315 [golden_dpm_table->gfx_table.count - 1].value;
3316
3317 dpm_table->gfx_table.dpm_levels[i].value =
3318 golden_dpm_table->gfx_table.dpm_levels[i].value +
3319 (golden_dpm_table->gfx_table.dpm_levels[i].value *
3320 clock_percent) / 100;
3321 } else if (golden_dpm_table->
3322 gfx_table.dpm_levels[dpm_table->gfx_table.count-1].value >
3323 sclk) {
3324 clock_percent =
3325 ((golden_dpm_table->gfx_table.dpm_levels
3326 [golden_dpm_table->gfx_table.count - 1].value -
3327 sclk) * 100) /
3328 golden_dpm_table->gfx_table.dpm_levels
3329 [golden_dpm_table->gfx_table.count-1].value;
3330
3331 dpm_table->gfx_table.dpm_levels[i].value =
3332 golden_dpm_table->gfx_table.dpm_levels[i].value -
3333 (golden_dpm_table->gfx_table.dpm_levels[i].value *
3334 clock_percent) / 100;
3335 } else
3336 dpm_table->gfx_table.dpm_levels[i].value =
3337 golden_dpm_table->gfx_table.dpm_levels[i].value;
3338 }
3339 }
3340 }
3341
3342 if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_MCLK &&
3343 data->smu_features[GNLD_DPM_UCLK].supported) {
3344 dpm_table->
3345 mem_table.dpm_levels[dpm_table->mem_table.count - 1].
3346 value = mclk;
3347
3348 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3349 PHM_PlatformCaps_OD6PlusinACSupport) ||
3350 phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3351 PHM_PlatformCaps_OD6PlusinDCSupport)) {
3352
3353 PP_ASSERT_WITH_CODE(
3354 golden_dpm_table->mem_table.dpm_levels
3355 [golden_dpm_table->mem_table.count - 1].value,
3356 "Divide by 0!",
3357 return -1);
3358
3359 dpm_count = dpm_table->mem_table.count < 2 ?
3360 0 : dpm_table->mem_table.count - 2;
3361 for (i = dpm_count; i > 1; i--) {
3362 if (mclk > golden_dpm_table->mem_table.dpm_levels
3363 [golden_dpm_table->mem_table.count-1].value) {
3364 clock_percent = ((mclk -
3365 golden_dpm_table->mem_table.dpm_levels
3366 [golden_dpm_table->mem_table.count-1].value) *
3367 100) /
3368 golden_dpm_table->mem_table.dpm_levels
3369 [golden_dpm_table->mem_table.count-1].value;
3370
3371 dpm_table->mem_table.dpm_levels[i].value =
3372 golden_dpm_table->mem_table.dpm_levels[i].value +
3373 (golden_dpm_table->mem_table.dpm_levels[i].value *
3374 clock_percent) / 100;
3375 } else if (golden_dpm_table->mem_table.dpm_levels
3376 [dpm_table->mem_table.count-1].value > mclk) {
3377 clock_percent = ((golden_dpm_table->mem_table.dpm_levels
3378 [golden_dpm_table->mem_table.count-1].value - mclk) *
3379 100) /
3380 golden_dpm_table->mem_table.dpm_levels
3381 [golden_dpm_table->mem_table.count-1].value;
3382
3383 dpm_table->mem_table.dpm_levels[i].value =
3384 golden_dpm_table->mem_table.dpm_levels[i].value -
3385 (golden_dpm_table->mem_table.dpm_levels[i].value *
3386 clock_percent) / 100;
3387 } else
3388 dpm_table->mem_table.dpm_levels[i].value =
3389 golden_dpm_table->mem_table.dpm_levels[i].value;
3390 }
3391 }
3392 }
3393
3394 if ((data->need_update_dpm_table &
3395 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) ||
3396 data->apply_optimized_settings) {
3397 result = vega10_populate_all_graphic_levels(hwmgr);
3398 PP_ASSERT_WITH_CODE(!result,
3399 "Failed to populate SCLK during \
3400 PopulateNewDPMClocksStates Function!",
3401 return result);
3402 }
3403
3404 if (data->need_update_dpm_table &
3405 (DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) {
3406 result = vega10_populate_all_memory_levels(hwmgr);
3407 PP_ASSERT_WITH_CODE(!result,
3408 "Failed to populate MCLK during \
3409 PopulateNewDPMClocksStates Function!",
3410 return result);
3411 }
3412 }
3413
3414 return result;
3415}
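/*
 * Worked example for the OD6+ rescaling above (illustrative numbers, not
 * from any real VBIOS): if the golden top gfx level is 1500 MHz and the
 * requested sclk is 1650 MHz, then
 *
 *	clock_percent = ((1650 - 1500) * 100) / 1500 = 10
 *
 * and every intermediate level is raised by the same 10%, e.g. a golden
 * 1138 MHz level becomes 1138 + (1138 * 10) / 100 = 1251 MHz. The memory
 * branch applies the identical scaling to the mem_table levels.
 */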
3416
3417static int vega10_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
3418 struct vega10_single_dpm_table *dpm_table,
3419 uint32_t low_limit, uint32_t high_limit)
3420{
3421 uint32_t i;
3422
3423 for (i = 0; i < dpm_table->count; i++) {
3424 if ((dpm_table->dpm_levels[i].value < low_limit) ||
3425 (dpm_table->dpm_levels[i].value > high_limit))
3426 dpm_table->dpm_levels[i].enabled = false;
3427 else
3428 dpm_table->dpm_levels[i].enabled = true;
3429 }
3430 return 0;
3431}
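/*
 * Illustrative call of the trim helper above (hypothetical table values,
 * in the table's own clock units): with dpm_levels[].value = {60000,
 * 80000, 100000, 107200} and a requested window of [80000, 100000], only
 * the middle two levels stay enabled:
 *
 *	vega10_trim_single_dpm_states(hwmgr, &data->dpm_table.mem_table,
 *			80000, 100000);
 *	// enabled == {false, true, true, false}
 */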
3432
3433static int vega10_trim_single_dpm_states_with_mask(struct pp_hwmgr *hwmgr,
3434 struct vega10_single_dpm_table *dpm_table,
3435 uint32_t low_limit, uint32_t high_limit,
3436 uint32_t disable_dpm_mask)
3437{
3438 uint32_t i;
3439
3440 for (i = 0; i < dpm_table->count; i++) {
3441 if ((dpm_table->dpm_levels[i].value < low_limit) ||
3442 (dpm_table->dpm_levels[i].value > high_limit))
3443 dpm_table->dpm_levels[i].enabled = false;
3444 else if (!((1 << i) & disable_dpm_mask))
3445 dpm_table->dpm_levels[i].enabled = false;
3446 else
3447 dpm_table->dpm_levels[i].enabled = true;
3448 }
3449 return 0;
3450}
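/*
 * Note on the masked variant above: despite its name, disable_dpm_mask is
 * effectively a keep-mask. A level survives only when its value is inside
 * [low_limit, high_limit] *and* its bit in the mask is set; levels whose
 * bit is clear are disabled even if they are in range. For example
 * (hypothetical), with four in-range levels and disable_dpm_mask == 0x6,
 * only levels 1 and 2 remain enabled.
 */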
3451
3452static int vega10_trim_dpm_states(struct pp_hwmgr *hwmgr,
3453 const struct vega10_power_state *vega10_ps)
3454{
3455 struct vega10_hwmgr *data =
3456 (struct vega10_hwmgr *)(hwmgr->backend);
3457 uint32_t high_limit_count;
3458
3459 PP_ASSERT_WITH_CODE((vega10_ps->performance_level_count >= 1),
3460 "power state did not have any performance level",
3461 return -1);
3462
3463 high_limit_count = (vega10_ps->performance_level_count == 1) ? 0 : 1;
3464
3465 vega10_trim_single_dpm_states(hwmgr,
3466 &(data->dpm_table.soc_table),
3467 vega10_ps->performance_levels[0].soc_clock,
3468 vega10_ps->performance_levels[high_limit_count].soc_clock);
3469
3470 vega10_trim_single_dpm_states_with_mask(hwmgr,
3471 &(data->dpm_table.gfx_table),
3472 vega10_ps->performance_levels[0].gfx_clock,
3473 vega10_ps->performance_levels[high_limit_count].gfx_clock,
3474 data->disable_dpm_mask);
3475
3476 vega10_trim_single_dpm_states(hwmgr,
3477 &(data->dpm_table.mem_table),
3478 vega10_ps->performance_levels[0].mem_clock,
3479 vega10_ps->performance_levels[high_limit_count].mem_clock);
3480
3481 return 0;
3482}
3483
3484static uint32_t vega10_find_lowest_dpm_level(
3485 struct vega10_single_dpm_table *table)
3486{
3487 uint32_t i;
3488
3489 for (i = 0; i < table->count; i++) {
3490 if (table->dpm_levels[i].enabled)
3491 break;
3492 }
3493
3494 return i;
3495}
3496
3497static uint32_t vega10_find_highest_dpm_level(
3498 struct vega10_single_dpm_table *table)
3499{
3500 uint32_t i = 0;
3501
3502 if (table->count <= MAX_REGULAR_DPM_NUMBER) {
3503 for (i = table->count; i > 0; i--) {
3504 if (table->dpm_levels[i - 1].enabled)
3505 return i - 1;
3506 }
3507 } else {
3508 pr_info("DPM Table Has Too Many Entries!");
3509 return MAX_REGULAR_DPM_NUMBER - 1;
3510 }
3511
3512 return i;
3513}
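/*
 * Small example for the two search helpers above (hypothetical table):
 * with count == 4 and enabled == {false, true, true, false},
 * vega10_find_lowest_dpm_level() returns 1 and
 * vega10_find_highest_dpm_level() returns 2. Those indices later become
 * the soft min/max clock indices sent to the SMU.
 */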
3514
3515static void vega10_apply_dal_minimum_voltage_request(
3516 struct pp_hwmgr *hwmgr)
3517{
3518 return;
3519}
3520
3521static int vega10_upload_dpm_bootup_level(struct pp_hwmgr *hwmgr)
3522{
3523 struct vega10_hwmgr *data =
3524 (struct vega10_hwmgr *)(hwmgr->backend);
3525
3526 vega10_apply_dal_minimum_voltage_request(hwmgr);
3527
3528 if (!data->registry_data.sclk_dpm_key_disabled) {
3529 if (data->smc_state_table.gfx_boot_level !=
3530 data->dpm_table.gfx_table.dpm_state.soft_min_level) {
3531 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
3532 hwmgr->smumgr,
3533 PPSMC_MSG_SetSoftMinGfxclkByIndex,
3534 data->smc_state_table.gfx_boot_level),
3535 "Failed to set soft min sclk index!",
3536 return -EINVAL);
3537 data->dpm_table.gfx_table.dpm_state.soft_min_level =
3538 data->smc_state_table.gfx_boot_level;
3539 }
3540 }
3541
3542 if (!data->registry_data.mclk_dpm_key_disabled) {
3543 if (data->smc_state_table.mem_boot_level !=
3544 data->dpm_table.mem_table.dpm_state.soft_min_level) {
3545 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
3546 hwmgr->smumgr,
3547 PPSMC_MSG_SetSoftMinUclkByIndex,
3548 data->smc_state_table.mem_boot_level),
3549 "Failed to set soft min mclk index!",
3550 return -EINVAL);
3551
3552 data->dpm_table.mem_table.dpm_state.soft_min_level =
3553 data->smc_state_table.mem_boot_level;
3554 }
3555 }
3556
3557 return 0;
3558}
3559
3560static int vega10_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
3561{
3562 struct vega10_hwmgr *data =
3563 (struct vega10_hwmgr *)(hwmgr->backend);
3564
3565 vega10_apply_dal_minimum_voltage_request(hwmgr);
3566
3567 if (!data->registry_data.sclk_dpm_key_disabled) {
3568 if (data->smc_state_table.gfx_max_level !=
3569 data->dpm_table.gfx_table.dpm_state.soft_max_level) {
3570 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
3571 hwmgr->smumgr,
3572 PPSMC_MSG_SetSoftMaxGfxclkByIndex,
3573 data->smc_state_table.gfx_max_level),
3574 "Failed to set soft max sclk index!",
3575 return -EINVAL);
3576 data->dpm_table.gfx_table.dpm_state.soft_max_level =
3577 data->smc_state_table.gfx_max_level;
3578 }
3579 }
3580
3581 if (!data->registry_data.mclk_dpm_key_disabled) {
3582 if (data->smc_state_table.mem_max_level !=
3583 data->dpm_table.mem_table.dpm_state.soft_max_level) {
3584 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
3585 hwmgr->smumgr,
3586 PPSMC_MSG_SetSoftMaxUclkByIndex,
3587 data->smc_state_table.mem_max_level),
3588 "Failed to set soft max mclk index!",
3589 return -EINVAL);
3590 data->dpm_table.mem_table.dpm_state.soft_max_level =
3591 data->smc_state_table.mem_max_level;
3592 }
3593 }
3594
3595 return 0;
3596}
3597
3598static int vega10_generate_dpm_level_enable_mask(
3599 struct pp_hwmgr *hwmgr, const void *input)
3600{
3601 struct vega10_hwmgr *data =
3602 (struct vega10_hwmgr *)(hwmgr->backend);
3603 const struct phm_set_power_state_input *states =
3604 (const struct phm_set_power_state_input *)input;
3605 const struct vega10_power_state *vega10_ps =
3606 cast_const_phw_vega10_power_state(states->pnew_state);
3607 int i;
3608
3609 PP_ASSERT_WITH_CODE(!vega10_trim_dpm_states(hwmgr, vega10_ps),
3610 "Attempt to Trim DPM States Failed!",
3611 return -1);
3612
3613 data->smc_state_table.gfx_boot_level =
3614 vega10_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
3615 data->smc_state_table.gfx_max_level =
3616 vega10_find_highest_dpm_level(&(data->dpm_table.gfx_table));
3617 data->smc_state_table.mem_boot_level =
3618 vega10_find_lowest_dpm_level(&(data->dpm_table.mem_table));
3619 data->smc_state_table.mem_max_level =
3620 vega10_find_highest_dpm_level(&(data->dpm_table.mem_table));
3621
3622 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
3623 "Attempt to upload DPM Bootup Levels Failed!",
3624 return -1);
3625 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
3626 "Attempt to upload DPM Max Levels Failed!",
3627 return -1);
3628 for(i = data->smc_state_table.gfx_boot_level; i < data->smc_state_table.gfx_max_level; i++)
3629 data->dpm_table.gfx_table.dpm_levels[i].enabled = true;
3630
3631
3632 for(i = data->smc_state_table.mem_boot_level; i < data->smc_state_table.mem_max_level; i++)
3633 data->dpm_table.mem_table.dpm_levels[i].enabled = true;
3634
3635 return 0;
3636}
3637
3638int vega10_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
3639{
3640 struct vega10_hwmgr *data =
3641 (struct vega10_hwmgr *)(hwmgr->backend);
3642
3643 if (data->smu_features[GNLD_DPM_VCE].supported) {
3644 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
3645 enable,
3646 data->smu_features[GNLD_DPM_VCE].smu_feature_bitmap),
3647 "Attempt to Enable/Disable DPM VCE Failed!",
3648 return -1);
3649 data->smu_features[GNLD_DPM_VCE].enabled = enable;
3650 }
3651
3652 return 0;
3653}
3654
3655static int vega10_update_sclk_threshold(struct pp_hwmgr *hwmgr)
3656{
3657 struct vega10_hwmgr *data =
3658 (struct vega10_hwmgr *)(hwmgr->backend);
3659 int result = 0;
3660 uint32_t low_sclk_interrupt_threshold = 0;
3661
3662 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3663 PHM_PlatformCaps_SclkThrottleLowNotification)
3664 && (hwmgr->gfx_arbiter.sclk_threshold !=
3665 data->low_sclk_interrupt_threshold)) {
3666 data->low_sclk_interrupt_threshold =
3667 hwmgr->gfx_arbiter.sclk_threshold;
3668 low_sclk_interrupt_threshold =
3669 data->low_sclk_interrupt_threshold;
3670
3671 data->smc_state_table.pp_table.LowGfxclkInterruptThreshold =
3672 cpu_to_le32(low_sclk_interrupt_threshold);
3673
3674 /* This message will also enable SmcToHost Interrupt */
3675 result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
3676 PPSMC_MSG_SetLowGfxclkInterruptThreshold,
3677 (uint32_t)low_sclk_interrupt_threshold);
3678 }
3679
3680 return result;
3681}
3682
3683static int vega10_set_power_state_tasks(struct pp_hwmgr *hwmgr,
3684 const void *input)
3685{
3686 int tmp_result, result = 0;
3687 struct vega10_hwmgr *data =
3688 (struct vega10_hwmgr *)(hwmgr->backend);
3689 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
3690
3691 tmp_result = vega10_find_dpm_states_clocks_in_dpm_table(hwmgr, input);
3692 PP_ASSERT_WITH_CODE(!tmp_result,
3693 "Failed to find DPM states clocks in DPM table!",
3694 result = tmp_result);
3695
3696 tmp_result = vega10_populate_and_upload_sclk_mclk_dpm_levels(hwmgr, input);
3697 PP_ASSERT_WITH_CODE(!tmp_result,
3698 "Failed to populate and upload SCLK MCLK DPM levels!",
3699 result = tmp_result);
3700
3701 tmp_result = vega10_generate_dpm_level_enable_mask(hwmgr, input);
3702 PP_ASSERT_WITH_CODE(!tmp_result,
3703 "Failed to generate DPM level enabled mask!",
3704 result = tmp_result);
3705
3706 tmp_result = vega10_update_sclk_threshold(hwmgr);
3707 PP_ASSERT_WITH_CODE(!tmp_result,
3708 "Failed to update SCLK threshold!",
3709 result = tmp_result);
3710
3711 result = vega10_copy_table_to_smc(hwmgr->smumgr,
3712 (uint8_t *)pp_table, PPTABLE);
3713 PP_ASSERT_WITH_CODE(!result,
3714 "Failed to upload PPtable!", return result);
3715
3716 data->apply_optimized_settings = false;
3717 data->apply_overdrive_next_settings_mask = 0;
3718
3719 return 0;
3720}
3721
3722static int vega10_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
3723{
3724 struct pp_power_state *ps;
3725 struct vega10_power_state *vega10_ps;
3726
3727 if (hwmgr == NULL)
3728 return -EINVAL;
3729
3730 ps = hwmgr->request_ps;
3731
3732 if (ps == NULL)
3733 return -EINVAL;
3734
3735 vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
3736
3737 if (low)
3738 return vega10_ps->performance_levels[0].gfx_clock;
3739 else
3740 return vega10_ps->performance_levels
3741 [vega10_ps->performance_level_count - 1].gfx_clock;
3742}
3743
3744static int vega10_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
3745{
3746 struct pp_power_state *ps;
3747 struct vega10_power_state *vega10_ps;
3748
3749 if (hwmgr == NULL)
3750 return -EINVAL;
3751
3752 ps = hwmgr->request_ps;
3753
3754 if (ps == NULL)
3755 return -EINVAL;
3756
3757 vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
3758
3759 if (low)
3760 return vega10_ps->performance_levels[0].mem_clock;
3761 else
3762 return vega10_ps->performance_levels
3763 [vega10_ps->performance_level_count-1].mem_clock;
3764}
3765
3766static int vega10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
3767 void *value, int *size)
3768{
3769 uint32_t sclk_idx, mclk_idx, activity_percent = 0;
3770 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
3771 struct vega10_dpm_table *dpm_table = &data->dpm_table;
3772 int ret = 0;
3773
3774 switch (idx) {
3775 case AMDGPU_PP_SENSOR_GFX_SCLK:
3776 ret = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetCurrentGfxclkIndex);
3777 if (!ret) {
3778 vega10_read_arg_from_smc(hwmgr->smumgr, &sclk_idx);
3779 *((uint32_t *)value) = dpm_table->gfx_table.dpm_levels[sclk_idx].value;
3780 *size = 4;
3781 }
3782 break;
3783 case AMDGPU_PP_SENSOR_GFX_MCLK:
3784 ret = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetCurrentUclkIndex);
3785 if (!ret) {
3786 vega10_read_arg_from_smc(hwmgr->smumgr, &mclk_idx);
3787 *((uint32_t *)value) = dpm_table->mem_table.dpm_levels[mclk_idx].value;
3788 *size = 4;
3789 }
3790 break;
3791 case AMDGPU_PP_SENSOR_GPU_LOAD:
3792 ret = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_GetAverageGfxActivity, 0);
3793 if (!ret) {
3794 vega10_read_arg_from_smc(hwmgr->smumgr, &activity_percent);
3795 *((uint32_t *)value) = activity_percent > 100 ? 100 : activity_percent;
3796 *size = 4;
3797 }
3798 break;
3799 case AMDGPU_PP_SENSOR_GPU_TEMP:
3800 *((uint32_t *)value) = vega10_thermal_get_temperature(hwmgr);
3801 *size = 4;
3802 break;
3803 case AMDGPU_PP_SENSOR_UVD_POWER:
3804 *((uint32_t *)value) = data->uvd_power_gated ? 0 : 1;
3805 *size = 4;
3806 break;
3807 case AMDGPU_PP_SENSOR_VCE_POWER:
3808 *((uint32_t *)value) = data->vce_power_gated ? 0 : 1;
3809 *size = 4;
3810 break;
3811 default:
3812 ret = -EINVAL;
3813 break;
3814 }
3815 return ret;
3816}
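/*
 * Hypothetical use of the sensor callback above (real callers go through
 * hwmgr_func->read_sensor from the amdgpu/powerplay layer): reading the
 * current gfx clock, assuming a valid hwmgr, would look roughly like
 *
 *	uint32_t sclk;
 *	int size = sizeof(sclk);
 *
 *	if (!vega10_read_sensor(hwmgr, AMDGPU_PP_SENSOR_GFX_SCLK,
 *			&sclk, &size))
 *		pr_debug("gfx clock level value: %u\n", sclk);
 */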
3817
3818static int vega10_notify_smc_display_change(struct pp_hwmgr *hwmgr,
3819 bool has_disp)
3820{
3821 return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
3822 PPSMC_MSG_SetUclkFastSwitch,
3823 has_disp ? 0 : 1);
3824}
3825
3826int vega10_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
3827 struct pp_display_clock_request *clock_req)
3828{
3829 int result = 0;
3830 enum amd_pp_clock_type clk_type = clock_req->clock_type;
3831 uint32_t clk_freq = clock_req->clock_freq_in_khz / 100;
3832 DSPCLK_e clk_select = 0;
3833 uint32_t clk_request = 0;
3834
3835 switch (clk_type) {
3836 case amd_pp_dcef_clock:
3837 clk_select = DSPCLK_DCEFCLK;
3838 break;
3839 case amd_pp_disp_clock:
3840 clk_select = DSPCLK_DISPCLK;
3841 break;
3842 case amd_pp_pixel_clock:
3843 clk_select = DSPCLK_PIXCLK;
3844 break;
3845 case amd_pp_phy_clock:
3846 clk_select = DSPCLK_PHYCLK;
3847 break;
3848 default:
3849 pr_info("[DisplayClockVoltageRequest]Invalid Clock Type!");
3850 result = -1;
3851 break;
3852 }
3853
3854 if (!result) {
3855 clk_request = (clk_freq << 16) | clk_select;
3856 result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
3857 PPSMC_MSG_RequestDisplayClockByFreq,
3858 clk_request);
3859 }
3860
3861 return result;
3862}
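/*
 * The message argument above packs the request into one 32-bit word: the
 * target frequency (clock_freq_in_khz / 100) in the upper 16 bits and the
 * DSPCLK_e selector in the lower 16. For example, a hypothetical
 * 600000 kHz DCEFCLK request is sent as (6000 << 16) | DSPCLK_DCEFCLK.
 */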
3863
3864static int vega10_notify_smc_display_config_after_ps_adjustment(
3865 struct pp_hwmgr *hwmgr)
3866{
3867 struct vega10_hwmgr *data =
3868 (struct vega10_hwmgr *)(hwmgr->backend);
3869 struct vega10_single_dpm_table *dpm_table =
3870 &data->dpm_table.dcef_table;
3871 uint32_t num_active_disps = 0;
3872 struct cgs_display_info info = {0};
3873 struct PP_Clocks min_clocks = {0};
3874 uint32_t i;
3875 struct pp_display_clock_request clock_req;
3876
3877 info.mode_info = NULL;
3878
3879 cgs_get_active_displays_info(hwmgr->device, &info);
3880
3881 num_active_disps = info.display_count;
3882
3883 if (num_active_disps > 1)
3884 vega10_notify_smc_display_change(hwmgr, false);
3885 else
3886 vega10_notify_smc_display_change(hwmgr, true);
3887
3888 min_clocks.dcefClock = hwmgr->display_config.min_dcef_set_clk;
3889 min_clocks.dcefClockInSR = hwmgr->display_config.min_dcef_deep_sleep_set_clk;
3890
3891 for (i = 0; i < dpm_table->count; i++) {
3892 if (dpm_table->dpm_levels[i].value == min_clocks.dcefClock)
3893 break;
3894 }
3895
3896 if (i < dpm_table->count) {
3897 clock_req.clock_type = amd_pp_dcef_clock;
3898 clock_req.clock_freq_in_khz = dpm_table->dpm_levels[i].value;
3899 if (!vega10_display_clock_voltage_request(hwmgr, &clock_req)) {
3900 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
3901 hwmgr->smumgr, PPSMC_MSG_SetMinDeepSleepDcefclk,
3902 min_clocks.dcefClockInSR),
3903 "Attempt to set divider for DCEFCLK Failed!",);
3904 } else
3905 pr_info("Attempt to set Hard Min for DCEFCLK Failed!");
3906 } else
3907 pr_info("Cannot find requested DCEFCLK!");
3908
3909 return 0;
3910}
3911
3912static int vega10_force_dpm_highest(struct pp_hwmgr *hwmgr)
3913{
3914 struct vega10_hwmgr *data =
3915 (struct vega10_hwmgr *)(hwmgr->backend);
3916
3917 data->smc_state_table.gfx_boot_level =
3918 data->smc_state_table.gfx_max_level =
3919 vega10_find_highest_dpm_level(&(data->dpm_table.gfx_table));
3920 data->smc_state_table.mem_boot_level =
3921 data->smc_state_table.mem_max_level =
3922 vega10_find_highest_dpm_level(&(data->dpm_table.mem_table));
3923
3924 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
3925 "Failed to upload boot level to highest!",
3926 return -1);
3927
3928 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
3929 "Failed to upload dpm max level to highest!",
3930 return -1);
3931
3932 return 0;
3933}
3934
3935static int vega10_force_dpm_lowest(struct pp_hwmgr *hwmgr)
3936{
3937 struct vega10_hwmgr *data =
3938 (struct vega10_hwmgr *)(hwmgr->backend);
3939
3940 data->smc_state_table.gfx_boot_level =
3941 data->smc_state_table.gfx_max_level =
3942 vega10_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
3943 data->smc_state_table.mem_boot_level =
3944 data->smc_state_table.mem_max_level =
3945 vega10_find_lowest_dpm_level(&(data->dpm_table.mem_table));
3946
3947 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
3948 "Failed to upload boot level to highest!",
3949 return -1);
3950
3951 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
3952 "Failed to upload dpm max level to highest!",
3953 return -1);
3954
3955 return 0;
3956
3957}
3958
3959static int vega10_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
3960{
3961 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
3962
3963 data->smc_state_table.gfx_boot_level =
3964 vega10_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
3965 data->smc_state_table.gfx_max_level =
3966 vega10_find_highest_dpm_level(&(data->dpm_table.gfx_table));
3967 data->smc_state_table.mem_boot_level =
3968 vega10_find_lowest_dpm_level(&(data->dpm_table.mem_table));
3969 data->smc_state_table.mem_max_level =
3970 vega10_find_highest_dpm_level(&(data->dpm_table.mem_table));
3971
3972 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
3973 "Failed to upload DPM Bootup Levels!",
3974 return -1);
3975
3976 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
3977 "Failed to upload DPM Max Levels!",
3978 return -1);
3979 return 0;
3980}
3981
3982static int vega10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
3983 enum amd_dpm_forced_level level)
3984{
3985 int ret = 0;
3986
3987 switch (level) {
3988 case AMD_DPM_FORCED_LEVEL_HIGH:
3989 ret = vega10_force_dpm_highest(hwmgr);
3990 if (ret)
3991 return ret;
3992 break;
3993 case AMD_DPM_FORCED_LEVEL_LOW:
3994 ret = vega10_force_dpm_lowest(hwmgr);
3995 if (ret)
3996 return ret;
3997 break;
3998 case AMD_DPM_FORCED_LEVEL_AUTO:
3999 ret = vega10_unforce_dpm_levels(hwmgr);
4000 if (ret)
4001 return ret;
4002 break;
4003 default:
4004 break;
4005 }
4006
4007 hwmgr->dpm_level = level;
4008
4009 return ret;
4010}
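/*
 * The forced-level handler above maps the generic request onto the soft
 * min/max helpers: HIGH pins both limits to the highest enabled level, LOW
 * pins both to the lowest, AUTO restores the full lowest..highest range,
 * and any other level (e.g. MANUAL) only records hwmgr->dpm_level.
 */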
4011
4012static int vega10_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
4013{
4014	int result = 0;
4015
4016 switch (mode) {
4017 case AMD_FAN_CTRL_NONE:
4018 result = vega10_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
4019 break;
4020 case AMD_FAN_CTRL_MANUAL:
4021 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
4022 PHM_PlatformCaps_MicrocodeFanControl))
4023 result = vega10_fan_ctrl_stop_smc_fan_control(hwmgr);
4024 break;
4025 case AMD_FAN_CTRL_AUTO:
4026 result = vega10_fan_ctrl_set_static_mode(hwmgr, mode);
4027 if (!result)
4028 result = vega10_fan_ctrl_start_smc_fan_control(hwmgr);
4029 break;
4030 default:
4031 break;
4032 }
4033 return result;
4034}
4035
4036static int vega10_get_fan_control_mode(struct pp_hwmgr *hwmgr)
4037{
4038	struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4039
4040 if (data->smu_features[GNLD_FAN_CONTROL].enabled == false)
4041 return AMD_FAN_CTRL_MANUAL;
4042 else
4043 return AMD_FAN_CTRL_AUTO;
4044}
4045
4046static int vega10_get_dal_power_level(struct pp_hwmgr *hwmgr,
4047 struct amd_pp_simple_clock_info *info)
4048{
4049 struct phm_ppt_v2_information *table_info =
4050 (struct phm_ppt_v2_information *)hwmgr->pptable;
4051 struct phm_clock_and_voltage_limits *max_limits =
4052 &table_info->max_clock_voltage_on_ac;
4053
4054 info->engine_max_clock = max_limits->sclk;
4055 info->memory_max_clock = max_limits->mclk;
4056
4057 return 0;
4058}
4059
4060static void vega10_get_sclks(struct pp_hwmgr *hwmgr,
4061 struct pp_clock_levels_with_latency *clocks)
4062{
4063 struct phm_ppt_v2_information *table_info =
4064 (struct phm_ppt_v2_information *)hwmgr->pptable;
4065 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
4066 table_info->vdd_dep_on_sclk;
4067 uint32_t i;
4068
4069 for (i = 0; i < dep_table->count; i++) {
4070 if (dep_table->entries[i].clk) {
4071 clocks->data[clocks->num_levels].clocks_in_khz =
4072 dep_table->entries[i].clk;
4073 clocks->num_levels++;
4074 }
4075 }
4076
4077}
4078
4079static uint32_t vega10_get_mem_latency(struct pp_hwmgr *hwmgr,
4080 uint32_t clock)
4081{
4082 if (clock >= MEM_FREQ_LOW_LATENCY &&
4083 clock < MEM_FREQ_HIGH_LATENCY)
4084 return MEM_LATENCY_HIGH;
4085 else if (clock >= MEM_FREQ_HIGH_LATENCY)
4086 return MEM_LATENCY_LOW;
4087 else
4088 return MEM_LATENCY_ERR;
4089}
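/*
 * The helper above is a three-way bucket: clocks below
 * MEM_FREQ_LOW_LATENCY report MEM_LATENCY_ERR, clocks in
 * [MEM_FREQ_LOW_LATENCY, MEM_FREQ_HIGH_LATENCY) report MEM_LATENCY_HIGH,
 * and clocks at or above MEM_FREQ_HIGH_LATENCY report MEM_LATENCY_LOW.
 * vega10_get_memclocks() below records the result per level in
 * data->mclk_latency_table.
 */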
4090
4091static void vega10_get_memclocks(struct pp_hwmgr *hwmgr,
4092 struct pp_clock_levels_with_latency *clocks)
4093{
4094 struct phm_ppt_v2_information *table_info =
4095 (struct phm_ppt_v2_information *)hwmgr->pptable;
4096 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
4097 table_info->vdd_dep_on_mclk;
4098 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4099 uint32_t i;
4100
4101 clocks->num_levels = 0;
4102 data->mclk_latency_table.count = 0;
4103
4104 for (i = 0; i < dep_table->count; i++) {
4105 if (dep_table->entries[i].clk) {
4106 clocks->data[clocks->num_levels].clocks_in_khz =
4107 data->mclk_latency_table.entries
4108 [data->mclk_latency_table.count].frequency =
4109 dep_table->entries[i].clk;
4110 clocks->data[clocks->num_levels].latency_in_us =
4111 data->mclk_latency_table.entries
4112 [data->mclk_latency_table.count].latency =
4113 vega10_get_mem_latency(hwmgr,
4114 dep_table->entries[i].clk);
4115 clocks->num_levels++;
4116 data->mclk_latency_table.count++;
4117 }
4118 }
4119}
4120
4121static void vega10_get_dcefclocks(struct pp_hwmgr *hwmgr,
4122 struct pp_clock_levels_with_latency *clocks)
4123{
4124 struct phm_ppt_v2_information *table_info =
4125 (struct phm_ppt_v2_information *)hwmgr->pptable;
4126 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
4127 table_info->vdd_dep_on_dcefclk;
4128 uint32_t i;
4129
4130 for (i = 0; i < dep_table->count; i++) {
4131 clocks->data[i].clocks_in_khz = dep_table->entries[i].clk;
4132 clocks->data[i].latency_in_us = 0;
4133 clocks->num_levels++;
4134 }
4135}
4136
4137static void vega10_get_socclocks(struct pp_hwmgr *hwmgr,
4138 struct pp_clock_levels_with_latency *clocks)
4139{
4140 struct phm_ppt_v2_information *table_info =
4141 (struct phm_ppt_v2_information *)hwmgr->pptable;
4142 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
4143 table_info->vdd_dep_on_socclk;
4144 uint32_t i;
4145
4146 for (i = 0; i < dep_table->count; i++) {
4147 clocks->data[i].clocks_in_khz = dep_table->entries[i].clk;
4148 clocks->data[i].latency_in_us = 0;
4149 clocks->num_levels++;
4150 }
4151}
4152
4153static int vega10_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
4154 enum amd_pp_clock_type type,
4155 struct pp_clock_levels_with_latency *clocks)
4156{
4157 switch (type) {
4158 case amd_pp_sys_clock:
4159 vega10_get_sclks(hwmgr, clocks);
4160 break;
4161 case amd_pp_mem_clock:
4162 vega10_get_memclocks(hwmgr, clocks);
4163 break;
4164 case amd_pp_dcef_clock:
4165 vega10_get_dcefclocks(hwmgr, clocks);
4166 break;
4167 case amd_pp_soc_clock:
4168 vega10_get_socclocks(hwmgr, clocks);
4169 break;
4170 default:
4171 return -1;
4172 }
4173
4174 return 0;
4175}
4176
4177static int vega10_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
4178 enum amd_pp_clock_type type,
4179 struct pp_clock_levels_with_voltage *clocks)
4180{
4181 struct phm_ppt_v2_information *table_info =
4182 (struct phm_ppt_v2_information *)hwmgr->pptable;
4183 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
4184 uint32_t i;
4185
4186 switch (type) {
4187 case amd_pp_mem_clock:
4188 dep_table = table_info->vdd_dep_on_mclk;
4189 break;
4190 case amd_pp_dcef_clock:
4191 dep_table = table_info->vdd_dep_on_dcefclk;
4192 break;
4193 case amd_pp_disp_clock:
4194 dep_table = table_info->vdd_dep_on_dispclk;
4195 break;
4196 case amd_pp_pixel_clock:
4197 dep_table = table_info->vdd_dep_on_pixclk;
4198 break;
4199 case amd_pp_phy_clock:
4200 dep_table = table_info->vdd_dep_on_phyclk;
4201 break;
4202 default:
4203 return -1;
4204 }
4205
4206 for (i = 0; i < dep_table->count; i++) {
4207 clocks->data[i].clocks_in_khz = dep_table->entries[i].clk;
4208 clocks->data[i].voltage_in_mv = (uint32_t)(table_info->vddc_lookup_table->
4209 entries[dep_table->entries[i].vddInd].us_vdd);
4210 clocks->num_levels++;
4211 }
4212
4213 if (i < dep_table->count)
4214 return -1;
4215
4216 return 0;
4217}
4218
4219static int vega10_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
4220 struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
4221{
4222 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4223 Watermarks_t *table = &(data->smc_state_table.water_marks_table);
4224 int result = 0;
4225 uint32_t i;
4226
4227 if (!data->registry_data.disable_water_mark) {
4228 for (i = 0; i < wm_with_clock_ranges->num_wm_sets_dmif; i++) {
4229 table->WatermarkRow[WM_DCEFCLK][i].MinClock =
4230 cpu_to_le16((uint16_t)
4231 (wm_with_clock_ranges->wm_sets_dmif[i].wm_min_dcefclk_in_khz) /
4232 100);
4233 table->WatermarkRow[WM_DCEFCLK][i].MaxClock =
4234 cpu_to_le16((uint16_t)
4235 (wm_with_clock_ranges->wm_sets_dmif[i].wm_max_dcefclk_in_khz) /
4236 100);
4237 table->WatermarkRow[WM_DCEFCLK][i].MinUclk =
4238 cpu_to_le16((uint16_t)
4239 (wm_with_clock_ranges->wm_sets_dmif[i].wm_min_memclk_in_khz) /
4240 100);
4241 table->WatermarkRow[WM_DCEFCLK][i].MaxUclk =
4242 cpu_to_le16((uint16_t)
4243 (wm_with_clock_ranges->wm_sets_dmif[i].wm_max_memclk_in_khz) /
4244 100);
4245 table->WatermarkRow[WM_DCEFCLK][i].WmSetting = (uint8_t)
4246 wm_with_clock_ranges->wm_sets_dmif[i].wm_set_id;
4247 }
4248
4249 for (i = 0; i < wm_with_clock_ranges->num_wm_sets_mcif; i++) {
4250 table->WatermarkRow[WM_SOCCLK][i].MinClock =
4251 cpu_to_le16((uint16_t)
4252 (wm_with_clock_ranges->wm_sets_mcif[i].wm_min_socclk_in_khz) /
4253 100);
4254 table->WatermarkRow[WM_SOCCLK][i].MaxClock =
4255 cpu_to_le16((uint16_t)
4256 (wm_with_clock_ranges->wm_sets_mcif[i].wm_max_socclk_in_khz) /
4257 100);
4258 table->WatermarkRow[WM_SOCCLK][i].MinUclk =
4259 cpu_to_le16((uint16_t)
4260 (wm_with_clock_ranges->wm_sets_mcif[i].wm_min_memclk_in_khz) /
4261 100);
4262 table->WatermarkRow[WM_SOCCLK][i].MaxUclk =
4263 cpu_to_le16((uint16_t)
4264 (wm_with_clock_ranges->wm_sets_mcif[i].wm_max_memclk_in_khz) /
4265 100);
4266 table->WatermarkRow[WM_SOCCLK][i].WmSetting = (uint8_t)
4267 wm_with_clock_ranges->wm_sets_mcif[i].wm_set_id;
4268 }
4269 data->water_marks_bitmap = WaterMarksExist;
4270 }
4271
4272 return result;
4273}
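/*
 * Unit note for the watermark packing above: DAL hands in clock bounds in
 * kHz while the SMU table stores 16-bit values scaled down by 100. Also
 * note that the (uint16_t) cast binds before the division, so a
 * hypothetical 600000 kHz bound is truncated to 16 bits first and then
 * divided, rather than being divided down to 6000 first.
 */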
4274
4275static int vega10_force_clock_level(struct pp_hwmgr *hwmgr,
4276 enum pp_clock_type type, uint32_t mask)
4277{
4278 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4279	int i;
4280
4281 if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
4282 return -EINVAL;
4283
4284 switch (type) {
4285 case PP_SCLK:
4286 for (i = 0; i < 32; i++) {
4287 if (mask & (1 << i))
4288 break;
4289 }
4290	data->smc_state_table.gfx_boot_level = i;
4291
4292 for (i = 31; i >= 0; i--) {
4293 if (mask & (1 << i))
4294 break;
4295 }
4296 data->smc_state_table.gfx_max_level = i;
4297
4298 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
4299 "Failed to upload boot level to lowest!",
4300 return -EINVAL);
4301
4302 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
4303 "Failed to upload dpm max level to highest!",
4304 return -EINVAL);
4305 break;
4306
4307 case PP_MCLK:
4308 for (i = 0; i < 32; i++) {
4309 if (mask & (1 << i))
4310 break;
4311 }
4312 data->smc_state_table.mem_boot_level = i;
4313
4314 for (i = 31; i >= 0; i--) {
4315 if (mask & (1 << i))
4316 break;
4317 }
4318 data->smc_state_table.mem_max_level = i;
4319
4320 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
4321 "Failed to upload boot level to lowest!",
4322 return -EINVAL);
4323
4324 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
4325 "Failed to upload dpm max level to highest!",
4326 return -EINVAL);
4327
4328	break;
4329
4330 case PP_PCIE:
4331 default:
4332 break;
4333 }
4334
4335 return 0;
4336}
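/*
 * Example of the mask handling above (hypothetical request): asking for
 * sclk levels 1 and 2, e.g. via the pp_dpm_sclk sysfs file, arrives here
 * as mask 0x6. The first loop finds bit 1 (boot level), the second finds
 * bit 2 (max level), and both indices are pushed to the SMU as soft
 * limits.
 */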
4337
4338static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
4339 enum pp_clock_type type, char *buf)
4340{
4341 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4342 struct vega10_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table);
4343 struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table);
4344 struct vega10_pcie_table *pcie_table = &(data->dpm_table.pcie_table);
4345 int i, now, size = 0;
4346
4347 switch (type) {
4348 case PP_SCLK:
4349 if (data->registry_data.sclk_dpm_key_disabled)
4350 break;
4351
4352 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr->smumgr,
4353 PPSMC_MSG_GetCurrentGfxclkIndex),
4354 "Attempt to get current sclk index Failed!",
4355 return -1);
4356 PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(hwmgr->smumgr,
4357 &now),
4358 "Attempt to read sclk index Failed!",
4359 return -1);
4360
4361 for (i = 0; i < sclk_table->count; i++)
4362 size += sprintf(buf + size, "%d: %uMhz %s\n",
4363 i, sclk_table->dpm_levels[i].value / 100,
4364 (i == now) ? "*" : "");
4365 break;
4366 case PP_MCLK:
4367 if (data->registry_data.mclk_dpm_key_disabled)
4368 break;
4369
4370 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr->smumgr,
4371 PPSMC_MSG_GetCurrentUclkIndex),
4372 "Attempt to get current mclk index Failed!",
4373 return -1);
4374 PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(hwmgr->smumgr,
4375 &now),
4376 "Attempt to read mclk index Failed!",
4377 return -1);
4378
4379 for (i = 0; i < mclk_table->count; i++)
4380 size += sprintf(buf + size, "%d: %uMhz %s\n",
4381 i, mclk_table->dpm_levels[i].value / 100,
4382 (i == now) ? "*" : "");
4383 break;
4384 case PP_PCIE:
4385 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr->smumgr,
4386 PPSMC_MSG_GetCurrentLinkIndex),
4387 "Attempt to get current mclk index Failed!",
4388 return -1);
4389 PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(hwmgr->smumgr,
4390 &now),
4391 "Attempt to read mclk index Failed!",
4392 return -1);
4393
4394 for (i = 0; i < pcie_table->count; i++)
4395 size += sprintf(buf + size, "%d: %s %s\n", i,
4396 (pcie_table->pcie_gen[i] == 0) ? "2.5GB, x1" :
4397 (pcie_table->pcie_gen[i] == 1) ? "5.0GB, x16" :
4398 (pcie_table->pcie_gen[i] == 2) ? "8.0GB, x16" : "",
4399 (i == now) ? "*" : "");
4400 break;
4401 default:
4402 break;
4403 }
4404 return size;
4405}
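/*
 * Sample output of the printer above for PP_SCLK (hypothetical levels;
 * values are stored in 10 kHz units and shown as MHz):
 *
 *	0: 852Mhz
 *	1: 991Mhz *
 *	2: 1138Mhz
 *
 * where '*' marks the index the SMU reported as current.
 */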
4406
4407static int vega10_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
4408{
4409 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4410 int result = 0;
4411 uint32_t num_turned_on_displays = 1;
4412 Watermarks_t *wm_table = &(data->smc_state_table.water_marks_table);
4413 struct cgs_display_info info = {0};
4414
4415 if ((data->water_marks_bitmap & WaterMarksExist) &&
4416 !(data->water_marks_bitmap & WaterMarksLoaded)) {
4417 result = vega10_copy_table_to_smc(hwmgr->smumgr,
4418 (uint8_t *)wm_table, WMTABLE);
4419		PP_ASSERT_WITH_CODE(!result, "Failed to update WMTABLE!", return -EINVAL);
4420 data->water_marks_bitmap |= WaterMarksLoaded;
4421 }
4422
4423 if (data->water_marks_bitmap & WaterMarksLoaded) {
4424 cgs_get_active_displays_info(hwmgr->device, &info);
4425 num_turned_on_displays = info.display_count;
4426 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
4427 PPSMC_MSG_NumOfDisplays, num_turned_on_displays);
4428 }
4429
4430 return result;
4431}
4432
4433int vega10_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
4434{
4435 struct vega10_hwmgr *data =
4436 (struct vega10_hwmgr *)(hwmgr->backend);
4437
4438 if (data->smu_features[GNLD_DPM_UVD].supported) {
4439 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
4440 enable,
4441 data->smu_features[GNLD_DPM_UVD].smu_feature_bitmap),
4442 "Attempt to Enable/Disable DPM UVD Failed!",
4443 return -1);
4444 data->smu_features[GNLD_DPM_UVD].enabled = enable;
4445 }
4446 return 0;
4447}
4448
4449static int vega10_power_gate_vce(struct pp_hwmgr *hwmgr, bool bgate)
4450{
4451 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4452
4453 data->vce_power_gated = bgate;
4454 return vega10_enable_disable_vce_dpm(hwmgr, !bgate);
4455}
4456
4457static int vega10_power_gate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
4458{
4459 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4460
4461 data->uvd_power_gated = bgate;
4462 return vega10_enable_disable_uvd_dpm(hwmgr, !bgate);
4463}
4464
4465static inline bool vega10_are_power_levels_equal(
4466 const struct vega10_performance_level *pl1,
4467 const struct vega10_performance_level *pl2)
4468{
4469 return ((pl1->soc_clock == pl2->soc_clock) &&
4470 (pl1->gfx_clock == pl2->gfx_clock) &&
4471 (pl1->mem_clock == pl2->mem_clock));
4472}
4473
4474static int vega10_check_states_equal(struct pp_hwmgr *hwmgr,
4475 const struct pp_hw_power_state *pstate1,
4476 const struct pp_hw_power_state *pstate2, bool *equal)
4477{
4478 const struct vega10_power_state *psa;
4479 const struct vega10_power_state *psb;
4480 int i;
4481
4482 if (pstate1 == NULL || pstate2 == NULL || equal == NULL)
4483 return -EINVAL;
4484
4485 psa = cast_const_phw_vega10_power_state(pstate1);
4486 psb = cast_const_phw_vega10_power_state(pstate2);
4487 /* If the two states don't even have the same number of performance levels they cannot be the same state. */
4488 if (psa->performance_level_count != psb->performance_level_count) {
4489 *equal = false;
4490 return 0;
4491 }
4492
4493 for (i = 0; i < psa->performance_level_count; i++) {
4494 if (!vega10_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) {
4495 /* If we have found even one performance level pair that is different the states are different. */
4496 *equal = false;
4497 return 0;
4498 }
4499 }
4500
4501 /* If all performance levels are the same try to use the UVD clocks to break the tie.*/
4502 *equal = ((psa->uvd_clks.vclk == psb->uvd_clks.vclk) && (psa->uvd_clks.dclk == psb->uvd_clks.dclk));
4503 *equal &= ((psa->vce_clks.evclk == psb->vce_clks.evclk) && (psa->vce_clks.ecclk == psb->vce_clks.ecclk));
4504 *equal &= (psa->sclk_threshold == psb->sclk_threshold);
4505
4506 return 0;
4507}
4508
4509static bool
4510vega10_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
4511{
4512 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4513 bool is_update_required = false;
4514 struct cgs_display_info info = {0, 0, NULL};
4515
4516 cgs_get_active_displays_info(hwmgr->device, &info);
4517
4518 if (data->display_timing.num_existing_displays != info.display_count)
4519 is_update_required = true;
4520
4521 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
4522 if (data->display_timing.min_clock_in_sr != hwmgr->display_config.min_core_set_clock_in_sr)
4523 is_update_required = true;
4524 }
4525
4526 return is_update_required;
4527}
4528
4529static int vega10_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
4530{
4531 int tmp_result, result = 0;
4532
4533 tmp_result = (vega10_is_dpm_running(hwmgr)) ? 0 : -1;
4534 PP_ASSERT_WITH_CODE(tmp_result == 0,
4535 "DPM is not running right now, no need to disable DPM!",
4536 return 0);
4537
4538 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
4539 PHM_PlatformCaps_ThermalController))
4540 vega10_disable_thermal_protection(hwmgr);
4541
4542 tmp_result = vega10_disable_power_containment(hwmgr);
4543 PP_ASSERT_WITH_CODE((tmp_result == 0),
4544 "Failed to disable power containment!", result = tmp_result);
4545
4546 tmp_result = vega10_avfs_enable(hwmgr, false);
4547 PP_ASSERT_WITH_CODE((tmp_result == 0),
4548 "Failed to disable AVFS!", result = tmp_result);
4549
4550 tmp_result = vega10_stop_dpm(hwmgr, SMC_DPM_FEATURES);
4551 PP_ASSERT_WITH_CODE((tmp_result == 0),
4552 "Failed to stop DPM!", result = tmp_result);
4553
4554 tmp_result = vega10_disable_ulv(hwmgr);
4555 PP_ASSERT_WITH_CODE((tmp_result == 0),
4556 "Failed to disable ulv!", result = tmp_result);
4557
4558 return result;
4559}
4560
4561static int vega10_power_off_asic(struct pp_hwmgr *hwmgr)
4562{
4563 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4564 int result;
4565
4566 result = vega10_disable_dpm_tasks(hwmgr);
4567 PP_ASSERT_WITH_CODE((0 == result),
4568 "[disable_dpm_tasks] Failed to disable DPM!",
4569 );
4570 data->water_marks_bitmap &= ~(WaterMarksLoaded);
4571
4572 return result;
4573}
4574
4575static void vega10_find_min_clock_index(struct pp_hwmgr *hwmgr,
4576 uint32_t *sclk_idx, uint32_t *mclk_idx,
4577 uint32_t min_sclk, uint32_t min_mclk)
4578{
4579 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4580 struct vega10_dpm_table *dpm_table = &(data->dpm_table);
4581 uint32_t i;
4582
4583 for (i = 0; i < dpm_table->gfx_table.count; i++) {
4584 if (dpm_table->gfx_table.dpm_levels[i].enabled &&
4585 dpm_table->gfx_table.dpm_levels[i].value >= min_sclk) {
4586 *sclk_idx = i;
4587 break;
4588 }
4589 }
4590
4591 for (i = 0; i < dpm_table->mem_table.count; i++) {
4592 if (dpm_table->mem_table.dpm_levels[i].enabled &&
4593 dpm_table->mem_table.dpm_levels[i].value >= min_mclk) {
4594 *mclk_idx = i;
4595 break;
4596 }
4597 }
4598}
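/*
 * Behaviour sketch for the lookup above (hypothetical table): with enabled
 * gfx levels {85200, 99100, 113800} and min_sclk == 90000, *sclk_idx is
 * set to 1, the first enabled level not below the requested minimum. If no
 * level qualifies, the index is left untouched, which is why the caller
 * below pre-initialises both indices to ~0.
 */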
4599
4600static int vega10_set_power_profile_state(struct pp_hwmgr *hwmgr,
4601 struct amd_pp_profile *request)
4602{
4603 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4604	uint32_t sclk_idx = ~0, mclk_idx = ~0;
4605
4606 if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_AUTO)
4607 return -EINVAL;
4608
4609 vega10_find_min_clock_index(hwmgr, &sclk_idx, &mclk_idx,
4610 request->min_sclk, request->min_mclk);
4611
4612	if (sclk_idx != ~0) {
4613 if (!data->registry_data.sclk_dpm_key_disabled)
4614 PP_ASSERT_WITH_CODE(
4615 !smum_send_msg_to_smc_with_parameter(
4616 hwmgr->smumgr,
4617 PPSMC_MSG_SetSoftMinGfxclkByIndex,
4618 sclk_idx),
4619 "Failed to set soft min sclk index!",
4620 return -EINVAL);
4621 }
4622
4623	if (mclk_idx != ~0) {
4624 if (!data->registry_data.mclk_dpm_key_disabled)
4625 PP_ASSERT_WITH_CODE(
4626 !smum_send_msg_to_smc_with_parameter(
4627 hwmgr->smumgr,
4628 PPSMC_MSG_SetSoftMinUclkByIndex,
4629 mclk_idx),
4630 "Failed to set soft min mclk index!",
4631 return -EINVAL);
4632 }
4633
4634 return 0;
4635}
4636
4637static const struct pp_hwmgr_func vega10_hwmgr_funcs = {
4638 .backend_init = vega10_hwmgr_backend_init,
4639 .backend_fini = vega10_hwmgr_backend_fini,
4640 .asic_setup = vega10_setup_asic_task,
4641 .dynamic_state_management_enable = vega10_enable_dpm_tasks,
4642	.dynamic_state_management_disable = vega10_disable_dpm_tasks,
4643 .get_num_of_pp_table_entries =
4644 vega10_get_number_of_powerplay_table_entries,
4645 .get_power_state_size = vega10_get_power_state_size,
4646 .get_pp_table_entry = vega10_get_pp_table_entry,
4647 .patch_boot_state = vega10_patch_boot_state,
4648 .apply_state_adjust_rules = vega10_apply_state_adjust_rules,
4649 .power_state_set = vega10_set_power_state_tasks,
4650 .get_sclk = vega10_dpm_get_sclk,
4651 .get_mclk = vega10_dpm_get_mclk,
4652 .notify_smc_display_config_after_ps_adjustment =
4653 vega10_notify_smc_display_config_after_ps_adjustment,
4654 .force_dpm_level = vega10_dpm_force_dpm_level,
4655 .get_temperature = vega10_thermal_get_temperature,
4656 .stop_thermal_controller = vega10_thermal_stop_thermal_controller,
4657 .get_fan_speed_info = vega10_fan_ctrl_get_fan_speed_info,
4658 .get_fan_speed_percent = vega10_fan_ctrl_get_fan_speed_percent,
4659 .set_fan_speed_percent = vega10_fan_ctrl_set_fan_speed_percent,
4660 .reset_fan_speed_to_default =
4661 vega10_fan_ctrl_reset_fan_speed_to_default,
4662 .get_fan_speed_rpm = vega10_fan_ctrl_get_fan_speed_rpm,
4663 .set_fan_speed_rpm = vega10_fan_ctrl_set_fan_speed_rpm,
4664 .uninitialize_thermal_controller =
4665 vega10_thermal_ctrl_uninitialize_thermal_controller,
4666 .set_fan_control_mode = vega10_set_fan_control_mode,
4667 .get_fan_control_mode = vega10_get_fan_control_mode,
4668 .read_sensor = vega10_read_sensor,
4669 .get_dal_power_level = vega10_get_dal_power_level,
4670 .get_clock_by_type_with_latency = vega10_get_clock_by_type_with_latency,
4671 .get_clock_by_type_with_voltage = vega10_get_clock_by_type_with_voltage,
4672 .set_watermarks_for_clocks_ranges = vega10_set_watermarks_for_clocks_ranges,
4673 .display_clock_voltage_request = vega10_display_clock_voltage_request,
4674 .force_clock_level = vega10_force_clock_level,
4675 .print_clock_levels = vega10_print_clock_levels,
4676 .display_config_changed = vega10_display_configuration_changed_task,
4677 .powergate_uvd = vega10_power_gate_uvd,
4678 .powergate_vce = vega10_power_gate_vce,
4679 .check_states_equal = vega10_check_states_equal,
4680 .check_smc_update_required_for_display_configuration =
4681 vega10_check_smc_update_required_for_display_configuration,
4682 .power_off_asic = vega10_power_off_asic,
4683 .disable_smc_firmware_ctf = vega10_thermal_disable_alert,
4684	.set_power_profile_state = vega10_set_power_profile_state,
4685};
4686
4687int vega10_hwmgr_init(struct pp_hwmgr *hwmgr)
4688{
4689 hwmgr->hwmgr_func = &vega10_hwmgr_funcs;
4690 hwmgr->pptable_func = &vega10_pptable_funcs;
4691 pp_vega10_thermal_initialize(hwmgr);
4692 return 0;
4693}
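/*
 * Rough sketch of how this entry point is reached (simplified; the exact
 * call chain lives in the common powerplay code): the core allocates a
 * struct pp_hwmgr, identifies the ASIC as Vega10 and calls
 * vega10_hwmgr_init(hwmgr), which only installs the hwmgr/pptable function
 * tables above and sets up the thermal defaults.
 */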