mirror_ubuntu-artful-kernel: drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
1/*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#include <linux/module.h>
24#include <linux/slab.h>
25#include <linux/fb.h>
26#include "linux/delay.h"
27
28#include "hwmgr.h"
29#include "amd_powerplay.h"
30#include "vega10_smumgr.h"
31#include "hardwaremanager.h"
32#include "ppatomfwctrl.h"
33#include "atomfirmware.h"
34#include "cgs_common.h"
35#include "vega10_powertune.h"
36#include "smu9.h"
37#include "smu9_driver_if.h"
38#include "vega10_inc.h"
39#include "pp_soc15.h"
40#include "pppcielanes.h"
41#include "vega10_hwmgr.h"
42#include "vega10_processpptables.h"
43#include "vega10_pptable.h"
44#include "vega10_thermal.h"
45#include "pp_debug.h"
46#include "pp_acpi.h"
47#include "amd_pcie_helpers.h"
48#include "cgs_linux.h"
49#include "ppinterrupt.h"
50
51
52#define VOLTAGE_SCALE 4
53#define VOLTAGE_VID_OFFSET_SCALE1 625
54#define VOLTAGE_VID_OFFSET_SCALE2 100
55
56#define HBM_MEMORY_CHANNEL_WIDTH 128
57
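/* Number of memory channels for each DF IntLvNumChan encoding (see the
 * DF_CS_AON0_DramBaseAddress0 field definitions below); entries of 0 are
 * encodings that are not used here.
 */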
58uint32_t channel_number[] = {1, 2, 0, 4, 0, 8, 0, 16, 2};
59
60#define MEM_FREQ_LOW_LATENCY 25000
61#define MEM_FREQ_HIGH_LATENCY 80000
62#define MEM_LATENCY_HIGH 245
63#define MEM_LATENCY_LOW 35
64#define MEM_LATENCY_ERR 0xFFFF
65
66#define mmDF_CS_AON0_DramBaseAddress0 0x0044
67#define mmDF_CS_AON0_DramBaseAddress0_BASE_IDX 0
68
69//DF_CS_AON0_DramBaseAddress0
70#define DF_CS_AON0_DramBaseAddress0__AddrRngVal__SHIFT 0x0
71#define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn__SHIFT 0x1
72#define DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT 0x4
73#define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel__SHIFT 0x8
74#define DF_CS_AON0_DramBaseAddress0__DramBaseAddr__SHIFT 0xc
75#define DF_CS_AON0_DramBaseAddress0__AddrRngVal_MASK 0x00000001L
76#define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn_MASK 0x00000002L
77#define DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK 0x000000F0L
78#define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel_MASK 0x00000700L
79#define DF_CS_AON0_DramBaseAddress0__DramBaseAddr_MASK 0xFFFFF000L
80
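/* Illustrative only (not driver logic in this file): given a raw
 * DF_CS_AON0_DramBaseAddress0 value in a hypothetical variable reg_val,
 * the channel count would be derived from the fields above together with
 * the channel_number[] table:
 *
 *	num_channels = channel_number[
 *		(reg_val & DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK) >>
 *		DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT];
 */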
81const ULONG PhwVega10_Magic = (ULONG)(PHM_VIslands_Magic);
82
83struct vega10_power_state *cast_phw_vega10_power_state(
84 struct pp_hw_power_state *hw_ps)
85{
86 PP_ASSERT_WITH_CODE((PhwVega10_Magic == hw_ps->magic),
87 "Invalid Powerstate Type!",
88 return NULL;);
89
90 return (struct vega10_power_state *)hw_ps;
91}
92
93const struct vega10_power_state *cast_const_phw_vega10_power_state(
94 const struct pp_hw_power_state *hw_ps)
95{
96 PP_ASSERT_WITH_CODE((PhwVega10_Magic == hw_ps->magic),
97 "Invalid Powerstate Type!",
98 return NULL;);
99
100 return (const struct vega10_power_state *)hw_ps;
101}
102
103static void vega10_set_default_registry_data(struct pp_hwmgr *hwmgr)
104{
105 struct vega10_hwmgr *data =
106 (struct vega10_hwmgr *)(hwmgr->backend);
107
108 data->registry_data.sclk_dpm_key_disabled =
109 hwmgr->feature_mask & PP_SCLK_DPM_MASK ? false : true;
110 data->registry_data.socclk_dpm_key_disabled =
111 hwmgr->feature_mask & PP_SOCCLK_DPM_MASK ? false : true;
112 data->registry_data.mclk_dpm_key_disabled =
113 hwmgr->feature_mask & PP_MCLK_DPM_MASK ? false : true;
114 data->registry_data.pcie_dpm_key_disabled =
115 hwmgr->feature_mask & PP_PCIE_DPM_MASK ? false : true;
116
117 data->registry_data.dcefclk_dpm_key_disabled =
118 hwmgr->feature_mask & PP_DCEFCLK_DPM_MASK ? false : true;
119
120 if (hwmgr->feature_mask & PP_POWER_CONTAINMENT_MASK) {
121 data->registry_data.power_containment_support = 1;
122 data->registry_data.enable_pkg_pwr_tracking_feature = 1;
123 data->registry_data.enable_tdc_limit_feature = 1;
124 }
125
126 data->registry_data.clock_stretcher_support =
127 hwmgr->feature_mask & PP_CLOCK_STRETCH_MASK ? true : false;
128
129 data->registry_data.disable_water_mark = 0;
130
131 data->registry_data.fan_control_support = 1;
132 data->registry_data.thermal_support = 1;
133 data->registry_data.fw_ctf_enabled = 1;
134
135 data->registry_data.avfs_support = 1;
136 data->registry_data.led_dpm_enabled = 1;
137
138 data->registry_data.vr0hot_enabled = 1;
139 data->registry_data.vr1hot_enabled = 1;
140 data->registry_data.regulator_hot_gpio_support = 1;
141
142 data->display_voltage_mode = PPVEGA10_VEGA10DISPLAYVOLTAGEMODE_DFLT;
143 data->dcef_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
144 data->dcef_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
145 data->dcef_clk_quad_eqn_c = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
146 data->disp_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
147 data->disp_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
148 data->disp_clk_quad_eqn_c = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
149 data->pixel_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
150 data->pixel_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
151 data->pixel_clk_quad_eqn_c = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
152 data->phy_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
153 data->phy_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
154 data->phy_clk_quad_eqn_c = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
155
156 data->gfxclk_average_alpha = PPVEGA10_VEGA10GFXCLKAVERAGEALPHA_DFLT;
157 data->socclk_average_alpha = PPVEGA10_VEGA10SOCCLKAVERAGEALPHA_DFLT;
158 data->uclk_average_alpha = PPVEGA10_VEGA10UCLKCLKAVERAGEALPHA_DFLT;
159 data->gfx_activity_average_alpha = PPVEGA10_VEGA10GFXACTIVITYAVERAGEALPHA_DFLT;
160}
161
162static int vega10_set_features_platform_caps(struct pp_hwmgr *hwmgr)
163{
164 struct vega10_hwmgr *data =
165 (struct vega10_hwmgr *)(hwmgr->backend);
166 struct phm_ppt_v2_information *table_info =
167 (struct phm_ppt_v2_information *)hwmgr->pptable;
168 struct cgs_system_info sys_info = {0};
169 int result;
170
171 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
172 PHM_PlatformCaps_SclkDeepSleep);
173
174 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
175 PHM_PlatformCaps_DynamicPatchPowerState);
176
177 if (data->vddci_control == VEGA10_VOLTAGE_CONTROL_NONE)
178 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
179 PHM_PlatformCaps_ControlVDDCI);
180
181 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
182 PHM_PlatformCaps_TablelessHardwareInterface);
183
184 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
185 PHM_PlatformCaps_EnableSMU7ThermalManagement);
186
187 sys_info.size = sizeof(struct cgs_system_info);
188 sys_info.info_id = CGS_SYSTEM_INFO_PG_FLAGS;
189 result = cgs_query_system_info(hwmgr->device, &sys_info);
190
191 if (!result && (sys_info.value & AMD_PG_SUPPORT_UVD))
192 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
193 PHM_PlatformCaps_UVDPowerGating);
194
195 if (!result && (sys_info.value & AMD_PG_SUPPORT_VCE))
196 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
197 PHM_PlatformCaps_VCEPowerGating);
198
199 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
200 PHM_PlatformCaps_UnTabledHardwareInterface);
201
202 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
203 PHM_PlatformCaps_FanSpeedInTableIsRPM);
204
205 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
206 PHM_PlatformCaps_ODFuzzyFanControlSupport);
207
208 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
209 PHM_PlatformCaps_DynamicPowerManagement);
210
211 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
212 PHM_PlatformCaps_SMC);
213
214 /* power tune caps */
215 /* assume disabled */
216 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
217 PHM_PlatformCaps_PowerContainment);
218 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
219 PHM_PlatformCaps_SQRamping);
220 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
221 PHM_PlatformCaps_DBRamping);
222 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
223 PHM_PlatformCaps_TDRamping);
224 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
225 PHM_PlatformCaps_TCPRamping);
226
227 if (data->registry_data.power_containment_support)
228 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
229 PHM_PlatformCaps_PowerContainment);
230 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
231 PHM_PlatformCaps_CAC);
232
233 if (table_info->tdp_table->usClockStretchAmount &&
234 data->registry_data.clock_stretcher_support)
235 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
236 PHM_PlatformCaps_ClockStretcher);
237
238 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
239 PHM_PlatformCaps_RegulatorHot);
240 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
241 PHM_PlatformCaps_AutomaticDCTransition);
242
243 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
244 PHM_PlatformCaps_UVDDPM);
245 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
246 PHM_PlatformCaps_VCEDPM);
247
248 return 0;
249}
250
251static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
252{
253 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
254 int i;
255
256 vega10_initialize_power_tune_defaults(hwmgr);
257
258 for (i = 0; i < GNLD_FEATURES_MAX; i++) {
259 data->smu_features[i].smu_feature_id = 0xffff;
260 data->smu_features[i].smu_feature_bitmap = 1 << i;
261 data->smu_features[i].enabled = false;
262 data->smu_features[i].supported = false;
263 }
264
265 data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_id =
266 FEATURE_DPM_PREFETCHER_BIT;
267 data->smu_features[GNLD_DPM_GFXCLK].smu_feature_id =
268 FEATURE_DPM_GFXCLK_BIT;
269 data->smu_features[GNLD_DPM_UCLK].smu_feature_id =
270 FEATURE_DPM_UCLK_BIT;
271 data->smu_features[GNLD_DPM_SOCCLK].smu_feature_id =
272 FEATURE_DPM_SOCCLK_BIT;
273 data->smu_features[GNLD_DPM_UVD].smu_feature_id =
274 FEATURE_DPM_UVD_BIT;
275 data->smu_features[GNLD_DPM_VCE].smu_feature_id =
276 FEATURE_DPM_VCE_BIT;
277 data->smu_features[GNLD_DPM_MP0CLK].smu_feature_id =
278 FEATURE_DPM_MP0CLK_BIT;
279 data->smu_features[GNLD_DPM_LINK].smu_feature_id =
280 FEATURE_DPM_LINK_BIT;
281 data->smu_features[GNLD_DPM_DCEFCLK].smu_feature_id =
282 FEATURE_DPM_DCEFCLK_BIT;
283 data->smu_features[GNLD_ULV].smu_feature_id =
284 FEATURE_ULV_BIT;
285 data->smu_features[GNLD_AVFS].smu_feature_id =
286 FEATURE_AVFS_BIT;
287 data->smu_features[GNLD_DS_GFXCLK].smu_feature_id =
288 FEATURE_DS_GFXCLK_BIT;
289 data->smu_features[GNLD_DS_SOCCLK].smu_feature_id =
290 FEATURE_DS_SOCCLK_BIT;
291 data->smu_features[GNLD_DS_LCLK].smu_feature_id =
292 FEATURE_DS_LCLK_BIT;
293 data->smu_features[GNLD_PPT].smu_feature_id =
294 FEATURE_PPT_BIT;
295 data->smu_features[GNLD_TDC].smu_feature_id =
296 FEATURE_TDC_BIT;
297 data->smu_features[GNLD_THERMAL].smu_feature_id =
298 FEATURE_THERMAL_BIT;
299 data->smu_features[GNLD_GFX_PER_CU_CG].smu_feature_id =
300 FEATURE_GFX_PER_CU_CG_BIT;
301 data->smu_features[GNLD_RM].smu_feature_id =
302 FEATURE_RM_BIT;
303 data->smu_features[GNLD_DS_DCEFCLK].smu_feature_id =
304 FEATURE_DS_DCEFCLK_BIT;
305 data->smu_features[GNLD_ACDC].smu_feature_id =
306 FEATURE_ACDC_BIT;
307 data->smu_features[GNLD_VR0HOT].smu_feature_id =
308 FEATURE_VR0HOT_BIT;
309 data->smu_features[GNLD_VR1HOT].smu_feature_id =
310 FEATURE_VR1HOT_BIT;
311 data->smu_features[GNLD_FW_CTF].smu_feature_id =
312 FEATURE_FW_CTF_BIT;
313 data->smu_features[GNLD_LED_DISPLAY].smu_feature_id =
314 FEATURE_LED_DISPLAY_BIT;
315 data->smu_features[GNLD_FAN_CONTROL].smu_feature_id =
316 FEATURE_FAN_CONTROL_BIT;
317 data->smu_features[GNLD_VOLTAGE_CONTROLLER].smu_feature_id =
318 FEATURE_VOLTAGE_CONTROLLER_BIT;
319
320 if (!data->registry_data.prefetcher_dpm_key_disabled)
321 data->smu_features[GNLD_DPM_PREFETCHER].supported = true;
322
323 if (!data->registry_data.sclk_dpm_key_disabled)
324 data->smu_features[GNLD_DPM_GFXCLK].supported = true;
325
326 if (!data->registry_data.mclk_dpm_key_disabled)
327 data->smu_features[GNLD_DPM_UCLK].supported = true;
328
329 if (!data->registry_data.socclk_dpm_key_disabled)
330 data->smu_features[GNLD_DPM_SOCCLK].supported = true;
331
332 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
333 PHM_PlatformCaps_UVDDPM))
334 data->smu_features[GNLD_DPM_UVD].supported = true;
335
336 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
337 PHM_PlatformCaps_VCEDPM))
338 data->smu_features[GNLD_DPM_VCE].supported = true;
339
340 if (!data->registry_data.pcie_dpm_key_disabled)
341 data->smu_features[GNLD_DPM_LINK].supported = true;
342
343 if (!data->registry_data.dcefclk_dpm_key_disabled)
344 data->smu_features[GNLD_DPM_DCEFCLK].supported = true;
345
346 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
347 PHM_PlatformCaps_SclkDeepSleep) &&
348 data->registry_data.sclk_deep_sleep_support) {
349 data->smu_features[GNLD_DS_GFXCLK].supported = true;
350 data->smu_features[GNLD_DS_SOCCLK].supported = true;
351 data->smu_features[GNLD_DS_LCLK].supported = true;
352 }
353
354 if (data->registry_data.enable_pkg_pwr_tracking_feature)
355 data->smu_features[GNLD_PPT].supported = true;
356
357 if (data->registry_data.enable_tdc_limit_feature)
358 data->smu_features[GNLD_TDC].supported = true;
359
360 if (data->registry_data.thermal_support)
361 data->smu_features[GNLD_THERMAL].supported = true;
362
363 if (data->registry_data.fan_control_support)
364 data->smu_features[GNLD_FAN_CONTROL].supported = true;
365
366 if (data->registry_data.fw_ctf_enabled)
367 data->smu_features[GNLD_FW_CTF].supported = true;
368
369 if (data->registry_data.avfs_support)
370 data->smu_features[GNLD_AVFS].supported = true;
371
372 if (data->registry_data.led_dpm_enabled)
373 data->smu_features[GNLD_LED_DISPLAY].supported = true;
374
375 if (data->registry_data.vr1hot_enabled)
376 data->smu_features[GNLD_VR1HOT].supported = true;
377
378 if (data->registry_data.vr0hot_enabled)
379 data->smu_features[GNLD_VR0HOT].supported = true;
380
381}
382
383#ifdef PPLIB_VEGA10_EVV_SUPPORT
384static int vega10_get_socclk_for_voltage_evv(struct pp_hwmgr *hwmgr,
385 phm_ppt_v1_voltage_lookup_table *lookup_table,
386 uint16_t virtual_voltage_id, int32_t *socclk)
387{
388 uint8_t entry_id;
389 uint8_t voltage_id;
390 struct phm_ppt_v2_information *table_info =
391 (struct phm_ppt_v2_information *)(hwmgr->pptable);
392
393 PP_ASSERT_WITH_CODE(lookup_table->count != 0,
394 "Lookup table is empty",
395 return -EINVAL);
396
397 /* search for leakage voltage ID 0xff01 ~ 0xff08 and sclk */
398 for (entry_id = 0; entry_id < table_info->vdd_dep_on_socclk->count; entry_id++) {
399 voltage_id = table_info->vdd_dep_on_socclk->entries[entry_id].vddInd;
400 if (lookup_table->entries[voltage_id].us_vdd == virtual_voltage_id)
401 break;
402 }
403
404 PP_ASSERT_WITH_CODE(entry_id < table_info->vdd_dep_on_socclk->count,
405 "Can't find requested voltage id in vdd_dep_on_socclk table!",
406 return -EINVAL);
407
408 *socclk = table_info->vdd_dep_on_socclk->entries[entry_id].clk;
409
410 return 0;
411}
412
413#define ATOM_VIRTUAL_VOLTAGE_ID0 0xff01
414/**
415* Get Leakage VDDC based on leakage ID.
416*
417* @param hwmgr the address of the powerplay hardware manager.
418* @return always 0.
419*/
420static int vega10_get_evv_voltages(struct pp_hwmgr *hwmgr)
421{
422 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
423 uint16_t vv_id;
424 uint32_t vddc = 0;
425 uint16_t i, j;
426 uint32_t sclk = 0;
427 struct phm_ppt_v2_information *table_info =
428 (struct phm_ppt_v2_information *)hwmgr->pptable;
429 struct phm_ppt_v1_clock_voltage_dependency_table *socclk_table =
430 table_info->vdd_dep_on_socclk;
431 int result;
432
433 for (i = 0; i < VEGA10_MAX_LEAKAGE_COUNT; i++) {
434 vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
435
436 if (!vega10_get_socclk_for_voltage_evv(hwmgr,
437 table_info->vddc_lookup_table, vv_id, &sclk)) {
438 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
439 PHM_PlatformCaps_ClockStretcher)) {
440 for (j = 1; j < socclk_table->count; j++) {
441 if (socclk_table->entries[j].clk == sclk &&
442 socclk_table->entries[j].cks_enable == 0) {
443 sclk += 5000;
444 break;
445 }
446 }
447 }
448
449 PP_ASSERT_WITH_CODE(!atomctrl_get_voltage_evv_on_sclk_ai(hwmgr,
450 VOLTAGE_TYPE_VDDC, sclk, vv_id, &vddc),
451 "Error retrieving EVV voltage value!",
452 continue);
453
454
455 /* need to make sure vddc is less than 2v or else, it could burn the ASIC. */
456 PP_ASSERT_WITH_CODE((vddc < 2000 && vddc != 0),
457 "Invalid VDDC value", result = -EINVAL;);
458
459 /* the voltage should not be zero nor equal to leakage ID */
460 if (vddc != 0 && vddc != vv_id) {
461 data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = (uint16_t)(vddc/100);
462 data->vddc_leakage.leakage_id[data->vddc_leakage.count] = vv_id;
463 data->vddc_leakage.count++;
464 }
465 }
466 }
467
468 return 0;
469}
470
471/**
472 * Change virtual leakage voltage to actual value.
473 *
474 * @param hwmgr the address of the powerplay hardware manager.
475 * @param voltage pointer to the voltage value to patch
476 * @param leakage_table pointer to the leakage table
477 */
478static void vega10_patch_with_vdd_leakage(struct pp_hwmgr *hwmgr,
479 uint16_t *voltage, struct vega10_leakage_voltage *leakage_table)
480{
481 uint32_t index;
482
483 /* search for leakage voltage ID 0xff01 ~ 0xff08 */
484 for (index = 0; index < leakage_table->count; index++) {
485 /* if this voltage matches a leakage voltage ID */
486 /* patch with actual leakage voltage */
487 if (leakage_table->leakage_id[index] == *voltage) {
488 *voltage = leakage_table->actual_voltage[index];
489 break;
490 }
491 }
492
493 if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
494 pr_info("Voltage value looks like a Leakage ID \
495 but it's not patched\n");
496}
497
498/**
499* Patch voltage lookup table by EVV leakages.
500*
501* @param hwmgr the address of the powerplay hardware manager.
502* @param lookup_table pointer to the voltage lookup table
503* @param leakage_table pointer to the leakage table
504* @return always 0
505*/
506static int vega10_patch_lookup_table_with_leakage(struct pp_hwmgr *hwmgr,
507 phm_ppt_v1_voltage_lookup_table *lookup_table,
508 struct vega10_leakage_voltage *leakage_table)
509{
510 uint32_t i;
511
512 for (i = 0; i < lookup_table->count; i++)
513 vega10_patch_with_vdd_leakage(hwmgr,
514 &lookup_table->entries[i].us_vdd, leakage_table);
515
516 return 0;
517}
518
519static int vega10_patch_clock_voltage_limits_with_vddc_leakage(
520 struct pp_hwmgr *hwmgr, struct vega10_leakage_voltage *leakage_table,
521 uint16_t *vddc)
522{
523 vega10_patch_with_vdd_leakage(hwmgr, (uint16_t *)vddc, leakage_table);
524
525 return 0;
526}
527#endif
528
529static int vega10_patch_voltage_dependency_tables_with_lookup_table(
530 struct pp_hwmgr *hwmgr)
531{
532 uint8_t entry_id;
533 uint8_t voltage_id;
534 struct phm_ppt_v2_information *table_info =
535 (struct phm_ppt_v2_information *)(hwmgr->pptable);
536 struct phm_ppt_v1_clock_voltage_dependency_table *socclk_table =
537 table_info->vdd_dep_on_socclk;
538 struct phm_ppt_v1_clock_voltage_dependency_table *gfxclk_table =
539 table_info->vdd_dep_on_sclk;
540 struct phm_ppt_v1_clock_voltage_dependency_table *dcefclk_table =
541 table_info->vdd_dep_on_dcefclk;
542 struct phm_ppt_v1_clock_voltage_dependency_table *pixclk_table =
543 table_info->vdd_dep_on_pixclk;
544 struct phm_ppt_v1_clock_voltage_dependency_table *dspclk_table =
545 table_info->vdd_dep_on_dispclk;
546 struct phm_ppt_v1_clock_voltage_dependency_table *phyclk_table =
547 table_info->vdd_dep_on_phyclk;
548 struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table =
549 table_info->vdd_dep_on_mclk;
550 struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
551 table_info->mm_dep_table;
552
553 for (entry_id = 0; entry_id < socclk_table->count; entry_id++) {
554 voltage_id = socclk_table->entries[entry_id].vddInd;
555 socclk_table->entries[entry_id].vddc =
556 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
557 }
558
559 for (entry_id = 0; entry_id < gfxclk_table->count; entry_id++) {
560 voltage_id = gfxclk_table->entries[entry_id].vddInd;
561 gfxclk_table->entries[entry_id].vddc =
562 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
563 }
564
565 for (entry_id = 0; entry_id < dcefclk_table->count; entry_id++) {
566 voltage_id = dcefclk_table->entries[entry_id].vddInd;
567 dcefclk_table->entries[entry_id].vddc =
568 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
569 }
570
571 for (entry_id = 0; entry_id < pixclk_table->count; entry_id++) {
572 voltage_id = pixclk_table->entries[entry_id].vddInd;
573 pixclk_table->entries[entry_id].vddc =
574 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
575 }
576
577 for (entry_id = 0; entry_id < dspclk_table->count; entry_id++) {
578 voltage_id = dspclk_table->entries[entry_id].vddInd;
579 dspclk_table->entries[entry_id].vddc =
580 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
581 }
582
583 for (entry_id = 0; entry_id < phyclk_table->count; entry_id++) {
584 voltage_id = phyclk_table->entries[entry_id].vddInd;
585 phyclk_table->entries[entry_id].vddc =
586 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
587 }
588
589 for (entry_id = 0; entry_id < mclk_table->count; ++entry_id) {
590 voltage_id = mclk_table->entries[entry_id].vddInd;
591 mclk_table->entries[entry_id].vddc =
592 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
593 voltage_id = mclk_table->entries[entry_id].vddciInd;
594 mclk_table->entries[entry_id].vddci =
595 table_info->vddci_lookup_table->entries[voltage_id].us_vdd;
596 voltage_id = mclk_table->entries[entry_id].mvddInd;
597 mclk_table->entries[entry_id].mvdd =
598 table_info->vddmem_lookup_table->entries[voltage_id].us_vdd;
599 }
600
601 for (entry_id = 0; entry_id < mm_table->count; ++entry_id) {
602 voltage_id = mm_table->entries[entry_id].vddcInd;
603 mm_table->entries[entry_id].vddc =
604 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
605 }
606
607 return 0;
608
609}
610
611static int vega10_sort_lookup_table(struct pp_hwmgr *hwmgr,
612 struct phm_ppt_v1_voltage_lookup_table *lookup_table)
613{
614 uint32_t table_size, i, j;
615 struct phm_ppt_v1_voltage_lookup_record tmp_voltage_lookup_record;
616
617 PP_ASSERT_WITH_CODE(lookup_table && lookup_table->count,
618 "Lookup table is empty", return -EINVAL);
619
620 table_size = lookup_table->count;
621
622 /* Sorting voltages */
623 for (i = 0; i < table_size - 1; i++) {
624 for (j = i + 1; j > 0; j--) {
625 if (lookup_table->entries[j].us_vdd <
626 lookup_table->entries[j - 1].us_vdd) {
627 tmp_voltage_lookup_record = lookup_table->entries[j - 1];
628 lookup_table->entries[j - 1] = lookup_table->entries[j];
629 lookup_table->entries[j] = tmp_voltage_lookup_record;
630 }
631 }
632 }
633
634 return 0;
635}
636
637static int vega10_complete_dependency_tables(struct pp_hwmgr *hwmgr)
638{
639 int result = 0;
640 int tmp_result;
641 struct phm_ppt_v2_information *table_info =
642 (struct phm_ppt_v2_information *)(hwmgr->pptable);
643#ifdef PPLIB_VEGA10_EVV_SUPPORT
644 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
645
646 tmp_result = vega10_patch_lookup_table_with_leakage(hwmgr,
647 table_info->vddc_lookup_table, &(data->vddc_leakage));
648 if (tmp_result)
649 result = tmp_result;
650
651 tmp_result = vega10_patch_clock_voltage_limits_with_vddc_leakage(hwmgr,
652 &(data->vddc_leakage), &table_info->max_clock_voltage_on_dc.vddc);
653 if (tmp_result)
654 result = tmp_result;
655#endif
656
657 tmp_result = vega10_patch_voltage_dependency_tables_with_lookup_table(hwmgr);
658 if (tmp_result)
659 result = tmp_result;
660
661 tmp_result = vega10_sort_lookup_table(hwmgr, table_info->vddc_lookup_table);
662 if (tmp_result)
663 result = tmp_result;
664
665 return result;
666}
667
668static int vega10_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr)
669{
670 struct phm_ppt_v2_information *table_info =
671 (struct phm_ppt_v2_information *)(hwmgr->pptable);
672 struct phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table =
673 table_info->vdd_dep_on_socclk;
674 struct phm_ppt_v1_clock_voltage_dependency_table *allowed_mclk_vdd_table =
675 table_info->vdd_dep_on_mclk;
676
677 PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table,
678 "VDD dependency on SCLK table is missing. \
679 This table is mandatory", return -EINVAL);
680 PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1,
681 "VDD dependency on SCLK table is empty. \
682 This table is mandatory", return -EINVAL);
683
684 PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table,
685 "VDD dependency on MCLK table is missing. \
686 This table is mandatory", return -EINVAL);
687 PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1,
688 "VDD dependency on MCLK table is empty. \
689 This table is mandatory", return -EINVAL);
690
691 table_info->max_clock_voltage_on_ac.sclk =
692 allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].clk;
693 table_info->max_clock_voltage_on_ac.mclk =
694 allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].clk;
695 table_info->max_clock_voltage_on_ac.vddc =
696 allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc;
697 table_info->max_clock_voltage_on_ac.vddci =
698 allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].vddci;
699
700 hwmgr->dyn_state.max_clock_voltage_on_ac.sclk =
701 table_info->max_clock_voltage_on_ac.sclk;
702 hwmgr->dyn_state.max_clock_voltage_on_ac.mclk =
703 table_info->max_clock_voltage_on_ac.mclk;
704 hwmgr->dyn_state.max_clock_voltage_on_ac.vddc =
705 table_info->max_clock_voltage_on_ac.vddc;
706 hwmgr->dyn_state.max_clock_voltage_on_ac.vddci =
707 table_info->max_clock_voltage_on_ac.vddci;
708
709 return 0;
710}
711
712static int vega10_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
713{
714 kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
715 hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL;
716
717 kfree(hwmgr->backend);
718 hwmgr->backend = NULL;
719
720 return 0;
721}
722
723static int vega10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
724{
725 int result = 0;
726 struct vega10_hwmgr *data;
727 uint32_t config_telemetry = 0;
728 struct pp_atomfwctrl_voltage_table vol_table;
729 struct cgs_system_info sys_info = {0};
730
731 data = kzalloc(sizeof(struct vega10_hwmgr), GFP_KERNEL);
732 if (data == NULL)
733 return -ENOMEM;
734
735 hwmgr->backend = data;
736
737 vega10_set_default_registry_data(hwmgr);
738
739 data->disable_dpm_mask = 0xff;
740 data->workload_mask = 0xff;
741
742 /* need to set voltage control types before EVV patching */
743 data->vddc_control = VEGA10_VOLTAGE_CONTROL_NONE;
744 data->mvdd_control = VEGA10_VOLTAGE_CONTROL_NONE;
745 data->vddci_control = VEGA10_VOLTAGE_CONTROL_NONE;
746
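/* The SVI2 telemetry slope/offset pairs read below are packed into
 * config_telemetry: VDDCR_SOC in the low 16 bits (slope in byte 1,
 * offset in byte 0) and MVDDC in the high 16 bits (slope in byte 3,
 * offset in byte 2).
 */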
747 /* VDDCR_SOC */
748 if (pp_atomfwctrl_is_voltage_controlled_by_gpio_v4(hwmgr,
749 VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2)) {
750 if (!pp_atomfwctrl_get_voltage_table_v4(hwmgr,
751 VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2,
752 &vol_table)) {
753 config_telemetry = ((vol_table.telemetry_slope << 8) & 0xff00) |
754 (vol_table.telemetry_offset & 0xff);
755 data->vddc_control = VEGA10_VOLTAGE_CONTROL_BY_SVID2;
756 }
757 } else {
758 kfree(hwmgr->backend);
759 hwmgr->backend = NULL;
760 PP_ASSERT_WITH_CODE(false,
761 "VDDCR_SOC is not SVID2!",
762 return -1);
763 }
764
765 /* MVDDC */
766 if (pp_atomfwctrl_is_voltage_controlled_by_gpio_v4(hwmgr,
767 VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2)) {
768 if (!pp_atomfwctrl_get_voltage_table_v4(hwmgr,
769 VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2,
770 &vol_table)) {
771 config_telemetry |=
772 ((vol_table.telemetry_slope << 24) & 0xff000000) |
773 ((vol_table.telemetry_offset << 16) & 0xff0000);
774 data->mvdd_control = VEGA10_VOLTAGE_CONTROL_BY_SVID2;
775 }
776 }
777
778 /* VDDCI_MEM */
779 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
780 PHM_PlatformCaps_ControlVDDCI)) {
781 if (pp_atomfwctrl_is_voltage_controlled_by_gpio_v4(hwmgr,
782 VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
783 data->vddci_control = VEGA10_VOLTAGE_CONTROL_BY_GPIO;
784 }
785
786 data->config_telemetry = config_telemetry;
787
788 vega10_set_features_platform_caps(hwmgr);
789
790 vega10_init_dpm_defaults(hwmgr);
791
792#ifdef PPLIB_VEGA10_EVV_SUPPORT
793 /* Get leakage voltage based on leakage ID. */
794 PP_ASSERT_WITH_CODE(!vega10_get_evv_voltages(hwmgr),
795 "Get EVV Voltage Failed. Abort Driver loading!",
796 return -1);
797#endif
798
799 /* Patch our voltage dependency table with actual leakage voltage
800 * We need to perform leakage translation before it's used by other functions
801 */
802 vega10_complete_dependency_tables(hwmgr);
803
804 /* Parse pptable data read from VBIOS */
805 vega10_set_private_data_based_on_pptable(hwmgr);
806
807 data->is_tlu_enabled = false;
808
809 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
810 VEGA10_MAX_HARDWARE_POWERLEVELS;
811 hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
812 hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
813
814 hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */
815 /* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. */
816 hwmgr->platform_descriptor.clockStep.engineClock = 500;
817 hwmgr->platform_descriptor.clockStep.memoryClock = 500;
818
819 sys_info.size = sizeof(struct cgs_system_info);
820 sys_info.info_id = CGS_SYSTEM_INFO_GFX_CU_INFO;
821 result = cgs_query_system_info(hwmgr->device, &sys_info);
822 data->total_active_cus = sys_info.value;
823 /* Setup default Overdrive Fan control settings */
824 data->odn_fan_table.target_fan_speed =
825 hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM;
826 data->odn_fan_table.target_temperature =
827 hwmgr->thermal_controller.
828 advanceFanControlParameters.ucTargetTemperature;
829 data->odn_fan_table.min_performance_clock =
830 hwmgr->thermal_controller.advanceFanControlParameters.
831 ulMinFanSCLKAcousticLimit;
832 data->odn_fan_table.min_fan_limit =
833 hwmgr->thermal_controller.
834 advanceFanControlParameters.usFanPWMMinLimit *
835 hwmgr->thermal_controller.fanInfo.ulMaxRPM / 100;
836
837 return result;
838}
839
840static int vega10_init_sclk_threshold(struct pp_hwmgr *hwmgr)
841{
842 struct vega10_hwmgr *data =
843 (struct vega10_hwmgr *)(hwmgr->backend);
844
845 data->low_sclk_interrupt_threshold = 0;
846
847 return 0;
848}
849
850static int vega10_setup_dpm_led_config(struct pp_hwmgr *hwmgr)
851{
852 struct vega10_hwmgr *data =
853 (struct vega10_hwmgr *)(hwmgr->backend);
854 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
855
856 struct pp_atomfwctrl_voltage_table table;
857 uint8_t i, j;
858 uint32_t mask = 0;
859 uint32_t tmp;
860 int32_t ret = 0;
861
862 ret = pp_atomfwctrl_get_voltage_table_v4(hwmgr, VOLTAGE_TYPE_LEDDPM,
863 VOLTAGE_OBJ_GPIO_LUT, &table);
864
865 if (!ret) {
866 tmp = table.mask_low;
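/* Record the bit positions of the first three set bits of mask_low in
 * bytes 0, 1 and 2 of mask; these become the LED DPM GPIO pins below.
 */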
867 for (i = 0, j = 0; i < 32; i++) {
868 if (tmp & 1) {
869 mask |= (uint32_t)(i << (8 * j));
870 if (++j >= 3)
871 break;
872 }
873 tmp >>= 1;
874 }
875 }
876
877 pp_table->LedPin0 = (uint8_t)(mask & 0xff);
878 pp_table->LedPin1 = (uint8_t)((mask >> 8) & 0xff);
879 pp_table->LedPin2 = (uint8_t)((mask >> 16) & 0xff);
880 return 0;
881}
882
883static int vega10_setup_asic_task(struct pp_hwmgr *hwmgr)
884{
885 PP_ASSERT_WITH_CODE(!vega10_init_sclk_threshold(hwmgr),
886 "Failed to init sclk threshold!",
887 return -EINVAL);
888
889 PP_ASSERT_WITH_CODE(!vega10_setup_dpm_led_config(hwmgr),
890 "Failed to set up led dpm config!",
891 return -EINVAL);
892
893 return 0;
894}
895
896static bool vega10_is_dpm_running(struct pp_hwmgr *hwmgr)
897{
898 uint32_t features_enabled;
899
900 if (!vega10_get_smc_features(hwmgr->smumgr, &features_enabled)) {
901 if (features_enabled & SMC_DPM_FEATURES)
902 return true;
903 }
904 return false;
905}
906
907/**
908* Remove repeated voltage values and create table with unique values.
909*
910* @param hwmgr the address of the powerplay hardware manager.
911* @param vol_table the pointer to the voltage table to trim
912* @return 0 on success
913*/
914
915static int vega10_trim_voltage_table(struct pp_hwmgr *hwmgr,
916 struct pp_atomfwctrl_voltage_table *vol_table)
917{
918 uint32_t i, j;
919 uint16_t vvalue;
920 bool found = false;
921 struct pp_atomfwctrl_voltage_table *table;
922
923 PP_ASSERT_WITH_CODE(vol_table,
924 "Voltage Table empty.", return -EINVAL);
925 table = kzalloc(sizeof(struct pp_atomfwctrl_voltage_table),
926 GFP_KERNEL);
927
928 if (!table)
929 return -ENOMEM;
930
931 table->mask_low = vol_table->mask_low;
932 table->phase_delay = vol_table->phase_delay;
933
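/* Copy only the first occurrence of each voltage value, preserving order. */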
934 for (i = 0; i < vol_table->count; i++) {
935 vvalue = vol_table->entries[i].value;
936 found = false;
937
938 for (j = 0; j < table->count; j++) {
939 if (vvalue == table->entries[j].value) {
940 found = true;
941 break;
942 }
943 }
944
945 if (!found) {
946 table->entries[table->count].value = vvalue;
947 table->entries[table->count].smio_low =
948 vol_table->entries[i].smio_low;
949 table->count++;
950 }
951 }
952
953 memcpy(vol_table, table, sizeof(struct pp_atomfwctrl_voltage_table));
954 kfree(table);
955
956 return 0;
957}
958
959static int vega10_get_mvdd_voltage_table(struct pp_hwmgr *hwmgr,
960 phm_ppt_v1_clock_voltage_dependency_table *dep_table,
961 struct pp_atomfwctrl_voltage_table *vol_table)
962{
963 int i;
964
965 PP_ASSERT_WITH_CODE(dep_table->count,
966 "Voltage Dependency Table empty.",
967 return -EINVAL);
968
969 vol_table->mask_low = 0;
970 vol_table->phase_delay = 0;
971 vol_table->count = dep_table->count;
972
973 for (i = 0; i < vol_table->count; i++) {
974 vol_table->entries[i].value = dep_table->entries[i].mvdd;
975 vol_table->entries[i].smio_low = 0;
976 }
977
978 PP_ASSERT_WITH_CODE(!vega10_trim_voltage_table(hwmgr,
979 vol_table),
980 "Failed to trim MVDD Table!",
981 return -1);
982
983 return 0;
984}
985
986static int vega10_get_vddci_voltage_table(struct pp_hwmgr *hwmgr,
987 phm_ppt_v1_clock_voltage_dependency_table *dep_table,
988 struct pp_atomfwctrl_voltage_table *vol_table)
989{
990 uint32_t i;
991
992 PP_ASSERT_WITH_CODE(dep_table->count,
993 "Voltage Dependency Table empty.",
994 return -EINVAL);
995
996 vol_table->mask_low = 0;
997 vol_table->phase_delay = 0;
998 vol_table->count = dep_table->count;
999
1000 for (i = 0; i < dep_table->count; i++) {
1001 vol_table->entries[i].value = dep_table->entries[i].vddci;
1002 vol_table->entries[i].smio_low = 0;
1003 }
1004
1005 PP_ASSERT_WITH_CODE(!vega10_trim_voltage_table(hwmgr, vol_table),
1006 "Failed to trim VDDCI table.",
1007 return -1);
1008
1009 return 0;
1010}
1011
1012static int vega10_get_vdd_voltage_table(struct pp_hwmgr *hwmgr,
1013 phm_ppt_v1_clock_voltage_dependency_table *dep_table,
1014 struct pp_atomfwctrl_voltage_table *vol_table)
1015{
1016 int i;
1017
1018 PP_ASSERT_WITH_CODE(dep_table->count,
1019 "Voltage Dependency Table empty.",
1020 return -EINVAL);
1021
1022 vol_table->mask_low = 0;
1023 vol_table->phase_delay = 0;
1024 vol_table->count = dep_table->count;
1025
1026 for (i = 0; i < vol_table->count; i++) {
1027 vol_table->entries[i].value = dep_table->entries[i].vddc;
1028 vol_table->entries[i].smio_low = 0;
1029 }
1030
1031 return 0;
1032}
1033
1034/* ---- Voltage Tables ----
1035 * If the voltage table would be bigger than
1036 * what will fit into the state table on
1037 * the SMC keep only the higher entries.
1038 */
1039static void vega10_trim_voltage_table_to_fit_state_table(
1040 struct pp_hwmgr *hwmgr,
1041 uint32_t max_vol_steps,
1042 struct pp_atomfwctrl_voltage_table *vol_table)
1043{
1044 unsigned int i, diff;
1045
1046 if (vol_table->count <= max_vol_steps)
1047 return;
1048
1049 diff = vol_table->count - max_vol_steps;
1050
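/* Drop the lowest 'diff' entries so only the highest voltage steps remain. */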
1051 for (i = 0; i < max_vol_steps; i++)
1052 vol_table->entries[i] = vol_table->entries[i + diff];
1053
1054 vol_table->count = max_vol_steps;
1055}
1056
1057/**
1058* Create Voltage Tables.
1059*
1060* @param hwmgr the address of the powerplay hardware manager.
1061* @return 0 on success
1062*/
1063static int vega10_construct_voltage_tables(struct pp_hwmgr *hwmgr)
1064{
1065 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
1066 struct phm_ppt_v2_information *table_info =
1067 (struct phm_ppt_v2_information *)hwmgr->pptable;
1068 int result;
1069
1070 if (data->mvdd_control == VEGA10_VOLTAGE_CONTROL_BY_SVID2 ||
1071 data->mvdd_control == VEGA10_VOLTAGE_CONTROL_NONE) {
1072 result = vega10_get_mvdd_voltage_table(hwmgr,
1073 table_info->vdd_dep_on_mclk,
1074 &(data->mvdd_voltage_table));
1075 PP_ASSERT_WITH_CODE(!result,
1076 "Failed to retrieve MVDDC table!",
1077 return result);
1078 }
1079
1080 if (data->vddci_control == VEGA10_VOLTAGE_CONTROL_NONE) {
1081 result = vega10_get_vddci_voltage_table(hwmgr,
1082 table_info->vdd_dep_on_mclk,
1083 &(data->vddci_voltage_table));
1084 PP_ASSERT_WITH_CODE(!result,
1085 "Failed to retrieve VDDCI_MEM table!",
1086 return result);
1087 }
1088
1089 if (data->vddc_control == VEGA10_VOLTAGE_CONTROL_BY_SVID2 ||
1090 data->vddc_control == VEGA10_VOLTAGE_CONTROL_NONE) {
1091 result = vega10_get_vdd_voltage_table(hwmgr,
1092 table_info->vdd_dep_on_sclk,
1093 &(data->vddc_voltage_table));
1094 PP_ASSERT_WITH_CODE(!result,
1095 "Failed to retrieve VDDCR_SOC table!",
1096 return result);
1097 }
1098
1099 PP_ASSERT_WITH_CODE(data->vddc_voltage_table.count <= 16,
1100 "Too many voltage values for VDDC. Trimming to fit state table.",
1101 vega10_trim_voltage_table_to_fit_state_table(hwmgr,
1102 16, &(data->vddc_voltage_table)));
1103
1104 PP_ASSERT_WITH_CODE(data->vddci_voltage_table.count <= 16,
1105 "Too many voltage values for VDDCI. Trimming to fit state table.",
1106 vega10_trim_voltage_table_to_fit_state_table(hwmgr,
1107 16, &(data->vddci_voltage_table)));
1108
1109 PP_ASSERT_WITH_CODE(data->mvdd_voltage_table.count <= 16,
1110 "Too many voltage values for MVDD. Trimming to fit state table.",
1111 vega10_trim_voltage_table_to_fit_state_table(hwmgr,
1112 16, &(data->mvdd_voltage_table)));
1113
1114
1115 return 0;
1116}
1117
1118/*
1119 * @fn vega10_init_dpm_state
1120 * @brief Function to initialize all Soft Min/Max and Hard Min/Max to 0xff.
1121 *
1122 * @param dpm_state - the address of the DPM Table to initialize.
1123 * @return None.
1124 */
1125static void vega10_init_dpm_state(struct vega10_dpm_state *dpm_state)
1126{
1127 dpm_state->soft_min_level = 0xff;
1128 dpm_state->soft_max_level = 0xff;
1129 dpm_state->hard_min_level = 0xff;
1130 dpm_state->hard_max_level = 0xff;
1131}
1132
1133static void vega10_setup_default_single_dpm_table(struct pp_hwmgr *hwmgr,
1134 struct vega10_single_dpm_table *dpm_table,
1135 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table)
1136{
1137 int i;
1138
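/* Collapse the dependency table into monotonically non-decreasing DPM levels,
 * skipping entries whose clock is lower than the previously accepted level.
 */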
1139 for (i = 0; i < dep_table->count; i++) {
1140 if (i == 0 || dpm_table->dpm_levels[dpm_table->count - 1].value <=
1141 dep_table->entries[i].clk) {
1142 dpm_table->dpm_levels[dpm_table->count].value =
1143 dep_table->entries[i].clk;
1144 dpm_table->dpm_levels[dpm_table->count].enabled = true;
1145 dpm_table->count++;
1146 }
1147 }
1148}
1149static int vega10_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
1150{
1151 struct vega10_hwmgr *data =
1152 (struct vega10_hwmgr *)(hwmgr->backend);
1153 struct vega10_pcie_table *pcie_table = &(data->dpm_table.pcie_table);
1154 struct phm_ppt_v2_information *table_info =
1155 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1156 struct phm_ppt_v1_pcie_table *bios_pcie_table =
1157 table_info->pcie_table;
1158 uint32_t i;
1159
1160 PP_ASSERT_WITH_CODE(bios_pcie_table->count,
1161 "Incorrect number of PCIE States from VBIOS!",
1162 return -1);
1163
1164 for (i = 0; i < NUM_LINK_LEVELS - 1; i++) {
1165 if (data->registry_data.pcieSpeedOverride)
1166 pcie_table->pcie_gen[i] =
1167 data->registry_data.pcieSpeedOverride;
1168 else
1169 pcie_table->pcie_gen[i] =
1170 bios_pcie_table->entries[i].gen_speed;
1171
1172 if (data->registry_data.pcieLaneOverride)
1173 pcie_table->pcie_lane[i] =
1174 data->registry_data.pcieLaneOverride;
1175 else
1176 pcie_table->pcie_lane[i] =
1177 bios_pcie_table->entries[i].lane_width;
1178
1179 if (data->registry_data.pcieClockOverride)
1180 pcie_table->lclk[i] =
1181 data->registry_data.pcieClockOverride;
1182 else
1183 pcie_table->lclk[i] =
1184 bios_pcie_table->entries[i].pcie_sclk;
1185 }
1186
1187 pcie_table->count = NUM_LINK_LEVELS;
1188
1189 return 0;
1190}
1191
1192/*
1193 * This function is to initialize all DPM state tables
1194 * for SMU based on the dependency table.
1195 * Dynamic state patching function will then trim these
1196 * state tables to the allowed range based
1197 * on the power policy or external client requests,
1198 * such as UVD request, etc.
1199 */
1200static int vega10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
1201{
1202 struct vega10_hwmgr *data =
1203 (struct vega10_hwmgr *)(hwmgr->backend);
1204 struct phm_ppt_v2_information *table_info =
1205 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1206 struct vega10_single_dpm_table *dpm_table;
1207 uint32_t i;
1208
1209 struct phm_ppt_v1_clock_voltage_dependency_table *dep_soc_table =
1210 table_info->vdd_dep_on_socclk;
1211 struct phm_ppt_v1_clock_voltage_dependency_table *dep_gfx_table =
1212 table_info->vdd_dep_on_sclk;
1213 struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
1214 table_info->vdd_dep_on_mclk;
1215 struct phm_ppt_v1_mm_clock_voltage_dependency_table *dep_mm_table =
1216 table_info->mm_dep_table;
1217 struct phm_ppt_v1_clock_voltage_dependency_table *dep_dcef_table =
1218 table_info->vdd_dep_on_dcefclk;
1219 struct phm_ppt_v1_clock_voltage_dependency_table *dep_pix_table =
1220 table_info->vdd_dep_on_pixclk;
1221 struct phm_ppt_v1_clock_voltage_dependency_table *dep_disp_table =
1222 table_info->vdd_dep_on_dispclk;
1223 struct phm_ppt_v1_clock_voltage_dependency_table *dep_phy_table =
1224 table_info->vdd_dep_on_phyclk;
1225
1226 PP_ASSERT_WITH_CODE(dep_soc_table,
1227 "SOCCLK dependency table is missing. This table is mandatory",
1228 return -EINVAL);
1229 PP_ASSERT_WITH_CODE(dep_soc_table->count >= 1,
1230 "SOCCLK dependency table is empty. This table is mandatory",
1231 return -EINVAL);
1232
1233 PP_ASSERT_WITH_CODE(dep_gfx_table,
1234 "GFXCLK dependency table is missing. This table is mandatory",
1235 return -EINVAL);
1236 PP_ASSERT_WITH_CODE(dep_gfx_table->count >= 1,
1237 "GFXCLK dependency table is empty. This table is mandatory",
1238 return -EINVAL);
1239
1240 PP_ASSERT_WITH_CODE(dep_mclk_table,
1241 "MCLK dependency table is missing. This table is mandatory",
1242 return -EINVAL);
1243 PP_ASSERT_WITH_CODE(dep_mclk_table->count >= 1,
1244 "MCLK dependency table has to have is missing. This table is mandatory",
1245 return -EINVAL);
1246
1247 /* Initialize Sclk DPM table based on allow Sclk values */
1248 data->dpm_table.soc_table.count = 0;
1249 data->dpm_table.gfx_table.count = 0;
1250 data->dpm_table.dcef_table.count = 0;
1251
1252 dpm_table = &(data->dpm_table.soc_table);
1253 vega10_setup_default_single_dpm_table(hwmgr,
1254 dpm_table,
1255 dep_soc_table);
1256
1257 vega10_init_dpm_state(&(dpm_table->dpm_state));
1258
1259 dpm_table = &(data->dpm_table.gfx_table);
1260 vega10_setup_default_single_dpm_table(hwmgr,
1261 dpm_table,
1262 dep_gfx_table);
1263 vega10_init_dpm_state(&(dpm_table->dpm_state));
1264
1265 /* Initialize Mclk DPM table based on allow Mclk values */
1266 data->dpm_table.mem_table.count = 0;
1267 dpm_table = &(data->dpm_table.mem_table);
1268 vega10_setup_default_single_dpm_table(hwmgr,
1269 dpm_table,
1270 dep_mclk_table);
1271 vega10_init_dpm_state(&(dpm_table->dpm_state));
1272
1273 data->dpm_table.eclk_table.count = 0;
1274 dpm_table = &(data->dpm_table.eclk_table);
1275 for (i = 0; i < dep_mm_table->count; i++) {
1276 if (i == 0 || dpm_table->dpm_levels
1277 [dpm_table->count - 1].value <=
1278 dep_mm_table->entries[i].eclk) {
1279 dpm_table->dpm_levels[dpm_table->count].value =
1280 dep_mm_table->entries[i].eclk;
1281 dpm_table->dpm_levels[dpm_table->count].enabled =
1282 (i == 0) ? true : false;
1283 dpm_table->count++;
1284 }
1285 }
1286 vega10_init_dpm_state(&(dpm_table->dpm_state));
1287
1288 data->dpm_table.vclk_table.count = 0;
1289 data->dpm_table.dclk_table.count = 0;
1290 dpm_table = &(data->dpm_table.vclk_table);
1291 for (i = 0; i < dep_mm_table->count; i++) {
1292 if (i == 0 || dpm_table->dpm_levels
1293 [dpm_table->count - 1].value <=
1294 dep_mm_table->entries[i].vclk) {
1295 dpm_table->dpm_levels[dpm_table->count].value =
1296 dep_mm_table->entries[i].vclk;
1297 dpm_table->dpm_levels[dpm_table->count].enabled =
1298 (i == 0) ? true : false;
1299 dpm_table->count++;
1300 }
1301 }
1302 vega10_init_dpm_state(&(dpm_table->dpm_state));
1303
1304 dpm_table = &(data->dpm_table.dclk_table);
1305 for (i = 0; i < dep_mm_table->count; i++) {
1306 if (i == 0 || dpm_table->dpm_levels
1307 [dpm_table->count - 1].value <=
1308 dep_mm_table->entries[i].dclk) {
1309 dpm_table->dpm_levels[dpm_table->count].value =
1310 dep_mm_table->entries[i].dclk;
1311 dpm_table->dpm_levels[dpm_table->count].enabled =
1312 (i == 0) ? true : false;
1313 dpm_table->count++;
1314 }
1315 }
1316 vega10_init_dpm_state(&(dpm_table->dpm_state));
1317
1318 /* Assume there is no headless Vega10 for now */
1319 dpm_table = &(data->dpm_table.dcef_table);
1320 vega10_setup_default_single_dpm_table(hwmgr,
1321 dpm_table,
1322 dep_dcef_table);
1323
1324 vega10_init_dpm_state(&(dpm_table->dpm_state));
1325
1326 dpm_table = &(data->dpm_table.pixel_table);
1327 vega10_setup_default_single_dpm_table(hwmgr,
1328 dpm_table,
1329 dep_pix_table);
1330
1331 vega10_init_dpm_state(&(dpm_table->dpm_state));
1332
1333 dpm_table = &(data->dpm_table.display_table);
1334 vega10_setup_default_single_dpm_table(hwmgr,
1335 dpm_table,
1336 dep_disp_table);
1337
1338 vega10_init_dpm_state(&(dpm_table->dpm_state));
1339
1340 dpm_table = &(data->dpm_table.phy_table);
1341 vega10_setup_default_single_dpm_table(hwmgr,
1342 dpm_table,
1343 dep_phy_table);
1344
1345 vega10_init_dpm_state(&(dpm_table->dpm_state));
1346
1347 vega10_setup_default_pcie_table(hwmgr);
1348
1349 /* save a copy of the default DPM table */
1350 memcpy(&(data->golden_dpm_table), &(data->dpm_table),
1351 sizeof(struct vega10_dpm_table));
1352
1353 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1354 PHM_PlatformCaps_ODNinACSupport) ||
1355 phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1356 PHM_PlatformCaps_ODNinDCSupport)) {
1357 data->odn_dpm_table.odn_core_clock_dpm_levels.
1358 number_of_performance_levels = data->dpm_table.gfx_table.count;
1359 for (i = 0; i < data->dpm_table.gfx_table.count; i++) {
1360 data->odn_dpm_table.odn_core_clock_dpm_levels.
1361 performance_level_entries[i].clock =
1362 data->dpm_table.gfx_table.dpm_levels[i].value;
1363 data->odn_dpm_table.odn_core_clock_dpm_levels.
1364 performance_level_entries[i].enabled = true;
1365 }
1366
1367 data->odn_dpm_table.vdd_dependency_on_sclk.count =
1368 dep_gfx_table->count;
1369 for (i = 0; i < dep_gfx_table->count; i++) {
1370 data->odn_dpm_table.vdd_dependency_on_sclk.entries[i].clk =
1371 dep_gfx_table->entries[i].clk;
1372 data->odn_dpm_table.vdd_dependency_on_sclk.entries[i].vddInd =
1373 dep_gfx_table->entries[i].vddInd;
1374 data->odn_dpm_table.vdd_dependency_on_sclk.entries[i].cks_enable =
1375 dep_gfx_table->entries[i].cks_enable;
1376 data->odn_dpm_table.vdd_dependency_on_sclk.entries[i].cks_voffset =
1377 dep_gfx_table->entries[i].cks_voffset;
1378 }
1379
1380 data->odn_dpm_table.odn_memory_clock_dpm_levels.
1381 number_of_performance_levels = data->dpm_table.mem_table.count;
1382 for (i = 0; i < data->dpm_table.mem_table.count; i++) {
1383 data->odn_dpm_table.odn_memory_clock_dpm_levels.
1384 performance_level_entries[i].clock =
1385 data->dpm_table.mem_table.dpm_levels[i].value;
1386 data->odn_dpm_table.odn_memory_clock_dpm_levels.
1387 performance_level_entries[i].enabled = true;
1388 }
1389
1390 data->odn_dpm_table.vdd_dependency_on_mclk.count = dep_mclk_table->count;
1391 for (i = 0; i < dep_mclk_table->count; i++) {
1392 data->odn_dpm_table.vdd_dependency_on_mclk.entries[i].clk =
1393 dep_mclk_table->entries[i].clk;
1394 data->odn_dpm_table.vdd_dependency_on_mclk.entries[i].vddInd =
1395 dep_mclk_table->entries[i].vddInd;
1396 data->odn_dpm_table.vdd_dependency_on_mclk.entries[i].vddci =
1397 dep_mclk_table->entries[i].vddci;
1398 }
1399 }
1400
1401 return 0;
1402}
1403
1404/*
1405 * @fn vega10_populate_ulv_state
1406 * @brief Function to provide parameters for the Ultra Low Voltage (ULV) state to SMC.
1407 *
1408 * @param hwmgr - the address of the hardware manager.
1409 * @return Always 0.
1410 */
1411static int vega10_populate_ulv_state(struct pp_hwmgr *hwmgr)
1412{
1413 struct vega10_hwmgr *data =
1414 (struct vega10_hwmgr *)(hwmgr->backend);
1415 struct phm_ppt_v2_information *table_info =
1416 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1417
1418 data->smc_state_table.pp_table.UlvOffsetVid =
1419 (uint8_t)table_info->us_ulv_voltage_offset;
1420
1421 data->smc_state_table.pp_table.UlvSmnclkDid =
1422 (uint8_t)(table_info->us_ulv_smnclk_did);
1423 data->smc_state_table.pp_table.UlvMp1clkDid =
1424 (uint8_t)(table_info->us_ulv_mp1clk_did);
1425 data->smc_state_table.pp_table.UlvGfxclkBypass =
1426 (uint8_t)(table_info->us_ulv_gfxclk_bypass);
1427 data->smc_state_table.pp_table.UlvPhaseSheddingPsi0 =
1428 (uint8_t)(data->vddc_voltage_table.psi0_enable);
1429 data->smc_state_table.pp_table.UlvPhaseSheddingPsi1 =
1430 (uint8_t)(data->vddc_voltage_table.psi1_enable);
1431
1432 return 0;
1433}
1434
1435static int vega10_populate_single_lclk_level(struct pp_hwmgr *hwmgr,
1436 uint32_t lclock, uint8_t *curr_lclk_did)
1437{
1438 struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1439
1440 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(
1441 hwmgr,
1442 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
1443 lclock, &dividers),
1444 "Failed to get LCLK clock settings from VBIOS!",
1445 return -1);
1446
1447 *curr_lclk_did = dividers.ulDid;
1448
1449 return 0;
1450}
1451
1452static int vega10_populate_smc_link_levels(struct pp_hwmgr *hwmgr)
1453{
1454 int result = -1;
1455 struct vega10_hwmgr *data =
1456 (struct vega10_hwmgr *)(hwmgr->backend);
1457 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
1458 struct vega10_pcie_table *pcie_table =
1459 &(data->dpm_table.pcie_table);
1460 uint32_t i, j;
1461
1462 for (i = 0; i < pcie_table->count; i++) {
1463 pp_table->PcieGenSpeed[i] = pcie_table->pcie_gen[i];
1464 pp_table->PcieLaneCount[i] = pcie_table->pcie_lane[i];
1465
1466 result = vega10_populate_single_lclk_level(hwmgr,
1467 pcie_table->lclk[i], &(pp_table->LclkDid[i]));
1468 if (result) {
1469 pr_info("Populate LClock Level %d Failed!\n", i);
1470 return result;
1471 }
1472 }
1473
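/* Pad the remaining SMC link levels by repeating the last valid PCIe entry. */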
1474 j = i - 1;
1475 while (i < NUM_LINK_LEVELS) {
1476 pp_table->PcieGenSpeed[i] = pcie_table->pcie_gen[j];
1477 pp_table->PcieLaneCount[i] = pcie_table->pcie_lane[j];
1478
1479 result = vega10_populate_single_lclk_level(hwmgr,
1480 pcie_table->lclk[j], &(pp_table->LclkDid[i]));
1481 if (result) {
1482 pr_info("Populate LClock Level %d Failed!\n", i);
1483 return result;
1484 }
1485 i++;
1486 }
1487
1488 return result;
1489}
1490
1491/**
1492* Populates single SMC GFXCLK structure using the provided engine clock
1493*
1494* @param hwmgr the address of the hardware manager
1495* @param gfx_clock the GFX clock to use to populate the structure.
1496* @param current_gfxclk_level location in PPTable for the SMC GFXCLK structure.
1497*/
1498
1499static int vega10_populate_single_gfx_level(struct pp_hwmgr *hwmgr,
1500 uint32_t gfx_clock, PllSetting_t *current_gfxclk_level)
1501{
1502 struct phm_ppt_v2_information *table_info =
1503 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1504 struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_sclk =
1505 table_info->vdd_dep_on_sclk;
1506 struct vega10_hwmgr *data =
1507 (struct vega10_hwmgr *)(hwmgr->backend);
1508 struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1509 uint32_t i;
1510
1511 if (data->apply_overdrive_next_settings_mask &
1512 DPMTABLE_OD_UPDATE_VDDC)
1513 dep_on_sclk = (struct phm_ppt_v1_clock_voltage_dependency_table *)
1514 &(data->odn_dpm_table.vdd_dependency_on_sclk);
1515
1516 PP_ASSERT_WITH_CODE(dep_on_sclk,
1517 "Invalid SOC_VDD-GFX_CLK Dependency Table!",
1518 return -EINVAL);
1519
1520 for (i = 0; i < dep_on_sclk->count; i++) {
1521 if (dep_on_sclk->entries[i].clk == gfx_clock)
1522 break;
1523 }
1524
1525 PP_ASSERT_WITH_CODE(dep_on_sclk->count > i,
1526 "Cannot find gfx_clk in SOC_VDD-GFX_CLK!",
1527 return -EINVAL);
1528 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
1529 COMPUTE_GPUCLK_INPUT_FLAG_GFXCLK,
1530 gfx_clock, &dividers),
1531 "Failed to get GFX Clock settings from VBIOS!",
1532 return -EINVAL);
1533
1534 /* Feedback Multiplier: bit 0:8 int, bit 15:12 post_div, bit 31:16 frac */
1535 current_gfxclk_level->FbMult =
1536 cpu_to_le32(dividers.ulPll_fb_mult);
1537 /* Spread FB Multiplier bit: bit 0:8 int, bit 31:16 frac */
1538 current_gfxclk_level->SsOn = dividers.ucPll_ss_enable;
1539 current_gfxclk_level->SsFbMult =
1540 cpu_to_le32(dividers.ulPll_ss_fbsmult);
1541 current_gfxclk_level->SsSlewFrac =
1542 cpu_to_le16(dividers.usPll_ss_slew_frac);
1543 current_gfxclk_level->Did = (uint8_t)(dividers.ulDid);
1544
1545 return 0;
1546}
1547
1548/**
1549 * @brief Populates single SMC SOCCLK structure using the provided clock.
1550 *
1551 * @param hwmgr - the address of the hardware manager.
1552 * @param soc_clock - the SOC clock to use to populate the structure.
1553 * @param current_socclk_level - location in PPTable for the SMC SOCCLK structure.
1554 * @return 0 on success.
1555 */
1556static int vega10_populate_single_soc_level(struct pp_hwmgr *hwmgr,
1557 uint32_t soc_clock, uint8_t *current_soc_did,
1558 uint8_t *current_vol_index)
1559{
1560 struct phm_ppt_v2_information *table_info =
1561 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1562 struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_soc =
1563 table_info->vdd_dep_on_socclk;
1564 struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1565 uint32_t i;
1566
1567 PP_ASSERT_WITH_CODE(dep_on_soc,
1568 "Invalid SOC_VDD-SOC_CLK Dependency Table!",
1569 return -EINVAL);
1570 for (i = 0; i < dep_on_soc->count; i++) {
1571 if (dep_on_soc->entries[i].clk == soc_clock)
1572 break;
1573 }
1574 PP_ASSERT_WITH_CODE(dep_on_soc->count > i,
1575 "Cannot find SOC_CLK in SOC_VDD-SOC_CLK Dependency Table",
1576 return -EINVAL);
1577 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
1578 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
1579 soc_clock, &dividers),
1580 "Failed to get SOC Clock settings from VBIOS!",
1581 return -EINVAL);
1582
1583 *current_soc_did = (uint8_t)dividers.ulDid;
1584 *current_vol_index = (uint8_t)(dep_on_soc->entries[i].vddInd);
1585
1586 return 0;
1587}
1588
1589uint16_t vega10_locate_vddc_given_clock(struct pp_hwmgr *hwmgr,
1590 uint32_t clk,
1591 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table)
1592{
1593 uint16_t i;
1594
1595 for (i = 0; i < dep_table->count; i++) {
1596 if (dep_table->entries[i].clk == clk)
1597 return dep_table->entries[i].vddc;
1598 }
1599
1600 pr_info("[LocateVddcGivenClock] Cannot locate SOC Vddc for this clock!");
1601 return 0;
1602}
1603
1604/**
1605* Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states
1606*
1607* @param hwmgr the address of the hardware manager
1608*/
1609static int vega10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
1610{
1611 struct vega10_hwmgr *data =
1612 (struct vega10_hwmgr *)(hwmgr->backend);
1613 struct phm_ppt_v2_information *table_info =
1614 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1615 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
1616 table_info->vdd_dep_on_socclk;
1617 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
1618 struct vega10_single_dpm_table *dpm_table = &(data->dpm_table.gfx_table);
1619 int result = 0;
1620 uint32_t i, j;
1621
1622 for (i = 0; i < dpm_table->count; i++) {
1623 result = vega10_populate_single_gfx_level(hwmgr,
1624 dpm_table->dpm_levels[i].value,
1625 &(pp_table->GfxclkLevel[i]));
1626 if (result)
1627 return result;
1628 }
1629
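/* The SMC expects all NUM_GFXCLK_DPM_LEVELS slots to be populated: pad any
 * remaining entries by repeating the highest real DPM level. The same
 * padding pattern is used below for SOCCLK, UCLK, ECLK, VCLK and DCLK. */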
1630 j = i - 1;
1631 while (i < NUM_GFXCLK_DPM_LEVELS) {
1632 result = vega10_populate_single_gfx_level(hwmgr,
1633 dpm_table->dpm_levels[j].value,
1634 &(pp_table->GfxclkLevel[i]));
1635 if (result)
1636 return result;
1637 i++;
1638 }
1639
1640 pp_table->GfxclkSlewRate =
1641 cpu_to_le16(table_info->us_gfxclk_slew_rate);
1642
1643 dpm_table = &(data->dpm_table.soc_table);
1644 for (i = 0; i < dpm_table->count; i++) {
1645 pp_table->SocVid[i] =
1646 (uint8_t)convert_to_vid(
1647 vega10_locate_vddc_given_clock(hwmgr,
1648 dpm_table->dpm_levels[i].value,
1649 dep_table));
1650 result = vega10_populate_single_soc_level(hwmgr,
1651 dpm_table->dpm_levels[i].value,
1652 &(pp_table->SocclkDid[i]),
1653 &(pp_table->SocDpmVoltageIndex[i]));
1654 if (result)
1655 return result;
1656 }
1657
1658 j = i - 1;
1659 while (i < NUM_SOCCLK_DPM_LEVELS) {
1660 pp_table->SocVid[i] = pp_table->SocVid[j];
1661 result = vega10_populate_single_soc_level(hwmgr,
1662 dpm_table->dpm_levels[j].value,
1663 &(pp_table->SocclkDid[i]),
1664 &(pp_table->SocDpmVoltageIndex[i]));
1665 if (result)
1666 return result;
1667 i++;
1668 }
1669
1670 return result;
1671}
1672
1673/**
1674 * @brief Populates a single SMC memory (UCLK) level structure using the provided clock.
1675 *
1676 * @param hwmgr - the address of the hardware manager.
1677 * @param mem_clock - the memory clock to use to populate the structure.
1678 * @return 0 on success.
1679 */
1680static int vega10_populate_single_memory_level(struct pp_hwmgr *hwmgr,
1681 uint32_t mem_clock, uint8_t *current_mem_vid,
1682 PllSetting_t *current_memclk_level, uint8_t *current_mem_soc_vind)
1683{
1684 struct vega10_hwmgr *data =
1685 (struct vega10_hwmgr *)(hwmgr->backend);
1686 struct phm_ppt_v2_information *table_info =
1687 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1688 struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_mclk =
1689 table_info->vdd_dep_on_mclk;
1690 struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1691 uint32_t i;
1692
1693 if (data->apply_overdrive_next_settings_mask &
1694 DPMTABLE_OD_UPDATE_VDDC)
1695 dep_on_mclk = (struct phm_ppt_v1_clock_voltage_dependency_table *)
1696 &data->odn_dpm_table.vdd_dependency_on_mclk;
1697
1698 PP_ASSERT_WITH_CODE(dep_on_mclk,
1699 "Invalid SOC_VDD-UCLK Dependency Table!",
1700 return -EINVAL);
1701
1702 for (i = 0; i < dep_on_mclk->count; i++) {
1703 if (dep_on_mclk->entries[i].clk == mem_clock)
1704 break;
1705 }
1706
1707 PP_ASSERT_WITH_CODE(dep_on_mclk->count > i,
1708 "Cannot find UCLK in SOC_VDD-UCLK Dependency Table!",
1709 return -EINVAL);
1710
1711 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(
1712 hwmgr, COMPUTE_GPUCLK_INPUT_FLAG_UCLK, mem_clock, &dividers),
1713 "Failed to get UCLK settings from VBIOS!",
1714 return -1);
1715
1716 *current_mem_vid =
1717 (uint8_t)(convert_to_vid(dep_on_mclk->entries[i].mvdd));
1718 *current_mem_soc_vind =
1719 (uint8_t)(dep_on_mclk->entries[i].vddInd);
1720 current_memclk_level->FbMult = cpu_to_le32(dividers.ulPll_fb_mult);
1721 current_memclk_level->Did = (uint8_t)(dividers.ulDid);
1722
1723 PP_ASSERT_WITH_CODE(current_memclk_level->Did >= 1,
1724 "Invalid Divider ID!",
1725 return -EINVAL);
1726
1727 return 0;
1728}
1729
1730/**
1731 * @brief Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states.
1732 *
1733 * @param hwmgr - the address of the hardware manager.
1734 * @return 0 on success.
1735 */
1736static int vega10_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
1737{
1738 struct vega10_hwmgr *data =
1739 (struct vega10_hwmgr *)(hwmgr->backend);
1740 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
1741 struct vega10_single_dpm_table *dpm_table =
1742 &(data->dpm_table.mem_table);
1743 int result = 0;
1744 uint32_t i, j, reg, mem_channels;
1745
1746 for (i = 0; i < dpm_table->count; i++) {
1747 result = vega10_populate_single_memory_level(hwmgr,
1748 dpm_table->dpm_levels[i].value,
1749 &(pp_table->MemVid[i]),
1750 &(pp_table->UclkLevel[i]),
1751 &(pp_table->MemSocVoltageIndex[i]));
1752 if (result)
1753 return result;
1754 }
1755
1756 j = i - 1;
1757 while (i < NUM_UCLK_DPM_LEVELS) {
1758 result = vega10_populate_single_memory_level(hwmgr,
1759 dpm_table->dpm_levels[j].value,
1760 &(pp_table->MemVid[i]),
1761 &(pp_table->UclkLevel[i]),
1762 &(pp_table->MemSocVoltageIndex[i]));
1763 if (result)
1764 return result;
1765 i++;
1766 }
1767
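/* The HBM channel configuration is read from the Data Fabric
 * DramBaseAddress0 register: IntLvNumChan encodes the channel interleave,
 * and channel_number[] translates that encoding into an actual channel
 * count. Total bus width is 128 bits (HBM_MEMORY_CHANNEL_WIDTH) per channel. */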
1768 reg = soc15_get_register_offset(DF_HWID, 0,
1769 mmDF_CS_AON0_DramBaseAddress0_BASE_IDX,
1770 mmDF_CS_AON0_DramBaseAddress0);
1771 mem_channels = (cgs_read_register(hwmgr->device, reg) &
1772 DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK) >>
1773 DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;
1774 pp_table->NumMemoryChannels = cpu_to_le16(mem_channels);
1775 pp_table->MemoryChannelWidth =
1776 cpu_to_le16(HBM_MEMORY_CHANNEL_WIDTH *
1777 channel_number[mem_channels]);
1778
1779 pp_table->LowestUclkReservedForUlv =
1780 (uint8_t)(data->lowest_uclk_reserved_for_ulv);
1781
1782 return result;
1783}
1784
1785static int vega10_populate_single_display_type(struct pp_hwmgr *hwmgr,
1786 DSPCLK_e disp_clock)
1787{
1788 struct vega10_hwmgr *data =
1789 (struct vega10_hwmgr *)(hwmgr->backend);
1790 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
1791 struct phm_ppt_v2_information *table_info =
1792 (struct phm_ppt_v2_information *)
1793 (hwmgr->pptable);
1794 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
1795 uint32_t i;
1796 uint16_t clk = 0, vddc = 0;
1797 uint8_t vid = 0;
1798
1799 switch (disp_clock) {
1800 case DSPCLK_DCEFCLK:
1801 dep_table = table_info->vdd_dep_on_dcefclk;
1802 break;
1803 case DSPCLK_DISPCLK:
1804 dep_table = table_info->vdd_dep_on_dispclk;
1805 break;
1806 case DSPCLK_PIXCLK:
1807 dep_table = table_info->vdd_dep_on_pixclk;
1808 break;
1809 case DSPCLK_PHYCLK:
1810 dep_table = table_info->vdd_dep_on_phyclk;
1811 break;
1812 default:
1813 return -1;
1814 }
1815
1816 PP_ASSERT_WITH_CODE(dep_table->count <= NUM_DSPCLK_LEVELS,
1817 "Number Of Entries Exceeded maximum!",
1818 return -1);
1819
1820 for (i = 0; i < dep_table->count; i++) {
1821 clk = (uint16_t)(dep_table->entries[i].clk / 100);
1822 vddc = table_info->vddc_lookup_table->
1823 entries[dep_table->entries[i].vddInd].us_vdd;
1824 vid = (uint8_t)convert_to_vid(vddc);
1825 pp_table->DisplayClockTable[disp_clock][i].Freq =
1826 cpu_to_le16(clk);
1827 pp_table->DisplayClockTable[disp_clock][i].Vid =
1828 cpu_to_le16(vid);
1829 }
1830
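/* Pad unused DisplayClockTable slots with the last valid frequency/VID pair
 * so the SMC never reads uninitialized entries. */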
1831 while (i < NUM_DSPCLK_LEVELS) {
1832 pp_table->DisplayClockTable[disp_clock][i].Freq =
1833 cpu_to_le16(clk);
1834 pp_table->DisplayClockTable[disp_clock][i].Vid =
1835 cpu_to_le16(vid);
1836 i++;
1837 }
1838
1839 return 0;
1840}
1841
1842static int vega10_populate_all_display_clock_levels(struct pp_hwmgr *hwmgr)
1843{
1844 uint32_t i;
1845
1846 for (i = 0; i < DSPCLK_COUNT; i++) {
1847 PP_ASSERT_WITH_CODE(!vega10_populate_single_display_type(hwmgr, i),
1848 "Failed to populate Clock in DisplayClockTable!",
1849 return -1);
1850 }
1851
1852 return 0;
1853}
1854
1855static int vega10_populate_single_eclock_level(struct pp_hwmgr *hwmgr,
1856 uint32_t eclock, uint8_t *current_eclk_did,
1857 uint8_t *current_soc_vol)
1858{
1859 struct phm_ppt_v2_information *table_info =
1860 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1861 struct phm_ppt_v1_mm_clock_voltage_dependency_table *dep_table =
1862 table_info->mm_dep_table;
1863 struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1864 uint32_t i;
1865
1866 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
1867 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
1868 eclock, &dividers),
1869 "Failed to get ECLK clock settings from VBIOS!",
1870 return -1);
1871
1872 *current_eclk_did = (uint8_t)dividers.ulDid;
1873
1874 for (i = 0; i < dep_table->count; i++) {
1875 if (dep_table->entries[i].eclk == eclock)
1876 *current_soc_vol = dep_table->entries[i].vddcInd;
1877 }
1878
1879 return 0;
1880}
1881
1882static int vega10_populate_smc_vce_levels(struct pp_hwmgr *hwmgr)
1883{
1884 struct vega10_hwmgr *data =
1885 (struct vega10_hwmgr *)(hwmgr->backend);
1886 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
1887 struct vega10_single_dpm_table *dpm_table = &(data->dpm_table.eclk_table);
1888 int result = -EINVAL;
1889 uint32_t i, j;
1890
1891 for (i = 0; i < dpm_table->count; i++) {
1892 result = vega10_populate_single_eclock_level(hwmgr,
1893 dpm_table->dpm_levels[i].value,
1894 &(pp_table->EclkDid[i]),
1895 &(pp_table->VceDpmVoltageIndex[i]));
1896 if (result)
1897 return result;
1898 }
1899
1900 j = i - 1;
1901 while (i < NUM_VCE_DPM_LEVELS) {
1902 result = vega10_populate_single_eclock_level(hwmgr,
1903 dpm_table->dpm_levels[j].value,
1904 &(pp_table->EclkDid[i]),
1905 &(pp_table->VceDpmVoltageIndex[i]));
1906 if (result)
1907 return result;
1908 i++;
1909 }
1910
1911 return result;
1912}
1913
1914static int vega10_populate_single_vclock_level(struct pp_hwmgr *hwmgr,
1915 uint32_t vclock, uint8_t *current_vclk_did)
1916{
1917 struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1918
1919 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
1920 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
1921 vclock, &dividers),
1922 "Failed to get VCLK clock settings from VBIOS!",
1923 return -EINVAL);
1924
1925 *current_vclk_did = (uint8_t)dividers.ulDid;
1926
1927 return 0;
1928}
1929
1930static int vega10_populate_single_dclock_level(struct pp_hwmgr *hwmgr,
1931 uint32_t dclock, uint8_t *current_dclk_did)
1932{
1933 struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1934
1935 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
1936 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
1937 dclock, &dividers),
1938 "Failed to get DCLK clock settings from VBIOS!",
1939 return -EINVAL);
1940
1941 *current_dclk_did = (uint8_t)dividers.ulDid;
1942
1943 return 0;
1944}
1945
1946static int vega10_populate_smc_uvd_levels(struct pp_hwmgr *hwmgr)
1947{
1948 struct vega10_hwmgr *data =
1949 (struct vega10_hwmgr *)(hwmgr->backend);
1950 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
1951 struct vega10_single_dpm_table *vclk_dpm_table =
1952 &(data->dpm_table.vclk_table);
1953 struct vega10_single_dpm_table *dclk_dpm_table =
1954 &(data->dpm_table.dclk_table);
1955 struct phm_ppt_v2_information *table_info =
1956 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1957 struct phm_ppt_v1_mm_clock_voltage_dependency_table *dep_table =
1958 table_info->mm_dep_table;
1959 int result = -EINVAL;
1960 uint32_t i, j;
1961
1962 for (i = 0; i < vclk_dpm_table->count; i++) {
1963 result = vega10_populate_single_vclock_level(hwmgr,
1964 vclk_dpm_table->dpm_levels[i].value,
1965 &(pp_table->VclkDid[i]));
1966 if (result)
1967 return result;
1968 }
1969
1970 j = i - 1;
1971 while (i < NUM_UVD_DPM_LEVELS) {
1972 result = vega10_populate_single_vclock_level(hwmgr,
1973 vclk_dpm_table->dpm_levels[j].value,
1974 &(pp_table->VclkDid[i]));
1975 if (result)
1976 return result;
1977 i++;
1978 }
1979
1980 for (i = 0; i < dclk_dpm_table->count; i++) {
1981 result = vega10_populate_single_dclock_level(hwmgr,
1982 dclk_dpm_table->dpm_levels[i].value,
1983 &(pp_table->DclkDid[i]));
1984 if (result)
1985 return result;
1986 }
1987
1988 j = i - 1;
1989 while (i < NUM_UVD_DPM_LEVELS) {
1990 result = vega10_populate_single_dclock_level(hwmgr,
1991 dclk_dpm_table->dpm_levels[j].value,
1992 &(pp_table->DclkDid[i]));
1993 if (result)
1994 return result;
1995 i++;
1996 }
1997
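/* The MM dependency table is expected to line up index-for-index with the
 * VCLK/DCLK DPM tables; bail out if an entry's clocks do not match the DPM
 * level at the same index. */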
1998 for (i = 0; i < dep_table->count; i++) {
1999 if (dep_table->entries[i].vclk ==
2000 vclk_dpm_table->dpm_levels[i].value &&
2001 dep_table->entries[i].dclk ==
2002 dclk_dpm_table->dpm_levels[i].value)
2003 pp_table->UvdDpmVoltageIndex[i] =
2004 dep_table->entries[i].vddcInd;
2005 else
2006 return -1;
2007 }
2008
2009 j = i - 1;
2010 while (i < NUM_UVD_DPM_LEVELS) {
2011 pp_table->UvdDpmVoltageIndex[i] = dep_table->entries[j].vddcInd;
2012 i++;
2013 }
2014
2015 return 0;
2016}
2017
2018static int vega10_populate_clock_stretcher_table(struct pp_hwmgr *hwmgr)
2019{
2020 struct vega10_hwmgr *data =
2021 (struct vega10_hwmgr *)(hwmgr->backend);
2022 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
2023 struct phm_ppt_v2_information *table_info =
2024 (struct phm_ppt_v2_information *)(hwmgr->pptable);
2025 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
2026 table_info->vdd_dep_on_sclk;
2027 uint32_t i;
2028
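/* cks_voffset is scaled by VOLTAGE_VID_OFFSET_SCALE2/SCALE1 (100/625), i.e.
 * divided by 6.25, which converts a millivolt offset into SVI2 VID steps
 * (assuming the pptable stores the offset in mV). */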
2029 for (i = 0; i < dep_table->count; i++) {
2030 pp_table->CksEnable[i] = dep_table->entries[i].cks_enable;
2031 pp_table->CksVidOffset[i] = (uint8_t)(dep_table->entries[i].cks_voffset
2032 * VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
2033 }
2034
2035 return 0;
2036}
2037
2038static int vega10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
2039{
2040 struct vega10_hwmgr *data =
2041 (struct vega10_hwmgr *)(hwmgr->backend);
2042 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
2043 struct phm_ppt_v2_information *table_info =
2044 (struct phm_ppt_v2_information *)(hwmgr->pptable);
2045 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
2046 table_info->vdd_dep_on_sclk;
2047 struct pp_atomfwctrl_avfs_parameters avfs_params = {0};
2048 int result = 0;
2049 uint32_t i;
2050
2051 pp_table->MinVoltageVid = (uint8_t)0xff;
2052 pp_table->MaxVoltageVid = (uint8_t)0;
2053
2054 if (data->smu_features[GNLD_AVFS].supported) {
2055 result = pp_atomfwctrl_get_avfs_information(hwmgr, &avfs_params);
2056 if (!result) {
2057 pp_table->MinVoltageVid = (uint8_t)
2058 convert_to_vid((uint16_t)(avfs_params.ulMinVddc));
2059 pp_table->MaxVoltageVid = (uint8_t)
2060 convert_to_vid((uint16_t)(avfs_params.ulMaxVddc));
2061
2062 pp_table->AConstant[0] = cpu_to_le32(avfs_params.ulMeanNsigmaAcontant0);
2063 pp_table->AConstant[1] = cpu_to_le32(avfs_params.ulMeanNsigmaAcontant1);
2064 pp_table->AConstant[2] = cpu_to_le32(avfs_params.ulMeanNsigmaAcontant2);
2065 pp_table->DC_tol_sigma = cpu_to_le16(avfs_params.usMeanNsigmaDcTolSigma);
2066 pp_table->Platform_mean = cpu_to_le16(avfs_params.usMeanNsigmaPlatformMean);
2067 pp_table->Platform_sigma = cpu_to_le16(avfs_params.usMeanNsigmaDcTolSigma);
2068 pp_table->PSM_Age_CompFactor = cpu_to_le16(avfs_params.usPsmAgeComfactor);
2069
2070 pp_table->BtcGbVdroopTableCksOff.a0 =
2071 cpu_to_le32(avfs_params.ulGbVdroopTableCksoffA0);
2072 pp_table->BtcGbVdroopTableCksOff.a0_shift = 20;
2073 pp_table->BtcGbVdroopTableCksOff.a1 =
2074 cpu_to_le32(avfs_params.ulGbVdroopTableCksoffA1);
2075 pp_table->BtcGbVdroopTableCksOff.a1_shift = 20;
2076 pp_table->BtcGbVdroopTableCksOff.a2 =
2077 cpu_to_le32(avfs_params.ulGbVdroopTableCksoffA2);
2078 pp_table->BtcGbVdroopTableCksOff.a2_shift = 20;
2079
2080 pp_table->OverrideBtcGbCksOn = avfs_params.ucEnableGbVdroopTableCkson;
2081 pp_table->BtcGbVdroopTableCksOn.a0 =
2082 cpu_to_le32(avfs_params.ulGbVdroopTableCksonA0);
2083 pp_table->BtcGbVdroopTableCksOn.a0_shift = 20;
2084 pp_table->BtcGbVdroopTableCksOn.a1 =
2085 cpu_to_le32(avfs_params.ulGbVdroopTableCksonA1);
2086 pp_table->BtcGbVdroopTableCksOn.a1_shift = 20;
2087 pp_table->BtcGbVdroopTableCksOn.a2 =
2088 cpu_to_le32(avfs_params.ulGbVdroopTableCksonA2);
2089 pp_table->BtcGbVdroopTableCksOn.a2_shift = 20;
2090
2091 pp_table->AvfsGbCksOn.m1 =
2092 cpu_to_le32(avfs_params.ulGbFuseTableCksonM1);
2093 pp_table->AvfsGbCksOn.m2 =
2094 cpu_to_le16(avfs_params.ulGbFuseTableCksonM2);
2095 pp_table->AvfsGbCksOn.b =
2096 cpu_to_le32(avfs_params.ulGbFuseTableCksonB);
2097 pp_table->AvfsGbCksOn.m1_shift = 24;
2098 pp_table->AvfsGbCksOn.m2_shift = 12;
2099 pp_table->AvfsGbCksOn.b_shift = 0;
2100
2101 pp_table->OverrideAvfsGbCksOn =
2102 avfs_params.ucEnableGbFuseTableCkson;
2103 pp_table->AvfsGbCksOff.m1 =
2104 cpu_to_le32(avfs_params.ulGbFuseTableCksoffM1);
2105 pp_table->AvfsGbCksOff.m2 =
2106 cpu_to_le16(avfs_params.ulGbFuseTableCksoffM2);
2107 pp_table->AvfsGbCksOff.b =
2108 cpu_to_le32(avfs_params.ulGbFuseTableCksoffB);
2109 pp_table->AvfsGbCksOff.m1_shift = 24;
2110 pp_table->AvfsGbCksOff.m2_shift = 12;
2111 pp_table->AvfsGbCksOff.b_shift = 0;
2112
2113 for (i = 0; i < dep_table->count; i++) {
2114 if (dep_table->entries[i].sclk_offset == 0)
2115 pp_table->StaticVoltageOffsetVid[i] = 248;
2116 else
2117 pp_table->StaticVoltageOffsetVid[i] =
2118 (uint8_t)(dep_table->entries[i].sclk_offset *
2119 VOLTAGE_VID_OFFSET_SCALE2 /
2120 VOLTAGE_VID_OFFSET_SCALE1);
2121 }
2122
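/* For each display clock domain below: if both driver-side quadratic
 * coefficients (a, b) have been overridden from the default registry key
 * value, use the driver values; otherwise fall back to the coefficients
 * reported by the VBIOS AVFS table. m1/m2/b are fixed-point numbers whose
 * scaling is given by the corresponding *_shift fields. */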
2123 if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2124 data->disp_clk_quad_eqn_a) &&
2125 (PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2126 data->disp_clk_quad_eqn_b)) {
2127 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m1 =
2128 (int32_t)data->disp_clk_quad_eqn_a;
2129 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m2 =
2130 (int32_t)data->disp_clk_quad_eqn_b;
2131 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].b =
2132 (int32_t)data->disp_clk_quad_eqn_c;
2133 } else {
2134 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m1 =
2135 (int32_t)avfs_params.ulDispclk2GfxclkM1;
2136 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m2 =
2137 (int32_t)avfs_params.ulDispclk2GfxclkM2;
2138 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].b =
2139 (int32_t)avfs_params.ulDispclk2GfxclkB;
2140 }
2141
2142 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m1_shift = 24;
2143 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m2_shift = 12;
2144 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].b_shift = 12;
2145
2146 if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2147 data->dcef_clk_quad_eqn_a) &&
2148 (PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2149 data->dcef_clk_quad_eqn_b)) {
2150 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m1 =
2151 (int32_t)data->dcef_clk_quad_eqn_a;
2152 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m2 =
2153 (int32_t)data->dcef_clk_quad_eqn_b;
2154 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].b =
2155 (int32_t)data->dcef_clk_quad_eqn_c;
2156 } else {
2157 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m1 =
2158 (int32_t)avfs_params.ulDcefclk2GfxclkM1;
2159 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m2 =
2160 (int32_t)avfs_params.ulDcefclk2GfxclkM2;
2161 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].b =
2162 (int32_t)avfs_params.ulDcefclk2GfxclkB;
2163 }
2164
2165 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m1_shift = 24;
2166 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m2_shift = 12;
2167 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].b_shift = 12;
2168
2169 if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2170 data->pixel_clk_quad_eqn_a) &&
2171 (PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2172 data->pixel_clk_quad_eqn_b)) {
2173 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m1 =
2174 (int32_t)data->pixel_clk_quad_eqn_a;
2175 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m2 =
2176 (int32_t)data->pixel_clk_quad_eqn_b;
2177 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].b =
2178 (int32_t)data->pixel_clk_quad_eqn_c;
2179 } else {
2180 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m1 =
2181 (int32_t)avfs_params.ulPixelclk2GfxclkM1;
2182 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m2 =
2183 (int32_t)avfs_params.ulPixelclk2GfxclkM2;
2184 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].b =
2185 (int32_t)avfs_params.ulPixelclk2GfxclkB;
2186 }
2187
2188 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m1_shift = 24;
2189 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m2_shift = 12;
2190 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].b_shift = 12;
2191 if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2192 data->phy_clk_quad_eqn_a) &&
2193 (PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2194 data->phy_clk_quad_eqn_b)) {
2195 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m1 =
2196 (int32_t)data->phy_clk_quad_eqn_a;
2197 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m2 =
2198 (int32_t)data->phy_clk_quad_eqn_b;
2199 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].b =
2200 (int32_t)data->phy_clk_quad_eqn_c;
2201 } else {
2202 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m1 =
2203 (int32_t)avfs_params.ulPhyclk2GfxclkM1;
2204 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m2 =
2205 (int32_t)avfs_params.ulPhyclk2GfxclkM2;
2206 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].b =
2207 (int32_t)avfs_params.ulPhyclk2GfxclkB;
2208 }
2209
2210 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m1_shift = 24;
2211 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m2_shift = 12;
2212 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].b_shift = 12;
2213 } else {
2214 data->smu_features[GNLD_AVFS].supported = false;
2215 }
2216 }
2217
2218 return 0;
2219}
2220
2221static int vega10_populate_gpio_parameters(struct pp_hwmgr *hwmgr)
2222{
2223 struct vega10_hwmgr *data =
2224 (struct vega10_hwmgr *)(hwmgr->backend);
2225 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
2226 struct pp_atomfwctrl_gpio_parameters gpio_params = {0};
2227 int result;
2228
2229 result = pp_atomfwctrl_get_gpio_information(hwmgr, &gpio_params);
2230 if (!result) {
2231 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2232 PHM_PlatformCaps_RegulatorHot) &&
2233 (data->registry_data.regulator_hot_gpio_support)) {
2234 pp_table->VR0HotGpio = gpio_params.ucVR0HotGpio;
2235 pp_table->VR0HotPolarity = gpio_params.ucVR0HotPolarity;
2236 pp_table->VR1HotGpio = gpio_params.ucVR1HotGpio;
2237 pp_table->VR1HotPolarity = gpio_params.ucVR1HotPolarity;
2238 } else {
2239 pp_table->VR0HotGpio = 0;
2240 pp_table->VR0HotPolarity = 0;
2241 pp_table->VR1HotGpio = 0;
2242 pp_table->VR1HotPolarity = 0;
2243 }
2244
2245 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2246 PHM_PlatformCaps_AutomaticDCTransition) &&
2247 (data->registry_data.ac_dc_switch_gpio_support)) {
2248 pp_table->AcDcGpio = gpio_params.ucAcDcGpio;
2249 pp_table->AcDcPolarity = gpio_params.ucAcDcPolarity;
2250 } else {
2251 pp_table->AcDcGpio = 0;
2252 pp_table->AcDcPolarity = 0;
2253 }
2254 }
2255
2256 return result;
2257}
2258
2259static int vega10_avfs_enable(struct pp_hwmgr *hwmgr, bool enable)
2260{
2261 struct vega10_hwmgr *data =
2262 (struct vega10_hwmgr *)(hwmgr->backend);
2263
2264 if (data->smu_features[GNLD_AVFS].supported) {
2265 if (enable) {
2266 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
2267 true,
2268 data->smu_features[GNLD_AVFS].smu_feature_bitmap),
2269 "[avfs_control] Attempt to Enable AVFS feature Failed!",
2270 return -1);
2271 data->smu_features[GNLD_AVFS].enabled = true;
2272 } else {
2273 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
2274 false,
2275 data->smu_features[GNLD_AVFS].smu_feature_bitmap),
2276 "[avfs_control] Attempt to Disable AVFS feature Failed!",
2277 return -1);
2278 data->smu_features[GNLD_AVFS].enabled = false;
2279 }
2280 }
2281
2282 return 0;
2283}
2284
2285/**
2286* Initializes the SMC table and uploads it
2287*
2288* @param hwmgr the address of the powerplay hardware manager.
2289*
2290* @return 0 on success, a negative error code otherwise.
2291*/
2292static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
2293{
2294 int result;
2295 struct vega10_hwmgr *data =
2296 (struct vega10_hwmgr *)(hwmgr->backend);
2297 struct phm_ppt_v2_information *table_info =
2298 (struct phm_ppt_v2_information *)(hwmgr->pptable);
2299 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
2300 struct pp_atomfwctrl_voltage_table voltage_table;
2301 struct pp_atomfwctrl_bios_boot_up_values boot_up_values;
2302
2303 result = vega10_setup_default_dpm_tables(hwmgr);
2304 PP_ASSERT_WITH_CODE(!result,
2305 "Failed to setup default DPM tables!",
2306 return result);
2307
2308 pp_atomfwctrl_get_voltage_table_v4(hwmgr, VOLTAGE_TYPE_VDDC,
2309 VOLTAGE_OBJ_SVID2, &voltage_table);
2310 pp_table->MaxVidStep = voltage_table.max_vid_step;
2311
2312 pp_table->GfxDpmVoltageMode =
2313 (uint8_t)(table_info->uc_gfx_dpm_voltage_mode);
2314 pp_table->SocDpmVoltageMode =
2315 (uint8_t)(table_info->uc_soc_dpm_voltage_mode);
2316 pp_table->UclkDpmVoltageMode =
2317 (uint8_t)(table_info->uc_uclk_dpm_voltage_mode);
2318 pp_table->UvdDpmVoltageMode =
2319 (uint8_t)(table_info->uc_uvd_dpm_voltage_mode);
2320 pp_table->VceDpmVoltageMode =
2321 (uint8_t)(table_info->uc_vce_dpm_voltage_mode);
2322 pp_table->Mp0DpmVoltageMode =
2323 (uint8_t)(table_info->uc_mp0_dpm_voltage_mode);
2324
2325 pp_table->DisplayDpmVoltageMode =
2326 (uint8_t)(table_info->uc_dcef_dpm_voltage_mode);
2327
2328 if (data->registry_data.ulv_support &&
2329 table_info->us_ulv_voltage_offset) {
2330 result = vega10_populate_ulv_state(hwmgr);
2331 PP_ASSERT_WITH_CODE(!result,
2332 "Failed to initialize ULV state!",
2333 return result);
2334 }
2335
2336 result = vega10_populate_smc_link_levels(hwmgr);
2337 PP_ASSERT_WITH_CODE(!result,
2338 "Failed to initialize Link Level!",
2339 return result);
2340
2341 result = vega10_populate_all_graphic_levels(hwmgr);
2342 PP_ASSERT_WITH_CODE(!result,
2343 "Failed to initialize Graphics Level!",
2344 return result);
2345
2346 result = vega10_populate_all_memory_levels(hwmgr);
2347 PP_ASSERT_WITH_CODE(!result,
2348 "Failed to initialize Memory Level!",
2349 return result);
2350
2351 result = vega10_populate_all_display_clock_levels(hwmgr);
2352 PP_ASSERT_WITH_CODE(!result,
2353 "Failed to initialize Display Level!",
2354 return result);
2355
2356 result = vega10_populate_smc_vce_levels(hwmgr);
2357 PP_ASSERT_WITH_CODE(!result,
2358 "Failed to initialize VCE Level!",
2359 return result);
2360
2361 result = vega10_populate_smc_uvd_levels(hwmgr);
2362 PP_ASSERT_WITH_CODE(!result,
2363 "Failed to initialize UVD Level!",
2364 return result);
2365
2366 if (data->registry_data.clock_stretcher_support) {
2367 result = vega10_populate_clock_stretcher_table(hwmgr);
2368 PP_ASSERT_WITH_CODE(!result,
2369 "Failed to populate Clock Stretcher Table!",
2370 return result);
2371 }
2372
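/* Cache the VBIOS boot-up voltages and clocks. If a non-zero boot VDDC is
 * reported, ask the SMU to hold the SOC voltage floor at that value; the
 * lock is released again in vega10_start_dpm() once DPM is running. */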
2373 result = pp_atomfwctrl_get_vbios_bootup_values(hwmgr, &boot_up_values);
2374 if (!result) {
2375 data->vbios_boot_state.vddc = boot_up_values.usVddc;
2376 data->vbios_boot_state.vddci = boot_up_values.usVddci;
2377 data->vbios_boot_state.mvddc = boot_up_values.usMvddc;
2378 data->vbios_boot_state.gfx_clock = boot_up_values.ulGfxClk;
2379 data->vbios_boot_state.mem_clock = boot_up_values.ulUClk;
2380 data->vbios_boot_state.soc_clock = boot_up_values.ulSocClk;
2381 if (0 != boot_up_values.usVddc) {
2382 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
2383 PPSMC_MSG_SetFloorSocVoltage,
2384 (boot_up_values.usVddc * 4));
2385 data->vbios_boot_state.bsoc_vddc_lock = true;
2386 } else {
2387 data->vbios_boot_state.bsoc_vddc_lock = false;
2388 }
2389 }
2390
2391 result = vega10_populate_avfs_parameters(hwmgr);
2392 PP_ASSERT_WITH_CODE(!result,
2393 "Failed to initialize AVFS Parameters!",
2394 return result);
2395
2396 result = vega10_populate_gpio_parameters(hwmgr);
2397 PP_ASSERT_WITH_CODE(!result,
2398 "Failed to initialize GPIO Parameters!",
2399 return result);
2400
2401 pp_table->GfxclkAverageAlpha = (uint8_t)
2402 (data->gfxclk_average_alpha);
2403 pp_table->SocclkAverageAlpha = (uint8_t)
2404 (data->socclk_average_alpha);
2405 pp_table->UclkAverageAlpha = (uint8_t)
2406 (data->uclk_average_alpha);
2407 pp_table->GfxActivityAverageAlpha = (uint8_t)
2408 (data->gfx_activity_average_alpha);
2409
2410 result = vega10_copy_table_to_smc(hwmgr->smumgr,
2411 (uint8_t *)pp_table, PPTABLE);
2412 PP_ASSERT_WITH_CODE(!result,
2413 "Failed to upload PPtable!", return result);
2414
2415 result = vega10_avfs_enable(hwmgr, true);
2416 PP_ASSERT_WITH_CODE(!result, "Attempt to enable AVFS feature Failed!",
2417 return result);
2418
2419 return 0;
2420}
2421
2422static int vega10_enable_thermal_protection(struct pp_hwmgr *hwmgr)
2423{
2424 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
2425
2426 if (data->smu_features[GNLD_THERMAL].supported) {
2427 if (data->smu_features[GNLD_THERMAL].enabled)
2428 pr_info("THERMAL Feature Already enabled!");
2429
2430 PP_ASSERT_WITH_CODE(
2431 !vega10_enable_smc_features(hwmgr->smumgr,
2432 true,
2433 data->smu_features[GNLD_THERMAL].smu_feature_bitmap),
2434 "Enable THERMAL Feature Failed!",
2435 return -1);
2436 data->smu_features[GNLD_THERMAL].enabled = true;
2437 }
2438
2439 return 0;
2440}
2441
2442static int vega10_disable_thermal_protection(struct pp_hwmgr *hwmgr)
2443{
2444 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
2445
2446 if (data->smu_features[GNLD_THERMAL].supported) {
2447 if (!data->smu_features[GNLD_THERMAL].enabled)
2448 pr_info("THERMAL Feature Already disabled!");
2449
2450 PP_ASSERT_WITH_CODE(
2451 !vega10_enable_smc_features(hwmgr->smumgr,
2452 false,
2453 data->smu_features[GNLD_THERMAL].smu_feature_bitmap),
2454 "disable THERMAL Feature Failed!",
2455 return -1);
2456 data->smu_features[GNLD_THERMAL].enabled = false;
2457 }
2458
2459 return 0;
2460}
2461
2462static int vega10_enable_vrhot_feature(struct pp_hwmgr *hwmgr)
2463{
2464 struct vega10_hwmgr *data =
2465 (struct vega10_hwmgr *)(hwmgr->backend);
2466
2467 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2468 PHM_PlatformCaps_RegulatorHot)) {
2469 if (data->smu_features[GNLD_VR0HOT].supported) {
2470 PP_ASSERT_WITH_CODE(
2471 !vega10_enable_smc_features(hwmgr->smumgr,
2472 true,
2473 data->smu_features[GNLD_VR0HOT].smu_feature_bitmap),
2474 "Attempt to Enable VR0 Hot feature Failed!",
2475 return -1);
2476 data->smu_features[GNLD_VR0HOT].enabled = true;
2477 } else {
2478 if (data->smu_features[GNLD_VR1HOT].supported) {
2479 PP_ASSERT_WITH_CODE(
2480 !vega10_enable_smc_features(hwmgr->smumgr,
2481 true,
2482 data->smu_features[GNLD_VR1HOT].smu_feature_bitmap),
2483 "Attempt to Enable VR0 Hot feature Failed!",
2484 return -1);
2485 data->smu_features[GNLD_VR1HOT].enabled = true;
2486 }
2487 }
2488 }
2489 return 0;
2490}
2491
2492static int vega10_enable_ulv(struct pp_hwmgr *hwmgr)
2493{
2494 struct vega10_hwmgr *data =
2495 (struct vega10_hwmgr *)(hwmgr->backend);
2496
2497 if (data->registry_data.ulv_support) {
2498 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
2499 true, data->smu_features[GNLD_ULV].smu_feature_bitmap),
2500 "Enable ULV Feature Failed!",
2501 return -1);
2502 data->smu_features[GNLD_ULV].enabled = true;
2503 }
2504
2505 return 0;
2506}
2507
2508static int vega10_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
2509{
2510 struct vega10_hwmgr *data =
2511 (struct vega10_hwmgr *)(hwmgr->backend);
2512
2513 if (data->smu_features[GNLD_DS_GFXCLK].supported) {
2514 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
2515 true, data->smu_features[GNLD_DS_GFXCLK].smu_feature_bitmap),
2516 "Attempt to Enable DS_GFXCLK Feature Failed!",
2517 return -1);
2518 data->smu_features[GNLD_DS_GFXCLK].enabled = true;
2519 }
2520
2521 if (data->smu_features[GNLD_DS_SOCCLK].supported) {
2522 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
2523 true, data->smu_features[GNLD_DS_SOCCLK].smu_feature_bitmap),
2524 "Attempt to Enable DS_GFXCLK Feature Failed!",
2525 return -1);
2526 data->smu_features[GNLD_DS_SOCCLK].enabled = true;
2527 }
2528
2529 if (data->smu_features[GNLD_DS_LCLK].supported) {
2530 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
2531 true, data->smu_features[GNLD_DS_LCLK].smu_feature_bitmap),
2532 "Attempt to Enable DS_GFXCLK Feature Failed!",
2533 return -1);
2534 data->smu_features[GNLD_DS_LCLK].enabled = true;
2535 }
2536
2537 return 0;
2538}
2539
2540static int vega10_stop_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap)
2541{
2542 struct vega10_hwmgr *data =
2543 (struct vega10_hwmgr *)(hwmgr->backend);
2544 uint32_t i, feature_mask = 0;
2545
2546
2547 if (data->smu_features[GNLD_LED_DISPLAY].supported == true) {
2548 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
2549 false, data->smu_features[GNLD_LED_DISPLAY].smu_feature_bitmap),
2550 "Attempt to Disable LED DPM feature Failed!", return -EINVAL);
2551 data->smu_features[GNLD_LED_DISPLAY].enabled = false;
2552 }
2553
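/* Collect every currently-enabled feature selected by 'bitmap' into one
 * mask and disable them with a single SMC request. */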
2554 for (i = 0; i < GNLD_DPM_MAX; i++) {
2555 if (data->smu_features[i].smu_feature_bitmap & bitmap) {
2556 if (data->smu_features[i].supported) {
2557 if (data->smu_features[i].enabled) {
2558 feature_mask |= data->smu_features[i].
2559 smu_feature_bitmap;
2560 data->smu_features[i].enabled = false;
2561 }
2562 }
2563 }
2564 }
2565
2566 vega10_enable_smc_features(hwmgr->smumgr, false, feature_mask);
2567
2568 return 0;
2569}
2570
2571/**
2572 * @brief Tell SMC to enable the supported DPMs.
2573 *
2574 * @param hwmgr - the address of the powerplay hardware manager.
2575 * @param bitmap - bitmap of the features to be enabled.
2576 * @return 0 if at least one DPM is successfully enabled.
2577 */
2578static int vega10_start_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap)
2579{
2580 struct vega10_hwmgr *data =
2581 (struct vega10_hwmgr *)(hwmgr->backend);
2582 uint32_t i, feature_mask = 0;
2583
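/* Collect all supported-but-disabled features selected by 'bitmap' into one
 * mask and enable them with a single SMC request; on failure, roll the
 * enabled flags back so the bookkeeping matches the hardware state. */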
2584 for (i = 0; i < GNLD_DPM_MAX; i++) {
2585 if (data->smu_features[i].smu_feature_bitmap & bitmap) {
2586 if (data->smu_features[i].supported) {
2587 if (!data->smu_features[i].enabled) {
2588 feature_mask |= data->smu_features[i].
2589 smu_feature_bitmap;
2590 data->smu_features[i].enabled = true;
2591 }
2592 }
2593 }
2594 }
2595
2596 if (vega10_enable_smc_features(hwmgr->smumgr,
2597 true, feature_mask)) {
2598 for (i = 0; i < GNLD_DPM_MAX; i++) {
2599 if (data->smu_features[i].smu_feature_bitmap &
2600 feature_mask)
2601 data->smu_features[i].enabled = false;
2602 }
2603 }
2604
2605 if (data->smu_features[GNLD_LED_DISPLAY].supported == true) {
2606 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
2607 true, data->smu_features[GNLD_LED_DISPLAY].smu_feature_bitmap),
2608 "Attempt to Enable LED DPM feature Failed!", return -EINVAL);
2609 data->smu_features[GNLD_LED_DISPLAY].enabled = true;
2610 }
2611
2612 if (data->vbios_boot_state.bsoc_vddc_lock) {
2613 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
2614 PPSMC_MSG_SetFloorSocVoltage, 0);
2615 data->vbios_boot_state.bsoc_vddc_lock = false;
2616 }
2617
2618 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2619 PHM_PlatformCaps_Falcon_QuickTransition)) {
2620 if (data->smu_features[GNLD_ACDC].supported) {
2621 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
2622 true, data->smu_features[GNLD_ACDC].smu_feature_bitmap),
2623 "Attempt to Enable DS_GFXCLK Feature Failed!",
2624 return -1);
2625 data->smu_features[GNLD_ACDC].enabled = true;
2626 }
2627 }
2628
2629 return 0;
2630}
2631
2632static int vega10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
2633{
2634 struct vega10_hwmgr *data =
2635 (struct vega10_hwmgr *)(hwmgr->backend);
2636 int tmp_result, result = 0;
2637
2638 tmp_result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
2639 PPSMC_MSG_ConfigureTelemetry, data->config_telemetry);
2640 PP_ASSERT_WITH_CODE(!tmp_result,
2641 "Failed to configure telemetry!",
2642 return tmp_result);
2643
2644 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
2645 PPSMC_MSG_NumOfDisplays, 0);
2646
2647 tmp_result = (!vega10_is_dpm_running(hwmgr)) ? 0 : -1;
2648 PP_ASSERT_WITH_CODE(!tmp_result,
2649 "DPM is already running right , skipping re-enablement!",
2650 return 0);
2651
2652 tmp_result = vega10_construct_voltage_tables(hwmgr);
2653 PP_ASSERT_WITH_CODE(!tmp_result,
2654 "Failed to contruct voltage tables!",
2655 result = tmp_result);
2656
2657 tmp_result = vega10_init_smc_table(hwmgr);
2658 PP_ASSERT_WITH_CODE(!tmp_result,
2659 "Failed to initialize SMC table!",
2660 result = tmp_result);
2661
2662 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2663 PHM_PlatformCaps_ThermalController)) {
2664 tmp_result = vega10_enable_thermal_protection(hwmgr);
2665 PP_ASSERT_WITH_CODE(!tmp_result,
2666 "Failed to enable thermal protection!",
2667 result = tmp_result);
2668 }
2669
2670 tmp_result = vega10_enable_vrhot_feature(hwmgr);
2671 PP_ASSERT_WITH_CODE(!tmp_result,
2672 "Failed to enable VR hot feature!",
2673 result = tmp_result);
2674
2675 tmp_result = vega10_enable_ulv(hwmgr);
2676 PP_ASSERT_WITH_CODE(!tmp_result,
2677 "Failed to enable ULV!",
2678 result = tmp_result);
2679
2680 tmp_result = vega10_enable_deep_sleep_master_switch(hwmgr);
2681 PP_ASSERT_WITH_CODE(!tmp_result,
2682 "Failed to enable deep sleep master switch!",
2683 result = tmp_result);
2684
2685 tmp_result = vega10_start_dpm(hwmgr, SMC_DPM_FEATURES);
2686 PP_ASSERT_WITH_CODE(!tmp_result,
2687 "Failed to start DPM!", result = tmp_result);
2688
2689 tmp_result = vega10_enable_power_containment(hwmgr);
2690 PP_ASSERT_WITH_CODE(!tmp_result,
2691 "Failed to enable power containment!",
2692 result = tmp_result);
2693
2694 tmp_result = vega10_power_control_set_level(hwmgr);
2695 PP_ASSERT_WITH_CODE(!tmp_result,
2696 "Failed to power control set level!",
2697 result = tmp_result);
2698
2699 return result;
2700}
2701
2702static int vega10_get_power_state_size(struct pp_hwmgr *hwmgr)
2703{
2704 return sizeof(struct vega10_power_state);
2705}
2706
2707static int vega10_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr,
2708 void *state, struct pp_power_state *power_state,
2709 void *pp_table, uint32_t classification_flag)
2710{
2711 struct vega10_power_state *vega10_power_state =
2712 cast_phw_vega10_power_state(&(power_state->hardware));
2713 struct vega10_performance_level *performance_level;
2714 ATOM_Vega10_State *state_entry = (ATOM_Vega10_State *)state;
2715 ATOM_Vega10_POWERPLAYTABLE *powerplay_table =
2716 (ATOM_Vega10_POWERPLAYTABLE *)pp_table;
2717 ATOM_Vega10_SOCCLK_Dependency_Table *socclk_dep_table =
2718 (ATOM_Vega10_SOCCLK_Dependency_Table *)
2719 (((unsigned long)powerplay_table) +
2720 le16_to_cpu(powerplay_table->usSocclkDependencyTableOffset));
2721 ATOM_Vega10_GFXCLK_Dependency_Table *gfxclk_dep_table =
2722 (ATOM_Vega10_GFXCLK_Dependency_Table *)
2723 (((unsigned long)powerplay_table) +
2724 le16_to_cpu(powerplay_table->usGfxclkDependencyTableOffset));
2725 ATOM_Vega10_MCLK_Dependency_Table *mclk_dep_table =
2726 (ATOM_Vega10_MCLK_Dependency_Table *)
2727 (((unsigned long)powerplay_table) +
2728 le16_to_cpu(powerplay_table->usMclkDependencyTableOffset));
2729
2730
2731 /* The following fields are not initialized here:
2732 * id orderedList allStatesList
2733 */
2734 power_state->classification.ui_label =
2735 (le16_to_cpu(state_entry->usClassification) &
2736 ATOM_PPLIB_CLASSIFICATION_UI_MASK) >>
2737 ATOM_PPLIB_CLASSIFICATION_UI_SHIFT;
2738 power_state->classification.flags = classification_flag;
2739 /* NOTE: There is a classification2 flag in BIOS
2740 * that is not being used right now
2741 */
2742 power_state->classification.temporary_state = false;
2743 power_state->classification.to_be_deleted = false;
2744
2745 power_state->validation.disallowOnDC =
2746 ((le32_to_cpu(state_entry->ulCapsAndSettings) &
2747 ATOM_Vega10_DISALLOW_ON_DC) != 0);
2748
2749 power_state->display.disableFrameModulation = false;
2750 power_state->display.limitRefreshrate = false;
2751 power_state->display.enableVariBright =
2752 ((le32_to_cpu(state_entry->ulCapsAndSettings) &
2753 ATOM_Vega10_ENABLE_VARIBRIGHT) != 0);
2754
2755 power_state->validation.supportedPowerLevels = 0;
2756 power_state->uvd_clocks.VCLK = 0;
2757 power_state->uvd_clocks.DCLK = 0;
2758 power_state->temperatures.min = 0;
2759 power_state->temperatures.max = 0;
2760
2761 performance_level = &(vega10_power_state->performance_levels
2762 [vega10_power_state->performance_level_count++]);
2763
2764 PP_ASSERT_WITH_CODE(
2765 (vega10_power_state->performance_level_count <
2766 NUM_GFXCLK_DPM_LEVELS),
2767 "Performance levels exceeds SMC limit!",
2768 return -1);
2769
2770 PP_ASSERT_WITH_CODE(
2771 (vega10_power_state->performance_level_count <=
2772 hwmgr->platform_descriptor.
2773 hardwareActivityPerformanceLevels),
2774 "Performance levels exceeds Driver limit!",
2775 return -1);
2776
2777 /* Performance levels are arranged from low to high. */
2778 performance_level->soc_clock = socclk_dep_table->entries
2779 [state_entry->ucSocClockIndexLow].ulClk;
2780 performance_level->gfx_clock = gfxclk_dep_table->entries
2781 [state_entry->ucGfxClockIndexLow].ulClk;
2782 performance_level->mem_clock = mclk_dep_table->entries
2783 [state_entry->ucMemClockIndexLow].ulMemClk;
2784
2785 performance_level = &(vega10_power_state->performance_levels
2786 [vega10_power_state->performance_level_count++]);
2787
2788 performance_level->soc_clock = socclk_dep_table->entries
2789 [state_entry->ucSocClockIndexHigh].ulClk;
2790 performance_level->gfx_clock = gfxclk_dep_table->entries
2791 [state_entry->ucGfxClockIndexHigh].ulClk;
2792 performance_level->mem_clock = mclk_dep_table->entries
2793 [state_entry->ucMemClockIndexHigh].ulMemClk;
2794 return 0;
2795}
2796
2797static int vega10_get_pp_table_entry(struct pp_hwmgr *hwmgr,
2798 unsigned long entry_index, struct pp_power_state *state)
2799{
2800 int result;
2801 struct vega10_power_state *ps;
2802
2803 state->hardware.magic = PhwVega10_Magic;
2804
2805 ps = cast_phw_vega10_power_state(&state->hardware);
2806
2807 result = vega10_get_powerplay_table_entry(hwmgr, entry_index, state,
2808 vega10_get_pp_table_entry_callback_func);
2809
2810 /*
2811 * This is the earliest time we have all the dependency table
2812 * and the VBIOS boot state
2813 */
2814 /* set DC compatible flag if this state supports DC */
2815 if (!state->validation.disallowOnDC)
2816 ps->dc_compatible = true;
2817
2818 ps->uvd_clks.vclk = state->uvd_clocks.VCLK;
2819 ps->uvd_clks.dclk = state->uvd_clocks.DCLK;
2820
2821 return 0;
2822}
2823
2824static int vega10_patch_boot_state(struct pp_hwmgr *hwmgr,
2825 struct pp_hw_power_state *hw_ps)
2826{
2827 return 0;
2828}
2829
2830static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
2831 struct pp_power_state *request_ps,
2832 const struct pp_power_state *current_ps)
2833{
2834 struct vega10_power_state *vega10_ps =
2835 cast_phw_vega10_power_state(&request_ps->hardware);
2836 uint32_t sclk;
2837 uint32_t mclk;
2838 struct PP_Clocks minimum_clocks = {0};
2839 bool disable_mclk_switching;
2840 bool disable_mclk_switching_for_frame_lock;
2841 bool disable_mclk_switching_for_vr;
2842 bool force_mclk_high;
2843 struct cgs_display_info info = {0};
2844 const struct phm_clock_and_voltage_limits *max_limits;
2845 uint32_t i;
2846 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
2847 struct phm_ppt_v2_information *table_info =
2848 (struct phm_ppt_v2_information *)(hwmgr->pptable);
2849 int32_t count;
2850 uint32_t stable_pstate_sclk_dpm_percentage;
2851 uint32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0;
2852 uint32_t latency;
2853
2854 data->battery_state = (PP_StateUILabel_Battery ==
2855 request_ps->classification.ui_label);
2856
2857 if (vega10_ps->performance_level_count != 2)
2858 pr_info("Vega10 should always have 2 performance levels");
2859
2860 max_limits = (PP_PowerSource_AC == hwmgr->power_source) ?
2861 &(hwmgr->dyn_state.max_clock_voltage_on_ac) :
2862 &(hwmgr->dyn_state.max_clock_voltage_on_dc);
2863
2864 /* Cap clock DPM tables at DC MAX if it is in DC. */
2865 if (PP_PowerSource_DC == hwmgr->power_source) {
2866 for (i = 0; i < vega10_ps->performance_level_count; i++) {
2867 if (vega10_ps->performance_levels[i].mem_clock >
2868 max_limits->mclk)
2869 vega10_ps->performance_levels[i].mem_clock =
2870 max_limits->mclk;
2871 if (vega10_ps->performance_levels[i].gfx_clock >
2872 max_limits->sclk)
2873 vega10_ps->performance_levels[i].gfx_clock =
2874 max_limits->sclk;
2875 }
2876 }
2877
2878 vega10_ps->vce_clks.evclk = hwmgr->vce_arbiter.evclk;
2879 vega10_ps->vce_clks.ecclk = hwmgr->vce_arbiter.ecclk;
2880
2881 cgs_get_active_displays_info(hwmgr->device, &info);
2882
2883 /* result = PHM_CheckVBlankTime(hwmgr, &vblankTooShort);*/
2884 minimum_clocks.engineClock = hwmgr->display_config.min_core_set_clock;
2885 /* minimum_clocks.memoryClock = hwmgr->display_config.min_mem_set_clock; */
2886
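/* For the stable p-state profile, pin SCLK to a fixed percentage (75% by
 * default) of the maximum AC engine clock, snapped down to the nearest
 * entry in the SCLK dependency table, and pin MCLK at its maximum. */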
2887 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2888 PHM_PlatformCaps_StablePState)) {
2889 PP_ASSERT_WITH_CODE(
2890 data->registry_data.stable_pstate_sclk_dpm_percentage >= 1 &&
2891 data->registry_data.stable_pstate_sclk_dpm_percentage <= 100,
2892 "percent sclk value must range from 1% to 100%, setting default value",
2893 stable_pstate_sclk_dpm_percentage = 75);
2894
2895 max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac);
2896 stable_pstate_sclk = (max_limits->sclk *
2897 stable_pstate_sclk_dpm_percentage) / 100;
2898
2899 for (count = table_info->vdd_dep_on_sclk->count - 1;
2900 count >= 0; count--) {
2901 if (stable_pstate_sclk >=
2902 table_info->vdd_dep_on_sclk->entries[count].clk) {
2903 stable_pstate_sclk =
2904 table_info->vdd_dep_on_sclk->entries[count].clk;
2905 break;
2906 }
2907 }
2908
2909 if (count < 0)
2910 stable_pstate_sclk = table_info->vdd_dep_on_sclk->entries[0].clk;
2911
2912 stable_pstate_mclk = max_limits->mclk;
2913
2914 minimum_clocks.engineClock = stable_pstate_sclk;
2915 minimum_clocks.memoryClock = stable_pstate_mclk;
2916 }
2917
2918 if (minimum_clocks.engineClock < hwmgr->gfx_arbiter.sclk)
2919 minimum_clocks.engineClock = hwmgr->gfx_arbiter.sclk;
2920
2921 if (minimum_clocks.memoryClock < hwmgr->gfx_arbiter.mclk)
2922 minimum_clocks.memoryClock = hwmgr->gfx_arbiter.mclk;
2923
2924 vega10_ps->sclk_threshold = hwmgr->gfx_arbiter.sclk_threshold;
2925
2926 if (hwmgr->gfx_arbiter.sclk_over_drive) {
2927 PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.sclk_over_drive <=
2928 hwmgr->platform_descriptor.overdriveLimit.engineClock),
2929 "Overdrive sclk exceeds limit",
2930 hwmgr->gfx_arbiter.sclk_over_drive =
2931 hwmgr->platform_descriptor.overdriveLimit.engineClock);
2932
2933 if (hwmgr->gfx_arbiter.sclk_over_drive >= hwmgr->gfx_arbiter.sclk)
2934 vega10_ps->performance_levels[1].gfx_clock =
2935 hwmgr->gfx_arbiter.sclk_over_drive;
2936 }
2937
2938 if (hwmgr->gfx_arbiter.mclk_over_drive) {
2939 PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.mclk_over_drive <=
2940 hwmgr->platform_descriptor.overdriveLimit.memoryClock),
2941 "Overdrive mclk exceeds limit",
2942 hwmgr->gfx_arbiter.mclk_over_drive =
2943 hwmgr->platform_descriptor.overdriveLimit.memoryClock);
2944
2945 if (hwmgr->gfx_arbiter.mclk_over_drive >= hwmgr->gfx_arbiter.mclk)
2946 vega10_ps->performance_levels[1].mem_clock =
2947 hwmgr->gfx_arbiter.mclk_over_drive;
2948 }
2949
2950 disable_mclk_switching_for_frame_lock = phm_cap_enabled(
2951 hwmgr->platform_descriptor.platformCaps,
2952 PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
2953 disable_mclk_switching_for_vr = phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2954 PHM_PlatformCaps_DisableMclkSwitchForVR);
2955 force_mclk_high = phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2956 PHM_PlatformCaps_ForceMclkHigh);
2957
2958 disable_mclk_switching = (info.display_count > 1) ||
2959 disable_mclk_switching_for_frame_lock ||
2960 disable_mclk_switching_for_vr ||
2961 force_mclk_high;
2962
2963 sclk = vega10_ps->performance_levels[0].gfx_clock;
2964 mclk = vega10_ps->performance_levels[0].mem_clock;
2965
2966 if (sclk < minimum_clocks.engineClock)
2967 sclk = (minimum_clocks.engineClock > max_limits->sclk) ?
2968 max_limits->sclk : minimum_clocks.engineClock;
2969
2970 if (mclk < minimum_clocks.memoryClock)
2971 mclk = (minimum_clocks.memoryClock > max_limits->mclk) ?
2972 max_limits->mclk : minimum_clocks.memoryClock;
2973
2974 vega10_ps->performance_levels[0].gfx_clock = sclk;
2975 vega10_ps->performance_levels[0].mem_clock = mclk;
2976
2977 vega10_ps->performance_levels[1].gfx_clock =
2978 (vega10_ps->performance_levels[1].gfx_clock >=
2979 vega10_ps->performance_levels[0].gfx_clock) ?
2980 vega10_ps->performance_levels[1].gfx_clock :
2981 vega10_ps->performance_levels[0].gfx_clock;
2982
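/* When MCLK switching must be avoided (more than one display, frame lock,
 * VR, or forced-high MCLK), both performance levels are run at a single
 * memory clock chosen from the DAL latency table where possible. */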
2983 if (disable_mclk_switching) {
2984 /* Set Mclk the max of level 0 and level 1 */
2985 if (mclk < vega10_ps->performance_levels[1].mem_clock)
2986 mclk = vega10_ps->performance_levels[1].mem_clock;
2987
2988 /* Find the lowest MCLK frequency that is within
2989 * the tolerable latency defined in DAL
2990 */
2991 latency = 0;
2992 for (i = 0; i < data->mclk_latency_table.count; i++) {
2993 if ((data->mclk_latency_table.entries[i].latency <= latency) &&
2994 (data->mclk_latency_table.entries[i].frequency >=
2995 vega10_ps->performance_levels[0].mem_clock) &&
2996 (data->mclk_latency_table.entries[i].frequency <=
2997 vega10_ps->performance_levels[1].mem_clock))
2998 mclk = data->mclk_latency_table.entries[i].frequency;
2999 }
3000 vega10_ps->performance_levels[0].mem_clock = mclk;
3001 } else {
3002 if (vega10_ps->performance_levels[1].mem_clock <
3003 vega10_ps->performance_levels[0].mem_clock)
3004 vega10_ps->performance_levels[1].mem_clock =
3005 vega10_ps->performance_levels[0].mem_clock;
3006 }
3007
3008 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3009 PHM_PlatformCaps_StablePState)) {
3010 for (i = 0; i < vega10_ps->performance_level_count; i++) {
3011 vega10_ps->performance_levels[i].gfx_clock = stable_pstate_sclk;
3012 vega10_ps->performance_levels[i].mem_clock = stable_pstate_mclk;
3013 }
3014 }
3015
3016 return 0;
3017}
3018
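/* Scan the new power state against the current DPM tables and record in
 * need_update_dpm_table which tables (SCLK/MCLK, with or without
 * overdrive) must be re-populated and re-uploaded to the SMC. */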
3019static int vega10_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input)
3020{
3021 const struct phm_set_power_state_input *states =
3022 (const struct phm_set_power_state_input *)input;
3023 const struct vega10_power_state *vega10_ps =
3024 cast_const_phw_vega10_power_state(states->pnew_state);
3025 struct vega10_hwmgr *data =
3026 (struct vega10_hwmgr *)(hwmgr->backend);
3027 struct vega10_single_dpm_table *sclk_table =
3028 &(data->dpm_table.gfx_table);
3029 uint32_t sclk = vega10_ps->performance_levels
3030 [vega10_ps->performance_level_count - 1].gfx_clock;
3031 struct vega10_single_dpm_table *mclk_table =
3032 &(data->dpm_table.mem_table);
3033 uint32_t mclk = vega10_ps->performance_levels
3034 [vega10_ps->performance_level_count - 1].mem_clock;
3035 struct PP_Clocks min_clocks = {0};
3036 uint32_t i;
3037 struct cgs_display_info info = {0};
3038
3039 data->need_update_dpm_table = 0;
3040
3041 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3042 PHM_PlatformCaps_ODNinACSupport) ||
3043 phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3044 PHM_PlatformCaps_ODNinDCSupport)) {
3045 for (i = 0; i < sclk_table->count; i++) {
3046 if (sclk == sclk_table->dpm_levels[i].value)
3047 break;
3048 }
3049
3050 if (!(data->apply_overdrive_next_settings_mask &
3051 DPMTABLE_OD_UPDATE_SCLK) && i >= sclk_table->count) {
3052 /* Check SCLK in DAL's minimum clocks
3053 * in case DeepSleep divider update is required.
3054 */
3055 if (data->display_timing.min_clock_in_sr !=
3056 min_clocks.engineClockInSR &&
3057 (min_clocks.engineClockInSR >=
3058 VEGA10_MINIMUM_ENGINE_CLOCK ||
3059 data->display_timing.min_clock_in_sr >=
3060 VEGA10_MINIMUM_ENGINE_CLOCK))
3061 data->need_update_dpm_table |= DPMTABLE_UPDATE_SCLK;
3062 }
3063
3064 cgs_get_active_displays_info(hwmgr->device, &info);
3065
3066 if (data->display_timing.num_existing_displays !=
3067 info.display_count)
3068 data->need_update_dpm_table |= DPMTABLE_UPDATE_MCLK;
3069 } else {
3070 for (i = 0; i < sclk_table->count; i++) {
3071 if (sclk == sclk_table->dpm_levels[i].value)
3072 break;
3073 }
3074
3075 if (i >= sclk_table->count)
3076 data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
3077 else {
3078 /* Check SCLK in DAL's minimum clocks
3079 * in case DeepSleep divider update is required.
3080 */
3081 if (data->display_timing.min_clock_in_sr !=
3082 min_clocks.engineClockInSR &&
3083 (min_clocks.engineClockInSR >=
3084 VEGA10_MINIMUM_ENGINE_CLOCK ||
3085 data->display_timing.min_clock_in_sr >=
3086 VEGA10_MINIMUM_ENGINE_CLOCK))
3087 data->need_update_dpm_table |= DPMTABLE_UPDATE_SCLK;
3088 }
3089
3090 for (i = 0; i < mclk_table->count; i++) {
3091 if (mclk == mclk_table->dpm_levels[i].value)
3092 break;
3093 }
3094
3095 cgs_get_active_displays_info(hwmgr->device, &info);
3096
3097 if (i >= mclk_table->count)
3098 data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
3099
3100 if (data->display_timing.num_existing_displays !=
3101 info.display_count ||
3102 i >= mclk_table->count)
3103 data->need_update_dpm_table |= DPMTABLE_UPDATE_MCLK;
3104 }
3105 return 0;
3106}
3107
3108static int vega10_populate_and_upload_sclk_mclk_dpm_levels(
3109 struct pp_hwmgr *hwmgr, const void *input)
3110{
3111 int result = 0;
3112 const struct phm_set_power_state_input *states =
3113 (const struct phm_set_power_state_input *)input;
3114 const struct vega10_power_state *vega10_ps =
3115 cast_const_phw_vega10_power_state(states->pnew_state);
3116 struct vega10_hwmgr *data =
3117 (struct vega10_hwmgr *)(hwmgr->backend);
3118 uint32_t sclk = vega10_ps->performance_levels
3119 [vega10_ps->performance_level_count - 1].gfx_clock;
3120 uint32_t mclk = vega10_ps->performance_levels
3121 [vega10_ps->performance_level_count - 1].mem_clock;
3122 struct vega10_dpm_table *dpm_table = &data->dpm_table;
3123 struct vega10_dpm_table *golden_dpm_table =
3124 &data->golden_dpm_table;
3125 uint32_t dpm_count, clock_percent;
3126 uint32_t i;
3127
3128 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3129 PHM_PlatformCaps_ODNinACSupport) ||
3130 phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3131 PHM_PlatformCaps_ODNinDCSupport)) {
3132
3133 if (!data->need_update_dpm_table &&
3134 !data->apply_optimized_settings &&
3135 !data->apply_overdrive_next_settings_mask)
3136 return 0;
3137
3138 if (data->apply_overdrive_next_settings_mask &
3139 DPMTABLE_OD_UPDATE_SCLK) {
3140 for (dpm_count = 0;
3141 dpm_count < dpm_table->gfx_table.count;
3142 dpm_count++) {
3143 dpm_table->gfx_table.dpm_levels[dpm_count].enabled =
3144 data->odn_dpm_table.odn_core_clock_dpm_levels.
3145 performance_level_entries[dpm_count].enabled;
3146 dpm_table->gfx_table.dpm_levels[dpm_count].value =
3147 data->odn_dpm_table.odn_core_clock_dpm_levels.
3148 performance_level_entries[dpm_count].clock;
3149 }
3150 }
3151
3152 if (data->apply_overdrive_next_settings_mask &
3153 DPMTABLE_OD_UPDATE_MCLK) {
3154 for (dpm_count = 0;
3155 dpm_count < dpm_table->mem_table.count;
3156 dpm_count++) {
3157 dpm_table->mem_table.dpm_levels[dpm_count].enabled =
3158 data->odn_dpm_table.odn_memory_clock_dpm_levels.
3159 performance_level_entries[dpm_count].enabled;
3160 dpm_table->mem_table.dpm_levels[dpm_count].value =
3161 data->odn_dpm_table.odn_memory_clock_dpm_levels.
3162 performance_level_entries[dpm_count].clock;
3163 }
3164 }
3165
3166 if ((data->need_update_dpm_table & DPMTABLE_UPDATE_SCLK) ||
3167 data->apply_optimized_settings ||
3168 (data->apply_overdrive_next_settings_mask &
3169 DPMTABLE_OD_UPDATE_SCLK)) {
3170 result = vega10_populate_all_graphic_levels(hwmgr);
3171 PP_ASSERT_WITH_CODE(!result,
3172 "Failed to populate SCLK during \
3173 PopulateNewDPMClocksStates Function!",
3174 return result);
3175 }
3176
3177 if ((data->need_update_dpm_table & DPMTABLE_UPDATE_MCLK) ||
3178 (data->apply_overdrive_next_settings_mask &
3179 DPMTABLE_OD_UPDATE_MCLK)){
3180 result = vega10_populate_all_memory_levels(hwmgr);
3181 PP_ASSERT_WITH_CODE(!result,
3182 "Failed to populate MCLK during \
3183 PopulateNewDPMClocksStates Function!",
3184 return result);
3185 }
3186 } else {
3187 if (!data->need_update_dpm_table &&
3188 !data->apply_optimized_settings)
3189 return 0;
3190
3191 if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_SCLK &&
3192 data->smu_features[GNLD_DPM_GFXCLK].supported) {
3193 dpm_table->
3194 gfx_table.dpm_levels[dpm_table->gfx_table.count - 1].
3195 value = sclk;
3196
3197 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3198 PHM_PlatformCaps_OD6PlusinACSupport) ||
3199 phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3200 PHM_PlatformCaps_OD6PlusinDCSupport)) {
3201 /* Need to do calculation based on the golden DPM table
3202 * as the Heatmap GPU Clock axis is also based on
3203 * the default values
3204 */
3205 PP_ASSERT_WITH_CODE(
3206 golden_dpm_table->gfx_table.dpm_levels
3207 [golden_dpm_table->gfx_table.count - 1].value,
3208 "Divide by 0!",
3209 return -1);
3210
3211 dpm_count = dpm_table->gfx_table.count < 2 ?
3212 0 : dpm_table->gfx_table.count - 2;
3213 for (i = dpm_count; i > 1; i--) {
3214 if (sclk > golden_dpm_table->gfx_table.dpm_levels
3215 [golden_dpm_table->gfx_table.count - 1].value) {
3216 clock_percent =
3217 ((sclk - golden_dpm_table->gfx_table.dpm_levels
3218 [golden_dpm_table->gfx_table.count - 1].value) *
3219 100) /
3220 golden_dpm_table->gfx_table.dpm_levels
3221 [golden_dpm_table->gfx_table.count - 1].value;
3222
3223 dpm_table->gfx_table.dpm_levels[i].value =
3224 golden_dpm_table->gfx_table.dpm_levels[i].value +
3225 (golden_dpm_table->gfx_table.dpm_levels[i].value *
3226 clock_percent) / 100;
3227 } else if (golden_dpm_table->
3228 gfx_table.dpm_levels[dpm_table->gfx_table.count-1].value >
3229 sclk) {
3230 clock_percent =
3231 ((golden_dpm_table->gfx_table.dpm_levels
3232 [golden_dpm_table->gfx_table.count - 1].value -
3233 sclk) * 100) /
3234 golden_dpm_table->gfx_table.dpm_levels
3235 [golden_dpm_table->gfx_table.count-1].value;
3236
3237 dpm_table->gfx_table.dpm_levels[i].value =
3238 golden_dpm_table->gfx_table.dpm_levels[i].value -
3239 (golden_dpm_table->gfx_table.dpm_levels[i].value *
3240 clock_percent) / 100;
3241 } else
3242 dpm_table->gfx_table.dpm_levels[i].value =
3243 golden_dpm_table->gfx_table.dpm_levels[i].value;
3244 }
3245 }
3246 }
3247
3248 if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_MCLK &&
3249 data->smu_features[GNLD_DPM_UCLK].supported) {
3250 dpm_table->
3251 mem_table.dpm_levels[dpm_table->mem_table.count - 1].
3252 value = mclk;
3253
3254 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3255 PHM_PlatformCaps_OD6PlusinACSupport) ||
3256 phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3257 PHM_PlatformCaps_OD6PlusinDCSupport)) {
3258
3259 PP_ASSERT_WITH_CODE(
3260 golden_dpm_table->mem_table.dpm_levels
3261 [golden_dpm_table->mem_table.count - 1].value,
3262 "Divide by 0!",
3263 return -1);
3264
3265 dpm_count = dpm_table->mem_table.count < 2 ?
3266 0 : dpm_table->mem_table.count - 2;
3267 for (i = dpm_count; i > 1; i--) {
3268 if (mclk > golden_dpm_table->mem_table.dpm_levels
3269 [golden_dpm_table->mem_table.count-1].value) {
3270 clock_percent = ((mclk -
3271 golden_dpm_table->mem_table.dpm_levels
3272 [golden_dpm_table->mem_table.count-1].value) *
3273 100) /
3274 golden_dpm_table->mem_table.dpm_levels
3275 [golden_dpm_table->mem_table.count-1].value;
3276
3277 dpm_table->mem_table.dpm_levels[i].value =
3278 golden_dpm_table->mem_table.dpm_levels[i].value +
3279 (golden_dpm_table->mem_table.dpm_levels[i].value *
3280 clock_percent) / 100;
3281 } else if (golden_dpm_table->mem_table.dpm_levels
3282 [dpm_table->mem_table.count-1].value > mclk) {
3283 clock_percent = ((golden_dpm_table->mem_table.dpm_levels
3284 [golden_dpm_table->mem_table.count-1].value - mclk) *
3285 100) /
3286 golden_dpm_table->mem_table.dpm_levels
3287 [golden_dpm_table->mem_table.count-1].value;
3288
3289 dpm_table->mem_table.dpm_levels[i].value =
3290 golden_dpm_table->mem_table.dpm_levels[i].value -
3291 (golden_dpm_table->mem_table.dpm_levels[i].value *
3292 clock_percent) / 100;
3293 } else
3294 dpm_table->mem_table.dpm_levels[i].value =
3295 golden_dpm_table->mem_table.dpm_levels[i].value;
3296 }
3297 }
3298 }
3299
3300 if ((data->need_update_dpm_table &
 3301 			(DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK)) ||
3302 data->apply_optimized_settings) {
3303 result = vega10_populate_all_graphic_levels(hwmgr);
3304 PP_ASSERT_WITH_CODE(!result,
3305 "Failed to populate SCLK during \
3306 PopulateNewDPMClocksStates Function!",
3307 return result);
3308 }
3309
3310 if (data->need_update_dpm_table &
 3311 			(DPMTABLE_OD_UPDATE_MCLK | DPMTABLE_UPDATE_MCLK)) {
3312 result = vega10_populate_all_memory_levels(hwmgr);
3313 PP_ASSERT_WITH_CODE(!result,
3314 "Failed to populate MCLK during \
3315 PopulateNewDPMClocksStates Function!",
3316 return result);
3317 }
3318 }
3319
3320 return result;
3321}
3322
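/* Enable only the DPM levels whose clock falls within [low_limit, high_limit];
 * every other entry in the table is disabled.
 */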
3323static int vega10_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
3324 struct vega10_single_dpm_table *dpm_table,
3325 uint32_t low_limit, uint32_t high_limit)
3326{
3327 uint32_t i;
3328
3329 for (i = 0; i < dpm_table->count; i++) {
3330 if ((dpm_table->dpm_levels[i].value < low_limit) ||
3331 (dpm_table->dpm_levels[i].value > high_limit))
3332 dpm_table->dpm_levels[i].enabled = false;
3333 else
3334 dpm_table->dpm_levels[i].enabled = true;
3335 }
3336 return 0;
3337}
3338
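/* Same range check as vega10_trim_single_dpm_states(), but a level inside the
 * range is kept only when its bit is set in disable_dpm_mask (despite the
 * name, the mask selects the levels that stay enabled).
 */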
3339static int vega10_trim_single_dpm_states_with_mask(struct pp_hwmgr *hwmgr,
3340 struct vega10_single_dpm_table *dpm_table,
3341 uint32_t low_limit, uint32_t high_limit,
3342 uint32_t disable_dpm_mask)
3343{
3344 uint32_t i;
3345
3346 for (i = 0; i < dpm_table->count; i++) {
3347 if ((dpm_table->dpm_levels[i].value < low_limit) ||
3348 (dpm_table->dpm_levels[i].value > high_limit))
3349 dpm_table->dpm_levels[i].enabled = false;
3350 else if (!((1 << i) & disable_dpm_mask))
3351 dpm_table->dpm_levels[i].enabled = false;
3352 else
3353 dpm_table->dpm_levels[i].enabled = true;
3354 }
3355 return 0;
3356}
3357
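/* Restrict the SOC, GFX and MEM DPM tables to the clock range spanned by the
 * lowest and highest performance levels of the requested power state.
 */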
3358static int vega10_trim_dpm_states(struct pp_hwmgr *hwmgr,
3359 const struct vega10_power_state *vega10_ps)
3360{
3361 struct vega10_hwmgr *data =
3362 (struct vega10_hwmgr *)(hwmgr->backend);
3363 uint32_t high_limit_count;
3364
3365 PP_ASSERT_WITH_CODE((vega10_ps->performance_level_count >= 1),
3366 "power state did not have any performance level",
3367 return -1);
3368
3369 high_limit_count = (vega10_ps->performance_level_count == 1) ? 0 : 1;
3370
3371 vega10_trim_single_dpm_states(hwmgr,
3372 &(data->dpm_table.soc_table),
3373 vega10_ps->performance_levels[0].soc_clock,
3374 vega10_ps->performance_levels[high_limit_count].soc_clock);
3375
3376 vega10_trim_single_dpm_states_with_mask(hwmgr,
3377 &(data->dpm_table.gfx_table),
3378 vega10_ps->performance_levels[0].gfx_clock,
3379 vega10_ps->performance_levels[high_limit_count].gfx_clock,
3380 data->disable_dpm_mask);
3381
3382 vega10_trim_single_dpm_states(hwmgr,
3383 &(data->dpm_table.mem_table),
3384 vega10_ps->performance_levels[0].mem_clock,
3385 vega10_ps->performance_levels[high_limit_count].mem_clock);
3386
3387 return 0;
3388}
3389
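/* Return the index of the lowest enabled level in the table. */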
3390static uint32_t vega10_find_lowest_dpm_level(
3391 struct vega10_single_dpm_table *table)
3392{
3393 uint32_t i;
3394
3395 for (i = 0; i < table->count; i++) {
3396 if (table->dpm_levels[i].enabled)
3397 break;
3398 }
3399
3400 return i;
3401}
3402
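/* Return the index of the highest enabled level in the table, clamped to
 * MAX_REGULAR_DPM_NUMBER - 1 if the table is oversized.
 */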
3403static uint32_t vega10_find_highest_dpm_level(
3404 struct vega10_single_dpm_table *table)
3405{
3406 uint32_t i = 0;
3407
3408 if (table->count <= MAX_REGULAR_DPM_NUMBER) {
3409 for (i = table->count; i > 0; i--) {
3410 if (table->dpm_levels[i - 1].enabled)
3411 return i - 1;
3412 }
3413 } else {
3414 pr_info("DPM Table Has Too Many Entries!");
3415 return MAX_REGULAR_DPM_NUMBER - 1;
3416 }
3417
3418 return i;
3419}
3420
3421static void vega10_apply_dal_minimum_voltage_request(
3422 struct pp_hwmgr *hwmgr)
3423{
3424 return;
3425}
3426
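/* Push the soft minimum GFXCLK/UCLK indices (boot levels) to the SMC when
 * they differ from what is currently programmed and the respective DPM key
 * is not disabled.
 */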
3427static int vega10_upload_dpm_bootup_level(struct pp_hwmgr *hwmgr)
3428{
3429 struct vega10_hwmgr *data =
3430 (struct vega10_hwmgr *)(hwmgr->backend);
3431
3432 vega10_apply_dal_minimum_voltage_request(hwmgr);
3433
3434 if (!data->registry_data.sclk_dpm_key_disabled) {
3435 if (data->smc_state_table.gfx_boot_level !=
3436 data->dpm_table.gfx_table.dpm_state.soft_min_level) {
3437 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
3438 hwmgr->smumgr,
3439 PPSMC_MSG_SetSoftMinGfxclkByIndex,
3440 data->smc_state_table.gfx_boot_level),
3441 "Failed to set soft min sclk index!",
3442 return -EINVAL);
3443 data->dpm_table.gfx_table.dpm_state.soft_min_level =
3444 data->smc_state_table.gfx_boot_level;
3445 }
3446 }
3447
3448 if (!data->registry_data.mclk_dpm_key_disabled) {
3449 if (data->smc_state_table.mem_boot_level !=
3450 data->dpm_table.mem_table.dpm_state.soft_min_level) {
3451 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
3452 hwmgr->smumgr,
3453 PPSMC_MSG_SetSoftMinUclkByIndex,
3454 data->smc_state_table.mem_boot_level),
3455 "Failed to set soft min mclk index!",
3456 return -EINVAL);
3457
3458 data->dpm_table.mem_table.dpm_state.soft_min_level =
3459 data->smc_state_table.mem_boot_level;
3460 }
3461 }
3462
3463 return 0;
3464}
3465
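/* Push the soft maximum GFXCLK/UCLK indices to the SMC when they differ from
 * what is currently programmed and the respective DPM key is not disabled.
 */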
3466static int vega10_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
3467{
3468 struct vega10_hwmgr *data =
3469 (struct vega10_hwmgr *)(hwmgr->backend);
3470
3471 vega10_apply_dal_minimum_voltage_request(hwmgr);
3472
3473 if (!data->registry_data.sclk_dpm_key_disabled) {
3474 if (data->smc_state_table.gfx_max_level !=
3475 data->dpm_table.gfx_table.dpm_state.soft_max_level) {
3476 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
3477 hwmgr->smumgr,
3478 PPSMC_MSG_SetSoftMaxGfxclkByIndex,
3479 data->smc_state_table.gfx_max_level),
3480 "Failed to set soft max sclk index!",
3481 return -EINVAL);
3482 data->dpm_table.gfx_table.dpm_state.soft_max_level =
3483 data->smc_state_table.gfx_max_level;
3484 }
3485 }
3486
3487 if (!data->registry_data.mclk_dpm_key_disabled) {
3488 if (data->smc_state_table.mem_max_level !=
3489 data->dpm_table.mem_table.dpm_state.soft_max_level) {
3490 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
3491 hwmgr->smumgr,
3492 PPSMC_MSG_SetSoftMaxUclkByIndex,
3493 data->smc_state_table.mem_max_level),
3494 "Failed to set soft max mclk index!",
3495 return -EINVAL);
3496 data->dpm_table.mem_table.dpm_state.soft_max_level =
3497 data->smc_state_table.mem_max_level;
3498 }
3499 }
3500
3501 return 0;
3502}
3503
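/* Trim the DPM tables against the new power state, derive the boot (minimum)
 * and maximum level indices, upload them to the SMC and mark every level in
 * between as enabled.
 */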
3504static int vega10_generate_dpm_level_enable_mask(
3505 struct pp_hwmgr *hwmgr, const void *input)
3506{
3507 struct vega10_hwmgr *data =
3508 (struct vega10_hwmgr *)(hwmgr->backend);
3509 const struct phm_set_power_state_input *states =
3510 (const struct phm_set_power_state_input *)input;
3511 const struct vega10_power_state *vega10_ps =
3512 cast_const_phw_vega10_power_state(states->pnew_state);
3513 int i;
3514
3515 PP_ASSERT_WITH_CODE(!vega10_trim_dpm_states(hwmgr, vega10_ps),
3516 "Attempt to Trim DPM States Failed!",
3517 return -1);
3518
3519 data->smc_state_table.gfx_boot_level =
3520 vega10_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
3521 data->smc_state_table.gfx_max_level =
3522 vega10_find_highest_dpm_level(&(data->dpm_table.gfx_table));
3523 data->smc_state_table.mem_boot_level =
3524 vega10_find_lowest_dpm_level(&(data->dpm_table.mem_table));
3525 data->smc_state_table.mem_max_level =
3526 vega10_find_highest_dpm_level(&(data->dpm_table.mem_table));
3527
3528 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
3529 "Attempt to upload DPM Bootup Levels Failed!",
3530 return -1);
3531 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
3532 "Attempt to upload DPM Max Levels Failed!",
3533 return -1);
 3534 	for (i = data->smc_state_table.gfx_boot_level; i < data->smc_state_table.gfx_max_level; i++)
3535 data->dpm_table.gfx_table.dpm_levels[i].enabled = true;
3536
3537
 3538 	for (i = data->smc_state_table.mem_boot_level; i < data->smc_state_table.mem_max_level; i++)
3539 data->dpm_table.mem_table.dpm_levels[i].enabled = true;
3540
3541 return 0;
3542}
3543
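/* Enable or disable the VCE DPM feature in the SMC, if supported. */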
3544int vega10_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
3545{
3546 struct vega10_hwmgr *data =
3547 (struct vega10_hwmgr *)(hwmgr->backend);
3548
3549 if (data->smu_features[GNLD_DPM_VCE].supported) {
3550 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
3551 enable,
3552 data->smu_features[GNLD_DPM_VCE].smu_feature_bitmap),
3553 "Attempt to Enable/Disable DPM VCE Failed!",
3554 return -1);
3555 data->smu_features[GNLD_DPM_VCE].enabled = enable;
3556 }
3557
3558 return 0;
3559}
3560
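/* Program the low-SCLK interrupt threshold requested through the gfx arbiter;
 * the SMC message also enables the SMC-to-host interrupt.
 */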
3561static int vega10_update_sclk_threshold(struct pp_hwmgr *hwmgr)
3562{
3563 struct vega10_hwmgr *data =
3564 (struct vega10_hwmgr *)(hwmgr->backend);
3565 int result = 0;
3566 uint32_t low_sclk_interrupt_threshold = 0;
3567
3568 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3569 PHM_PlatformCaps_SclkThrottleLowNotification)
3570 && (hwmgr->gfx_arbiter.sclk_threshold !=
3571 data->low_sclk_interrupt_threshold)) {
3572 data->low_sclk_interrupt_threshold =
3573 hwmgr->gfx_arbiter.sclk_threshold;
3574 low_sclk_interrupt_threshold =
3575 data->low_sclk_interrupt_threshold;
3576
3577 data->smc_state_table.pp_table.LowGfxclkInterruptThreshold =
3578 cpu_to_le32(low_sclk_interrupt_threshold);
3579
3580 /* This message will also enable SmcToHost Interrupt */
3581 result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
3582 PPSMC_MSG_SetLowGfxclkInterruptThreshold,
3583 (uint32_t)low_sclk_interrupt_threshold);
3584 }
3585
3586 return result;
3587}
3588
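/* Apply a new power state: refresh the DPM tables, regenerate the level
 * enable mask, update the SCLK threshold and upload the resulting PPTable
 * to the SMC.
 */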
3589static int vega10_set_power_state_tasks(struct pp_hwmgr *hwmgr,
3590 const void *input)
3591{
3592 int tmp_result, result = 0;
3593 struct vega10_hwmgr *data =
3594 (struct vega10_hwmgr *)(hwmgr->backend);
3595 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
3596
3597 tmp_result = vega10_find_dpm_states_clocks_in_dpm_table(hwmgr, input);
3598 PP_ASSERT_WITH_CODE(!tmp_result,
3599 "Failed to find DPM states clocks in DPM table!",
3600 result = tmp_result);
3601
3602 tmp_result = vega10_populate_and_upload_sclk_mclk_dpm_levels(hwmgr, input);
3603 PP_ASSERT_WITH_CODE(!tmp_result,
3604 "Failed to populate and upload SCLK MCLK DPM levels!",
3605 result = tmp_result);
3606
3607 tmp_result = vega10_generate_dpm_level_enable_mask(hwmgr, input);
3608 PP_ASSERT_WITH_CODE(!tmp_result,
3609 "Failed to generate DPM level enabled mask!",
3610 result = tmp_result);
3611
3612 tmp_result = vega10_update_sclk_threshold(hwmgr);
3613 PP_ASSERT_WITH_CODE(!tmp_result,
3614 "Failed to update SCLK threshold!",
3615 result = tmp_result);
3616
3617 result = vega10_copy_table_to_smc(hwmgr->smumgr,
3618 (uint8_t *)pp_table, PPTABLE);
3619 PP_ASSERT_WITH_CODE(!result,
3620 "Failed to upload PPtable!", return result);
3621
3622 data->apply_optimized_settings = false;
3623 data->apply_overdrive_next_settings_mask = 0;
3624
3625 return 0;
3626}
3627
3628static int vega10_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
3629{
3630 struct pp_power_state *ps;
3631 struct vega10_power_state *vega10_ps;
3632
3633 if (hwmgr == NULL)
3634 return -EINVAL;
3635
3636 ps = hwmgr->request_ps;
3637
3638 if (ps == NULL)
3639 return -EINVAL;
3640
3641 vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
3642
3643 if (low)
3644 return vega10_ps->performance_levels[0].gfx_clock;
3645 else
3646 return vega10_ps->performance_levels
3647 [vega10_ps->performance_level_count - 1].gfx_clock;
3648}
3649
3650static int vega10_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
3651{
3652 struct pp_power_state *ps;
3653 struct vega10_power_state *vega10_ps;
3654
3655 if (hwmgr == NULL)
3656 return -EINVAL;
3657
3658 ps = hwmgr->request_ps;
3659
3660 if (ps == NULL)
3661 return -EINVAL;
3662
3663 vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
3664
3665 if (low)
3666 return vega10_ps->performance_levels[0].mem_clock;
3667 else
3668 return vega10_ps->performance_levels
3669 [vega10_ps->performance_level_count-1].mem_clock;
3670}
3671
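/* Read one sensor value (current clocks, GPU load, temperature or UVD/VCE
 * power state) into @value and report its size in bytes through @size.
 */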
3672static int vega10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
3673 void *value, int *size)
3674{
3675 uint32_t sclk_idx, mclk_idx, activity_percent = 0;
3676 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
3677 struct vega10_dpm_table *dpm_table = &data->dpm_table;
3678 int ret = 0;
3679
3680 switch (idx) {
3681 case AMDGPU_PP_SENSOR_GFX_SCLK:
3682 ret = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetCurrentGfxclkIndex);
3683 if (!ret) {
3684 vega10_read_arg_from_smc(hwmgr->smumgr, &sclk_idx);
3685 *((uint32_t *)value) = dpm_table->gfx_table.dpm_levels[sclk_idx].value;
3686 *size = 4;
3687 }
3688 break;
3689 case AMDGPU_PP_SENSOR_GFX_MCLK:
3690 ret = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetCurrentUclkIndex);
3691 if (!ret) {
3692 vega10_read_arg_from_smc(hwmgr->smumgr, &mclk_idx);
3693 *((uint32_t *)value) = dpm_table->mem_table.dpm_levels[mclk_idx].value;
3694 *size = 4;
3695 }
3696 break;
3697 case AMDGPU_PP_SENSOR_GPU_LOAD:
3698 ret = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_GetAverageGfxActivity, 0);
3699 if (!ret) {
3700 vega10_read_arg_from_smc(hwmgr->smumgr, &activity_percent);
3701 *((uint32_t *)value) = activity_percent > 100 ? 100 : activity_percent;
3702 *size = 4;
3703 }
3704 break;
3705 case AMDGPU_PP_SENSOR_GPU_TEMP:
3706 *((uint32_t *)value) = vega10_thermal_get_temperature(hwmgr);
3707 *size = 4;
3708 break;
3709 case AMDGPU_PP_SENSOR_UVD_POWER:
3710 *((uint32_t *)value) = data->uvd_power_gated ? 0 : 1;
3711 *size = 4;
3712 break;
3713 case AMDGPU_PP_SENSOR_VCE_POWER:
3714 *((uint32_t *)value) = data->vce_power_gated ? 0 : 1;
3715 *size = 4;
3716 break;
3717 default:
3718 ret = -EINVAL;
3719 break;
3720 }
3721 return ret;
3722}
3723
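/* Tell the SMC whether a display is active so it can choose the UCLK
 * switching behaviour accordingly.
 */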
3724static int vega10_notify_smc_display_change(struct pp_hwmgr *hwmgr,
3725 bool has_disp)
3726{
3727 return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
3728 PPSMC_MSG_SetUclkFastSwitch,
3729 has_disp ? 0 : 1);
3730}
3731
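/* Request a display-related clock (DCEF, DISP, pixel or PHY) from the SMC by
 * packing the scaled frequency and the clock selector into one message
 * argument.
 */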
3732int vega10_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
3733 struct pp_display_clock_request *clock_req)
3734{
3735 int result = 0;
3736 enum amd_pp_clock_type clk_type = clock_req->clock_type;
3737 uint32_t clk_freq = clock_req->clock_freq_in_khz / 100;
3738 DSPCLK_e clk_select = 0;
3739 uint32_t clk_request = 0;
3740
3741 switch (clk_type) {
3742 case amd_pp_dcef_clock:
3743 clk_select = DSPCLK_DCEFCLK;
3744 break;
3745 case amd_pp_disp_clock:
3746 clk_select = DSPCLK_DISPCLK;
3747 break;
3748 case amd_pp_pixel_clock:
3749 clk_select = DSPCLK_PIXCLK;
3750 break;
3751 case amd_pp_phy_clock:
3752 clk_select = DSPCLK_PHYCLK;
3753 break;
3754 default:
3755 pr_info("[DisplayClockVoltageRequest]Invalid Clock Type!");
3756 result = -1;
3757 break;
3758 }
3759
3760 if (!result) {
3761 clk_request = (clk_freq << 16) | clk_select;
3762 result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
3763 PPSMC_MSG_RequestDisplayClockByFreq,
3764 clk_request);
3765 }
3766
3767 return result;
3768}
3769
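/* After a power-state adjustment, update the SMC's display notification and
 * request the DCEFCLK hard minimum and deep-sleep clock that match the
 * current display configuration.
 */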
3770static int vega10_notify_smc_display_config_after_ps_adjustment(
3771 struct pp_hwmgr *hwmgr)
3772{
3773 struct vega10_hwmgr *data =
3774 (struct vega10_hwmgr *)(hwmgr->backend);
3775 struct vega10_single_dpm_table *dpm_table =
3776 &data->dpm_table.dcef_table;
3777 uint32_t num_active_disps = 0;
3778 struct cgs_display_info info = {0};
3779 struct PP_Clocks min_clocks = {0};
3780 uint32_t i;
3781 struct pp_display_clock_request clock_req;
3782
3783 info.mode_info = NULL;
3784
3785 cgs_get_active_displays_info(hwmgr->device, &info);
3786
3787 num_active_disps = info.display_count;
3788
3789 if (num_active_disps > 1)
3790 vega10_notify_smc_display_change(hwmgr, false);
3791 else
3792 vega10_notify_smc_display_change(hwmgr, true);
3793
3794 min_clocks.dcefClock = hwmgr->display_config.min_dcef_set_clk;
3795 min_clocks.dcefClockInSR = hwmgr->display_config.min_dcef_deep_sleep_set_clk;
3796
3797 for (i = 0; i < dpm_table->count; i++) {
3798 if (dpm_table->dpm_levels[i].value == min_clocks.dcefClock)
3799 break;
3800 }
3801
3802 if (i < dpm_table->count) {
3803 clock_req.clock_type = amd_pp_dcef_clock;
3804 clock_req.clock_freq_in_khz = dpm_table->dpm_levels[i].value;
3805 if (!vega10_display_clock_voltage_request(hwmgr, &clock_req)) {
3806 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
3807 hwmgr->smumgr, PPSMC_MSG_SetMinDeepSleepDcefclk,
3808 min_clocks.dcefClockInSR),
3809 "Attempt to set divider for DCEFCLK Failed!",);
3810 } else
3811 pr_info("Attempt to set Hard Min for DCEFCLK Failed!");
3812 } else
3813 pr_info("Cannot find requested DCEFCLK!");
3814
3815 return 0;
3816}
3817
3818static int vega10_force_dpm_highest(struct pp_hwmgr *hwmgr)
3819{
3820 struct vega10_hwmgr *data =
3821 (struct vega10_hwmgr *)(hwmgr->backend);
3822
3823 data->smc_state_table.gfx_boot_level =
3824 data->smc_state_table.gfx_max_level =
3825 vega10_find_highest_dpm_level(&(data->dpm_table.gfx_table));
3826 data->smc_state_table.mem_boot_level =
3827 data->smc_state_table.mem_max_level =
3828 vega10_find_highest_dpm_level(&(data->dpm_table.mem_table));
3829
3830 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
3831 "Failed to upload boot level to highest!",
3832 return -1);
3833
3834 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
3835 "Failed to upload dpm max level to highest!",
3836 return -1);
3837
3838 return 0;
3839}
3840
3841static int vega10_force_dpm_lowest(struct pp_hwmgr *hwmgr)
3842{
3843 struct vega10_hwmgr *data =
3844 (struct vega10_hwmgr *)(hwmgr->backend);
3845
3846 data->smc_state_table.gfx_boot_level =
3847 data->smc_state_table.gfx_max_level =
3848 vega10_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
3849 data->smc_state_table.mem_boot_level =
3850 data->smc_state_table.mem_max_level =
3851 vega10_find_lowest_dpm_level(&(data->dpm_table.mem_table));
3852
3853 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
3854 "Failed to upload boot level to highest!",
3855 return -1);
3856
3857 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
3858 "Failed to upload dpm max level to highest!",
3859 return -1);
3860
3861 return 0;
3862
3863}
3864
3865static int vega10_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
3866{
3867 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
3868
3869 data->smc_state_table.gfx_boot_level =
3870 vega10_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
3871 data->smc_state_table.gfx_max_level =
3872 vega10_find_highest_dpm_level(&(data->dpm_table.gfx_table));
3873 data->smc_state_table.mem_boot_level =
3874 vega10_find_lowest_dpm_level(&(data->dpm_table.mem_table));
3875 data->smc_state_table.mem_max_level =
3876 vega10_find_highest_dpm_level(&(data->dpm_table.mem_table));
3877
3878 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
3879 "Failed to upload DPM Bootup Levels!",
3880 return -1);
3881
3882 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
3883 "Failed to upload DPM Max Levels!",
3884 return -1);
3885 return 0;
3886}
3887
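/* Force DPM to the highest or lowest level, or return it to automatic
 * operation, then record the requested level in the hwmgr.
 */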
3888static int vega10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
3889 enum amd_dpm_forced_level level)
3890{
3891 int ret = 0;
3892
3893 switch (level) {
3894 case AMD_DPM_FORCED_LEVEL_HIGH:
3895 ret = vega10_force_dpm_highest(hwmgr);
3896 if (ret)
3897 return ret;
3898 break;
3899 case AMD_DPM_FORCED_LEVEL_LOW:
3900 ret = vega10_force_dpm_lowest(hwmgr);
3901 if (ret)
3902 return ret;
3903 break;
3904 case AMD_DPM_FORCED_LEVEL_AUTO:
3905 ret = vega10_unforce_dpm_levels(hwmgr);
3906 if (ret)
3907 return ret;
3908 break;
3909 default:
3910 break;
3911 }
3912
3913 hwmgr->dpm_level = level;
3914
3915 return ret;
3916}
3917
3918static int vega10_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
3919{
3920 if (mode) {
3921 /* stop auto-manage */
3922 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3923 PHM_PlatformCaps_MicrocodeFanControl))
3924 vega10_fan_ctrl_stop_smc_fan_control(hwmgr);
3925 vega10_fan_ctrl_set_static_mode(hwmgr, mode);
3926 } else
3927 /* restart auto-manage */
3928 vega10_fan_ctrl_reset_fan_speed_to_default(hwmgr);
3929
3930 return 0;
3931}
3932
3933static int vega10_get_fan_control_mode(struct pp_hwmgr *hwmgr)
3934{
3935 uint32_t reg;
3936
3937 if (hwmgr->fan_ctrl_is_in_default_mode) {
3938 return hwmgr->fan_ctrl_default_mode;
3939 } else {
3940 reg = soc15_get_register_offset(THM_HWID, 0,
3941 mmCG_FDO_CTRL2_BASE_IDX, mmCG_FDO_CTRL2);
3942 return (cgs_read_register(hwmgr->device, reg) &
3943 CG_FDO_CTRL2__FDO_PWM_MODE_MASK) >>
3944 CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
3945 }
3946}
3947
3948static int vega10_get_dal_power_level(struct pp_hwmgr *hwmgr,
3949 struct amd_pp_simple_clock_info *info)
3950{
3951 struct phm_ppt_v2_information *table_info =
3952 (struct phm_ppt_v2_information *)hwmgr->pptable;
3953 struct phm_clock_and_voltage_limits *max_limits =
3954 &table_info->max_clock_voltage_on_ac;
3955
3956 info->engine_max_clock = max_limits->sclk;
3957 info->memory_max_clock = max_limits->mclk;
3958
3959 return 0;
3960}
3961
3962static void vega10_get_sclks(struct pp_hwmgr *hwmgr,
3963 struct pp_clock_levels_with_latency *clocks)
3964{
3965 struct phm_ppt_v2_information *table_info =
3966 (struct phm_ppt_v2_information *)hwmgr->pptable;
3967 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
3968 table_info->vdd_dep_on_sclk;
3969 uint32_t i;
3970
3971 for (i = 0; i < dep_table->count; i++) {
3972 if (dep_table->entries[i].clk) {
3973 clocks->data[clocks->num_levels].clocks_in_khz =
3974 dep_table->entries[i].clk;
3975 clocks->num_levels++;
3976 }
3977 }
3978
3979}
3980
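/* Map a memory clock to its expected latency: clocks below the low-latency
 * threshold report an error value, clocks between the two thresholds get the
 * high latency figure, and faster clocks get the low one.
 */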
3981static uint32_t vega10_get_mem_latency(struct pp_hwmgr *hwmgr,
3982 uint32_t clock)
3983{
3984 if (clock >= MEM_FREQ_LOW_LATENCY &&
3985 clock < MEM_FREQ_HIGH_LATENCY)
3986 return MEM_LATENCY_HIGH;
3987 else if (clock >= MEM_FREQ_HIGH_LATENCY)
3988 return MEM_LATENCY_LOW;
3989 else
3990 return MEM_LATENCY_ERR;
3991}
3992
3993static void vega10_get_memclocks(struct pp_hwmgr *hwmgr,
3994 struct pp_clock_levels_with_latency *clocks)
3995{
3996 struct phm_ppt_v2_information *table_info =
3997 (struct phm_ppt_v2_information *)hwmgr->pptable;
3998 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
3999 table_info->vdd_dep_on_mclk;
4000 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4001 uint32_t i;
4002
4003 clocks->num_levels = 0;
4004 data->mclk_latency_table.count = 0;
4005
4006 for (i = 0; i < dep_table->count; i++) {
4007 if (dep_table->entries[i].clk) {
4008 clocks->data[clocks->num_levels].clocks_in_khz =
4009 data->mclk_latency_table.entries
4010 [data->mclk_latency_table.count].frequency =
4011 dep_table->entries[i].clk;
4012 clocks->data[clocks->num_levels].latency_in_us =
4013 data->mclk_latency_table.entries
4014 [data->mclk_latency_table.count].latency =
4015 vega10_get_mem_latency(hwmgr,
4016 dep_table->entries[i].clk);
4017 clocks->num_levels++;
4018 data->mclk_latency_table.count++;
4019 }
4020 }
4021}
4022
4023static void vega10_get_dcefclocks(struct pp_hwmgr *hwmgr,
4024 struct pp_clock_levels_with_latency *clocks)
4025{
4026 struct phm_ppt_v2_information *table_info =
4027 (struct phm_ppt_v2_information *)hwmgr->pptable;
4028 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
4029 table_info->vdd_dep_on_dcefclk;
4030 uint32_t i;
4031
4032 for (i = 0; i < dep_table->count; i++) {
4033 clocks->data[i].clocks_in_khz = dep_table->entries[i].clk;
4034 clocks->data[i].latency_in_us = 0;
4035 clocks->num_levels++;
4036 }
4037}
4038
4039static void vega10_get_socclocks(struct pp_hwmgr *hwmgr,
4040 struct pp_clock_levels_with_latency *clocks)
4041{
4042 struct phm_ppt_v2_information *table_info =
4043 (struct phm_ppt_v2_information *)hwmgr->pptable;
4044 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
4045 table_info->vdd_dep_on_socclk;
4046 uint32_t i;
4047
4048 for (i = 0; i < dep_table->count; i++) {
4049 clocks->data[i].clocks_in_khz = dep_table->entries[i].clk;
4050 clocks->data[i].latency_in_us = 0;
4051 clocks->num_levels++;
4052 }
4053}
4054
4055static int vega10_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
4056 enum amd_pp_clock_type type,
4057 struct pp_clock_levels_with_latency *clocks)
4058{
4059 switch (type) {
4060 case amd_pp_sys_clock:
4061 vega10_get_sclks(hwmgr, clocks);
4062 break;
4063 case amd_pp_mem_clock:
4064 vega10_get_memclocks(hwmgr, clocks);
4065 break;
4066 case amd_pp_dcef_clock:
4067 vega10_get_dcefclocks(hwmgr, clocks);
4068 break;
4069 case amd_pp_soc_clock:
4070 vega10_get_socclocks(hwmgr, clocks);
4071 break;
4072 default:
4073 return -1;
4074 }
4075
4076 return 0;
4077}
4078
4079static int vega10_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
4080 enum amd_pp_clock_type type,
4081 struct pp_clock_levels_with_voltage *clocks)
4082{
4083 struct phm_ppt_v2_information *table_info =
4084 (struct phm_ppt_v2_information *)hwmgr->pptable;
4085 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
4086 uint32_t i;
4087
4088 switch (type) {
4089 case amd_pp_mem_clock:
4090 dep_table = table_info->vdd_dep_on_mclk;
4091 break;
4092 case amd_pp_dcef_clock:
4093 dep_table = table_info->vdd_dep_on_dcefclk;
4094 break;
4095 case amd_pp_disp_clock:
4096 dep_table = table_info->vdd_dep_on_dispclk;
4097 break;
4098 case amd_pp_pixel_clock:
4099 dep_table = table_info->vdd_dep_on_pixclk;
4100 break;
4101 case amd_pp_phy_clock:
4102 dep_table = table_info->vdd_dep_on_phyclk;
4103 break;
4104 default:
4105 return -1;
4106 }
4107
4108 for (i = 0; i < dep_table->count; i++) {
4109 clocks->data[i].clocks_in_khz = dep_table->entries[i].clk;
4110 clocks->data[i].voltage_in_mv = (uint32_t)(table_info->vddc_lookup_table->
4111 entries[dep_table->entries[i].vddInd].us_vdd);
4112 clocks->num_levels++;
4113 }
4114
4115 if (i < dep_table->count)
4116 return -1;
4117
4118 return 0;
4119}
4120
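/* Fill the SMC watermark table from the DMIF and MCIF clock-range sets
 * provided by the display stack and flag that watermarks now exist, so they
 * get uploaded on the next display configuration change.
 */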
4121static int vega10_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
4122 struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
4123{
4124 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4125 Watermarks_t *table = &(data->smc_state_table.water_marks_table);
4126 int result = 0;
4127 uint32_t i;
4128
4129 if (!data->registry_data.disable_water_mark) {
4130 for (i = 0; i < wm_with_clock_ranges->num_wm_sets_dmif; i++) {
4131 table->WatermarkRow[WM_DCEFCLK][i].MinClock =
4132 cpu_to_le16((uint16_t)
4133 (wm_with_clock_ranges->wm_sets_dmif[i].wm_min_dcefclk_in_khz) /
4134 100);
4135 table->WatermarkRow[WM_DCEFCLK][i].MaxClock =
4136 cpu_to_le16((uint16_t)
4137 (wm_with_clock_ranges->wm_sets_dmif[i].wm_max_dcefclk_in_khz) /
4138 100);
4139 table->WatermarkRow[WM_DCEFCLK][i].MinUclk =
4140 cpu_to_le16((uint16_t)
4141 (wm_with_clock_ranges->wm_sets_dmif[i].wm_min_memclk_in_khz) /
4142 100);
4143 table->WatermarkRow[WM_DCEFCLK][i].MaxUclk =
4144 cpu_to_le16((uint16_t)
4145 (wm_with_clock_ranges->wm_sets_dmif[i].wm_max_memclk_in_khz) /
4146 100);
4147 table->WatermarkRow[WM_DCEFCLK][i].WmSetting = (uint8_t)
4148 wm_with_clock_ranges->wm_sets_dmif[i].wm_set_id;
4149 }
4150
4151 for (i = 0; i < wm_with_clock_ranges->num_wm_sets_mcif; i++) {
4152 table->WatermarkRow[WM_SOCCLK][i].MinClock =
4153 cpu_to_le16((uint16_t)
4154 (wm_with_clock_ranges->wm_sets_mcif[i].wm_min_socclk_in_khz) /
4155 100);
4156 table->WatermarkRow[WM_SOCCLK][i].MaxClock =
4157 cpu_to_le16((uint16_t)
4158 (wm_with_clock_ranges->wm_sets_mcif[i].wm_max_socclk_in_khz) /
4159 100);
4160 table->WatermarkRow[WM_SOCCLK][i].MinUclk =
4161 cpu_to_le16((uint16_t)
4162 (wm_with_clock_ranges->wm_sets_mcif[i].wm_min_memclk_in_khz) /
4163 100);
4164 table->WatermarkRow[WM_SOCCLK][i].MaxUclk =
4165 cpu_to_le16((uint16_t)
4166 (wm_with_clock_ranges->wm_sets_mcif[i].wm_max_memclk_in_khz) /
4167 100);
4168 table->WatermarkRow[WM_SOCCLK][i].WmSetting = (uint8_t)
4169 wm_with_clock_ranges->wm_sets_mcif[i].wm_set_id;
4170 }
4171 data->water_marks_bitmap = WaterMarksExist;
4172 }
4173
4174 return result;
4175}
4176
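/* In manual DPM mode, set the SCLK/MCLK soft minimum or the minimum PCIe
 * link level to the lowest index set in @mask.
 */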
4177static int vega10_force_clock_level(struct pp_hwmgr *hwmgr,
4178 enum pp_clock_type type, uint32_t mask)
4179{
4180 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4181 uint32_t i;
4182
4183 if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
4184 return -EINVAL;
4185
4186 switch (type) {
4187 case PP_SCLK:
4188 if (data->registry_data.sclk_dpm_key_disabled)
4189 break;
4190
4191 for (i = 0; i < 32; i++) {
4192 if (mask & (1 << i))
4193 break;
4194 }
4195
4196 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
4197 hwmgr->smumgr,
4198 PPSMC_MSG_SetSoftMinGfxclkByIndex,
4199 i),
4200 "Failed to set soft min sclk index!",
4201 return -1);
4202 break;
4203
4204 case PP_MCLK:
4205 if (data->registry_data.mclk_dpm_key_disabled)
4206 break;
4207
4208 for (i = 0; i < 32; i++) {
4209 if (mask & (1 << i))
4210 break;
4211 }
4212
4213 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
4214 hwmgr->smumgr,
4215 PPSMC_MSG_SetSoftMinUclkByIndex,
4216 i),
4217 "Failed to set soft min mclk index!",
4218 return -1);
4219 break;
4220
4221 case PP_PCIE:
4222 if (data->registry_data.pcie_dpm_key_disabled)
4223 break;
4224
4225 for (i = 0; i < 32; i++) {
4226 if (mask & (1 << i))
4227 break;
4228 }
4229
4230 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
4231 hwmgr->smumgr,
4232 PPSMC_MSG_SetMinLinkDpmByIndex,
4233 i),
4234 "Failed to set min pcie index!",
4235 return -1);
4236 break;
4237 default:
4238 break;
4239 }
4240
4241 return 0;
4242}
4243
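/* Print the available SCLK/MCLK/PCIe levels into @buf, marking the level the
 * SMC currently reports as active with an asterisk.
 */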
4244static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
4245 enum pp_clock_type type, char *buf)
4246{
4247 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4248 struct vega10_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table);
4249 struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table);
4250 struct vega10_pcie_table *pcie_table = &(data->dpm_table.pcie_table);
4251 int i, now, size = 0;
4252
4253 switch (type) {
4254 case PP_SCLK:
4255 if (data->registry_data.sclk_dpm_key_disabled)
4256 break;
4257
4258 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr->smumgr,
4259 PPSMC_MSG_GetCurrentGfxclkIndex),
4260 "Attempt to get current sclk index Failed!",
4261 return -1);
4262 PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(hwmgr->smumgr,
4263 &now),
4264 "Attempt to read sclk index Failed!",
4265 return -1);
4266
4267 for (i = 0; i < sclk_table->count; i++)
 4268 			size += sprintf(buf + size, "%d: %uMHz %s\n",
4269 i, sclk_table->dpm_levels[i].value / 100,
4270 (i == now) ? "*" : "");
4271 break;
4272 case PP_MCLK:
4273 if (data->registry_data.mclk_dpm_key_disabled)
4274 break;
4275
4276 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr->smumgr,
4277 PPSMC_MSG_GetCurrentUclkIndex),
4278 "Attempt to get current mclk index Failed!",
4279 return -1);
4280 PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(hwmgr->smumgr,
4281 &now),
4282 "Attempt to read mclk index Failed!",
4283 return -1);
4284
4285 for (i = 0; i < mclk_table->count; i++)
 4286 			size += sprintf(buf + size, "%d: %uMHz %s\n",
4287 i, mclk_table->dpm_levels[i].value / 100,
4288 (i == now) ? "*" : "");
4289 break;
4290 case PP_PCIE:
4291 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr->smumgr,
4292 PPSMC_MSG_GetCurrentLinkIndex),
4293 "Attempt to get current mclk index Failed!",
4294 return -1);
4295 PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(hwmgr->smumgr,
4296 &now),
4297 "Attempt to read mclk index Failed!",
4298 return -1);
4299
4300 for (i = 0; i < pcie_table->count; i++)
4301 size += sprintf(buf + size, "%d: %s %s\n", i,
 4302 				(pcie_table->pcie_gen[i] == 0) ? "2.5GT/s, x1" :
 4303 				(pcie_table->pcie_gen[i] == 1) ? "5.0GT/s, x16" :
 4304 				(pcie_table->pcie_gen[i] == 2) ? "8.0GT/s, x16" : "",
4305 (i == now) ? "*" : "");
4306 break;
4307 default:
4308 break;
4309 }
4310 return size;
4311}
4312
4313static int vega10_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
4314{
4315 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4316 int result = 0;
4317 uint32_t num_turned_on_displays = 1;
4318 Watermarks_t *wm_table = &(data->smc_state_table.water_marks_table);
4319 struct cgs_display_info info = {0};
4320
4321 if ((data->water_marks_bitmap & WaterMarksExist) &&
4322 !(data->water_marks_bitmap & WaterMarksLoaded)) {
4323 result = vega10_copy_table_to_smc(hwmgr->smumgr,
4324 (uint8_t *)wm_table, WMTABLE);
 4325 		PP_ASSERT_WITH_CODE(!result, "Failed to update WMTABLE!", return -EINVAL);
4326 data->water_marks_bitmap |= WaterMarksLoaded;
4327 }
4328
4329 if (data->water_marks_bitmap & WaterMarksLoaded) {
4330 cgs_get_active_displays_info(hwmgr->device, &info);
4331 num_turned_on_displays = info.display_count;
4332 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
4333 PPSMC_MSG_NumOfDisplays, num_turned_on_displays);
4334 }
4335
4336 return result;
4337}
4338
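/* Enable or disable the UVD DPM feature in the SMC, if supported. */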
4339int vega10_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
4340{
4341 struct vega10_hwmgr *data =
4342 (struct vega10_hwmgr *)(hwmgr->backend);
4343
4344 if (data->smu_features[GNLD_DPM_UVD].supported) {
4345 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
4346 enable,
4347 data->smu_features[GNLD_DPM_UVD].smu_feature_bitmap),
4348 "Attempt to Enable/Disable DPM UVD Failed!",
4349 return -1);
4350 data->smu_features[GNLD_DPM_UVD].enabled = enable;
4351 }
4352 return 0;
4353}
4354
4355static int vega10_power_gate_vce(struct pp_hwmgr *hwmgr, bool bgate)
4356{
4357 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4358
4359 data->vce_power_gated = bgate;
4360 return vega10_enable_disable_vce_dpm(hwmgr, !bgate);
4361}
4362
4363static int vega10_power_gate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
4364{
4365 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4366
4367 data->uvd_power_gated = bgate;
4368 return vega10_enable_disable_uvd_dpm(hwmgr, !bgate);
4369}
4370
4371static inline bool vega10_are_power_levels_equal(
4372 const struct vega10_performance_level *pl1,
4373 const struct vega10_performance_level *pl2)
4374{
4375 return ((pl1->soc_clock == pl2->soc_clock) &&
4376 (pl1->gfx_clock == pl2->gfx_clock) &&
4377 (pl1->mem_clock == pl2->mem_clock));
4378}
4379
4380static int vega10_check_states_equal(struct pp_hwmgr *hwmgr,
4381 const struct pp_hw_power_state *pstate1,
4382 const struct pp_hw_power_state *pstate2, bool *equal)
4383{
4384 const struct vega10_power_state *psa;
4385 const struct vega10_power_state *psb;
4386 int i;
4387
4388 if (pstate1 == NULL || pstate2 == NULL || equal == NULL)
4389 return -EINVAL;
4390
4391 psa = cast_const_phw_vega10_power_state(pstate1);
4392 psb = cast_const_phw_vega10_power_state(pstate2);
4393 /* If the two states don't even have the same number of performance levels they cannot be the same state. */
4394 if (psa->performance_level_count != psb->performance_level_count) {
4395 *equal = false;
4396 return 0;
4397 }
4398
4399 for (i = 0; i < psa->performance_level_count; i++) {
4400 if (!vega10_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) {
4401 /* If we have found even one performance level pair that is different the states are different. */
4402 *equal = false;
4403 return 0;
4404 }
4405 }
4406
4407 /* If all performance levels are the same try to use the UVD clocks to break the tie.*/
4408 *equal = ((psa->uvd_clks.vclk == psb->uvd_clks.vclk) && (psa->uvd_clks.dclk == psb->uvd_clks.dclk));
4409 *equal &= ((psa->vce_clks.evclk == psb->vce_clks.evclk) && (psa->vce_clks.ecclk == psb->vce_clks.ecclk));
4410 *equal &= (psa->sclk_threshold == psb->sclk_threshold);
4411
4412 return 0;
4413}
4414
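/* Report whether the SMC needs a display-configuration update: the number of
 * active displays changed, or (with SCLK deep sleep enabled) the minimum
 * self-refresh clock changed.
 */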
4415static bool
4416vega10_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
4417{
4418 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4419 bool is_update_required = false;
4420 struct cgs_display_info info = {0, 0, NULL};
4421
4422 cgs_get_active_displays_info(hwmgr->device, &info);
4423
4424 if (data->display_timing.num_existing_displays != info.display_count)
4425 is_update_required = true;
4426
4427 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
4428 if (data->display_timing.min_clock_in_sr != hwmgr->display_config.min_core_set_clock_in_sr)
4429 is_update_required = true;
4430 }
4431
4432 return is_update_required;
4433}
4434
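/* Tear down DPM, provided it is currently running: disable thermal
 * protection, power containment and AVFS, then stop the SMC DPM features.
 */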
4435static int vega10_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
4436{
4437 int tmp_result, result = 0;
4438
4439 tmp_result = (vega10_is_dpm_running(hwmgr)) ? 0 : -1;
4440 PP_ASSERT_WITH_CODE(tmp_result == 0,
4441 "DPM is not running right now, no need to disable DPM!",
4442 return 0);
4443
4444 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
4445 PHM_PlatformCaps_ThermalController))
4446 vega10_disable_thermal_protection(hwmgr);
4447
4448 tmp_result = vega10_disable_power_containment(hwmgr);
4449 PP_ASSERT_WITH_CODE((tmp_result == 0),
4450 "Failed to disable power containment!", result = tmp_result);
4451
4452 tmp_result = vega10_avfs_enable(hwmgr, false);
4453 PP_ASSERT_WITH_CODE((tmp_result == 0),
4454 "Failed to disable AVFS!", result = tmp_result);
4455
4456 tmp_result = vega10_stop_dpm(hwmgr, SMC_DPM_FEATURES);
4457 PP_ASSERT_WITH_CODE((tmp_result == 0),
4458 "Failed to stop DPM!", result = tmp_result);
4459
4460 return result;
4461}
4462
4463static int vega10_power_off_asic(struct pp_hwmgr *hwmgr)
4464{
4465 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4466 int result;
4467
4468 result = vega10_disable_dpm_tasks(hwmgr);
4469 PP_ASSERT_WITH_CODE((0 == result),
4470 "[disable_dpm_tasks] Failed to disable DPM!",
4471 );
4472 data->water_marks_bitmap &= ~(WaterMarksLoaded);
4473
4474 return result;
4475}
4476
4477
4478static const struct pp_hwmgr_func vega10_hwmgr_funcs = {
4479 .backend_init = vega10_hwmgr_backend_init,
4480 .backend_fini = vega10_hwmgr_backend_fini,
4481 .asic_setup = vega10_setup_asic_task,
4482 .dynamic_state_management_enable = vega10_enable_dpm_tasks,
 4483 	.dynamic_state_management_disable = vega10_disable_dpm_tasks,
4484 .get_num_of_pp_table_entries =
4485 vega10_get_number_of_powerplay_table_entries,
4486 .get_power_state_size = vega10_get_power_state_size,
4487 .get_pp_table_entry = vega10_get_pp_table_entry,
4488 .patch_boot_state = vega10_patch_boot_state,
4489 .apply_state_adjust_rules = vega10_apply_state_adjust_rules,
4490 .power_state_set = vega10_set_power_state_tasks,
4491 .get_sclk = vega10_dpm_get_sclk,
4492 .get_mclk = vega10_dpm_get_mclk,
4493 .notify_smc_display_config_after_ps_adjustment =
4494 vega10_notify_smc_display_config_after_ps_adjustment,
4495 .force_dpm_level = vega10_dpm_force_dpm_level,
4496 .get_temperature = vega10_thermal_get_temperature,
4497 .stop_thermal_controller = vega10_thermal_stop_thermal_controller,
4498 .get_fan_speed_info = vega10_fan_ctrl_get_fan_speed_info,
4499 .get_fan_speed_percent = vega10_fan_ctrl_get_fan_speed_percent,
4500 .set_fan_speed_percent = vega10_fan_ctrl_set_fan_speed_percent,
4501 .reset_fan_speed_to_default =
4502 vega10_fan_ctrl_reset_fan_speed_to_default,
4503 .get_fan_speed_rpm = vega10_fan_ctrl_get_fan_speed_rpm,
4504 .set_fan_speed_rpm = vega10_fan_ctrl_set_fan_speed_rpm,
4505 .uninitialize_thermal_controller =
4506 vega10_thermal_ctrl_uninitialize_thermal_controller,
4507 .set_fan_control_mode = vega10_set_fan_control_mode,
4508 .get_fan_control_mode = vega10_get_fan_control_mode,
4509 .read_sensor = vega10_read_sensor,
4510 .get_dal_power_level = vega10_get_dal_power_level,
4511 .get_clock_by_type_with_latency = vega10_get_clock_by_type_with_latency,
4512 .get_clock_by_type_with_voltage = vega10_get_clock_by_type_with_voltage,
4513 .set_watermarks_for_clocks_ranges = vega10_set_watermarks_for_clocks_ranges,
4514 .display_clock_voltage_request = vega10_display_clock_voltage_request,
4515 .force_clock_level = vega10_force_clock_level,
4516 .print_clock_levels = vega10_print_clock_levels,
4517 .display_config_changed = vega10_display_configuration_changed_task,
4518 .powergate_uvd = vega10_power_gate_uvd,
4519 .powergate_vce = vega10_power_gate_vce,
4520 .check_states_equal = vega10_check_states_equal,
4521 .check_smc_update_required_for_display_configuration =
4522 vega10_check_smc_update_required_for_display_configuration,
4523 .power_off_asic = vega10_power_off_asic,
4524 .disable_smc_firmware_ctf = vega10_thermal_disable_alert,
4525};
4526
4527int vega10_hwmgr_init(struct pp_hwmgr *hwmgr)
4528{
4529 hwmgr->hwmgr_func = &vega10_hwmgr_funcs;
4530 hwmgr->pptable_func = &vega10_pptable_funcs;
4531 pp_vega10_thermal_initialize(hwmgr);
4532 return 0;
4533}