]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
drm/amd/powerplay: complete disable_smc_firmware_ctf_tasks.
[mirror_ubuntu-artful-kernel.git] / drivers / gpu / drm / amd / powerplay / hwmgr / vega10_hwmgr.c
CommitLineData
f83a9991
EH
1/*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#include <linux/module.h>
24#include <linux/slab.h>
25#include <linux/fb.h>
26#include "linux/delay.h"
27
28#include "hwmgr.h"
29#include "amd_powerplay.h"
30#include "vega10_smumgr.h"
31#include "hardwaremanager.h"
32#include "ppatomfwctrl.h"
33#include "atomfirmware.h"
34#include "cgs_common.h"
35#include "vega10_powertune.h"
36#include "smu9.h"
37#include "smu9_driver_if.h"
38#include "vega10_inc.h"
39#include "pp_soc15.h"
40#include "pppcielanes.h"
41#include "vega10_hwmgr.h"
42#include "vega10_processpptables.h"
43#include "vega10_pptable.h"
44#include "vega10_thermal.h"
45#include "pp_debug.h"
46#include "pp_acpi.h"
47#include "amd_pcie_helpers.h"
48#include "cgs_linux.h"
49#include "ppinterrupt.h"
50
51
52#define VOLTAGE_SCALE 4
53#define VOLTAGE_VID_OFFSET_SCALE1 625
54#define VOLTAGE_VID_OFFSET_SCALE2 100
55
56#define HBM_MEMORY_CHANNEL_WIDTH 128
57
58uint32_t channel_number[] = {1, 2, 0, 4, 0, 8, 0, 16, 2};
59
60#define MEM_FREQ_LOW_LATENCY 25000
61#define MEM_FREQ_HIGH_LATENCY 80000
62#define MEM_LATENCY_HIGH 245
63#define MEM_LATENCY_LOW 35
64#define MEM_LATENCY_ERR 0xFFFF
65
66#define mmDF_CS_AON0_DramBaseAddress0 0x0044
67#define mmDF_CS_AON0_DramBaseAddress0_BASE_IDX 0
68
69//DF_CS_AON0_DramBaseAddress0
70#define DF_CS_AON0_DramBaseAddress0__AddrRngVal__SHIFT 0x0
71#define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn__SHIFT 0x1
72#define DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT 0x4
73#define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel__SHIFT 0x8
74#define DF_CS_AON0_DramBaseAddress0__DramBaseAddr__SHIFT 0xc
75#define DF_CS_AON0_DramBaseAddress0__AddrRngVal_MASK 0x00000001L
76#define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn_MASK 0x00000002L
77#define DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK 0x000000F0L
78#define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel_MASK 0x00000700L
79#define DF_CS_AON0_DramBaseAddress0__DramBaseAddr_MASK 0xFFFFF000L
80
81const ULONG PhwVega10_Magic = (ULONG)(PHM_VIslands_Magic);
82
83struct vega10_power_state *cast_phw_vega10_power_state(
84 struct pp_hw_power_state *hw_ps)
85{
86 PP_ASSERT_WITH_CODE((PhwVega10_Magic == hw_ps->magic),
87 "Invalid Powerstate Type!",
88 return NULL;);
89
90 return (struct vega10_power_state *)hw_ps;
91}
92
93const struct vega10_power_state *cast_const_phw_vega10_power_state(
94 const struct pp_hw_power_state *hw_ps)
95{
96 PP_ASSERT_WITH_CODE((PhwVega10_Magic == hw_ps->magic),
97 "Invalid Powerstate Type!",
98 return NULL;);
99
100 return (const struct vega10_power_state *)hw_ps;
101}
102
103static void vega10_set_default_registry_data(struct pp_hwmgr *hwmgr)
104{
105 struct vega10_hwmgr *data =
106 (struct vega10_hwmgr *)(hwmgr->backend);
107
108 data->registry_data.sclk_dpm_key_disabled =
109 hwmgr->feature_mask & PP_SCLK_DPM_MASK ? false : true;
110 data->registry_data.socclk_dpm_key_disabled =
111 hwmgr->feature_mask & PP_SOCCLK_DPM_MASK ? false : true;
112 data->registry_data.mclk_dpm_key_disabled =
113 hwmgr->feature_mask & PP_MCLK_DPM_MASK ? false : true;
2d5f5f94
RZ
114 data->registry_data.pcie_dpm_key_disabled =
115 hwmgr->feature_mask & PP_PCIE_DPM_MASK ? false : true;
f83a9991
EH
116
117 data->registry_data.dcefclk_dpm_key_disabled =
118 hwmgr->feature_mask & PP_DCEFCLK_DPM_MASK ? false : true;
119
120 if (hwmgr->feature_mask & PP_POWER_CONTAINMENT_MASK) {
121 data->registry_data.power_containment_support = 1;
122 data->registry_data.enable_pkg_pwr_tracking_feature = 1;
123 data->registry_data.enable_tdc_limit_feature = 1;
124 }
125
afc0255c 126 data->registry_data.clock_stretcher_support =
97782cc9 127 hwmgr->feature_mask & PP_CLOCK_STRETCH_MASK ? false : true;
afc0255c 128
f83a9991
EH
129 data->registry_data.disable_water_mark = 0;
130
131 data->registry_data.fan_control_support = 1;
132 data->registry_data.thermal_support = 1;
133 data->registry_data.fw_ctf_enabled = 1;
134
135 data->registry_data.avfs_support = 1;
136 data->registry_data.led_dpm_enabled = 1;
137
138 data->registry_data.vr0hot_enabled = 1;
139 data->registry_data.vr1hot_enabled = 1;
140 data->registry_data.regulator_hot_gpio_support = 1;
141
142 data->display_voltage_mode = PPVEGA10_VEGA10DISPLAYVOLTAGEMODE_DFLT;
143 data->dcef_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
144 data->dcef_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
145 data->dcef_clk_quad_eqn_c = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
146 data->disp_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
147 data->disp_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
148 data->disp_clk_quad_eqn_c = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
149 data->pixel_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
150 data->pixel_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
151 data->pixel_clk_quad_eqn_c = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
152 data->phy_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
153 data->phy_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
154 data->phy_clk_quad_eqn_c = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
155
156 data->gfxclk_average_alpha = PPVEGA10_VEGA10GFXCLKAVERAGEALPHA_DFLT;
157 data->socclk_average_alpha = PPVEGA10_VEGA10SOCCLKAVERAGEALPHA_DFLT;
158 data->uclk_average_alpha = PPVEGA10_VEGA10UCLKCLKAVERAGEALPHA_DFLT;
159 data->gfx_activity_average_alpha = PPVEGA10_VEGA10GFXACTIVITYAVERAGEALPHA_DFLT;
160}
161
/* Set the PHM platform capability bits that apply to Vega10 and query
 * CGS for UVD/VCE power-gating support.  Always returns 0: a failed
 * CGS query only suppresses the two power-gating caps. */
static int vega10_set_features_platform_caps(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data =
			(struct vega10_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)hwmgr->pptable;
	struct cgs_system_info sys_info = {0};
	int result;

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SclkDeepSleep);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DynamicPatchPowerState);

	/* VDDCI is only controllable when backend_init found a control
	 * method for it. */
	if (data->vddci_control == VEGA10_VOLTAGE_CONTROL_NONE)
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_ControlVDDCI);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_TablelessHardwareInterface);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_EnableSMU7ThermalManagement);

	/* Ask CGS which engines support power gating. */
	sys_info.size = sizeof(struct cgs_system_info);
	sys_info.info_id = CGS_SYSTEM_INFO_PG_FLAGS;
	result = cgs_query_system_info(hwmgr->device, &sys_info);

	if (!result && (sys_info.value & AMD_PG_SUPPORT_UVD))
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_UVDPowerGating);

	if (!result && (sys_info.value & AMD_PG_SUPPORT_VCE))
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_VCEPowerGating);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_UnTabledHardwareInterface);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_FanSpeedInTableIsRPM);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_ODFuzzyFanControlSupport);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DynamicPowerManagement);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SMC);

	/* power tune caps */
	/* assume disabled */
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PowerContainment);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SQRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DBRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_TDRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_TCPRamping);

	if (data->registry_data.power_containment_support)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_PowerContainment);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_CAC);

	/* NOTE(review): table_info->tdp_table is dereferenced without a
	 * NULL check — assumes the pptable parser always provides it;
	 * confirm against vega10_processpptables. */
	if (table_info->tdp_table->usClockStretchAmount &&
			data->registry_data.clock_stretcher_support)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_ClockStretcher);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_RegulatorHot);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_AutomaticDCTransition);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_UVDDPM);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_VCEDPM);

	return 0;
}
250
/* Initialize the SMU feature table: map each driver-side GNLD_* slot to
 * its firmware FEATURE_*_BIT, then mark which features are supported
 * according to the registry defaults and platform caps set earlier. */
static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
	int i;

	vega10_initialize_power_tune_defaults(hwmgr);

	/* Reset every slot: invalid id, one bitmap bit per slot, nothing
	 * enabled or supported yet.
	 * NOTE(review): 1 << i assumes GNLD_FEATURES_MAX <= 31 — confirm. */
	for (i = 0; i < GNLD_FEATURES_MAX; i++) {
		data->smu_features[i].smu_feature_id = 0xffff;
		data->smu_features[i].smu_feature_bitmap = 1 << i;
		data->smu_features[i].enabled = false;
		data->smu_features[i].supported = false;
	}

	/* Driver feature slot -> SMU firmware feature bit. */
	data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_id =
			FEATURE_DPM_PREFETCHER_BIT;
	data->smu_features[GNLD_DPM_GFXCLK].smu_feature_id =
			FEATURE_DPM_GFXCLK_BIT;
	data->smu_features[GNLD_DPM_UCLK].smu_feature_id =
			FEATURE_DPM_UCLK_BIT;
	data->smu_features[GNLD_DPM_SOCCLK].smu_feature_id =
			FEATURE_DPM_SOCCLK_BIT;
	data->smu_features[GNLD_DPM_UVD].smu_feature_id =
			FEATURE_DPM_UVD_BIT;
	data->smu_features[GNLD_DPM_VCE].smu_feature_id =
			FEATURE_DPM_VCE_BIT;
	data->smu_features[GNLD_DPM_MP0CLK].smu_feature_id =
			FEATURE_DPM_MP0CLK_BIT;
	data->smu_features[GNLD_DPM_LINK].smu_feature_id =
			FEATURE_DPM_LINK_BIT;
	data->smu_features[GNLD_DPM_DCEFCLK].smu_feature_id =
			FEATURE_DPM_DCEFCLK_BIT;
	data->smu_features[GNLD_ULV].smu_feature_id =
			FEATURE_ULV_BIT;
	data->smu_features[GNLD_AVFS].smu_feature_id =
			FEATURE_AVFS_BIT;
	data->smu_features[GNLD_DS_GFXCLK].smu_feature_id =
			FEATURE_DS_GFXCLK_BIT;
	data->smu_features[GNLD_DS_SOCCLK].smu_feature_id =
			FEATURE_DS_SOCCLK_BIT;
	data->smu_features[GNLD_DS_LCLK].smu_feature_id =
			FEATURE_DS_LCLK_BIT;
	data->smu_features[GNLD_PPT].smu_feature_id =
			FEATURE_PPT_BIT;
	data->smu_features[GNLD_TDC].smu_feature_id =
			FEATURE_TDC_BIT;
	data->smu_features[GNLD_THERMAL].smu_feature_id =
			FEATURE_THERMAL_BIT;
	data->smu_features[GNLD_GFX_PER_CU_CG].smu_feature_id =
			FEATURE_GFX_PER_CU_CG_BIT;
	data->smu_features[GNLD_RM].smu_feature_id =
			FEATURE_RM_BIT;
	data->smu_features[GNLD_DS_DCEFCLK].smu_feature_id =
			FEATURE_DS_DCEFCLK_BIT;
	data->smu_features[GNLD_ACDC].smu_feature_id =
			FEATURE_ACDC_BIT;
	data->smu_features[GNLD_VR0HOT].smu_feature_id =
			FEATURE_VR0HOT_BIT;
	data->smu_features[GNLD_VR1HOT].smu_feature_id =
			FEATURE_VR1HOT_BIT;
	data->smu_features[GNLD_FW_CTF].smu_feature_id =
			FEATURE_FW_CTF_BIT;
	data->smu_features[GNLD_LED_DISPLAY].smu_feature_id =
			FEATURE_LED_DISPLAY_BIT;
	data->smu_features[GNLD_FAN_CONTROL].smu_feature_id =
			FEATURE_FAN_CONTROL_BIT;
	data->smu_features[GNLD_VOLTAGE_CONTROLLER].smu_feature_id =
			FEATURE_VOLTAGE_CONTROLLER_BIT;

	/* Supported flags follow the registry keys / platform caps. */
	if (!data->registry_data.prefetcher_dpm_key_disabled)
		data->smu_features[GNLD_DPM_PREFETCHER].supported = true;

	if (!data->registry_data.sclk_dpm_key_disabled)
		data->smu_features[GNLD_DPM_GFXCLK].supported = true;

	if (!data->registry_data.mclk_dpm_key_disabled)
		data->smu_features[GNLD_DPM_UCLK].supported = true;

	if (!data->registry_data.socclk_dpm_key_disabled)
		data->smu_features[GNLD_DPM_SOCCLK].supported = true;

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_UVDDPM))
		data->smu_features[GNLD_DPM_UVD].supported = true;

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_VCEDPM))
		data->smu_features[GNLD_DPM_VCE].supported = true;

	if (!data->registry_data.pcie_dpm_key_disabled)
		data->smu_features[GNLD_DPM_LINK].supported = true;

	if (!data->registry_data.dcefclk_dpm_key_disabled)
		data->smu_features[GNLD_DPM_DCEFCLK].supported = true;

	/* Deep-sleep support covers GFX, SOC and LCLK as a bundle. */
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SclkDeepSleep) &&
			data->registry_data.sclk_deep_sleep_support) {
		data->smu_features[GNLD_DS_GFXCLK].supported = true;
		data->smu_features[GNLD_DS_SOCCLK].supported = true;
		data->smu_features[GNLD_DS_LCLK].supported = true;
	}

	if (data->registry_data.enable_pkg_pwr_tracking_feature)
		data->smu_features[GNLD_PPT].supported = true;

	if (data->registry_data.enable_tdc_limit_feature)
		data->smu_features[GNLD_TDC].supported = true;

	if (data->registry_data.thermal_support)
		data->smu_features[GNLD_THERMAL].supported = true;

	if (data->registry_data.fan_control_support)
		data->smu_features[GNLD_FAN_CONTROL].supported = true;

	if (data->registry_data.fw_ctf_enabled)
		data->smu_features[GNLD_FW_CTF].supported = true;

	if (data->registry_data.avfs_support)
		data->smu_features[GNLD_AVFS].supported = true;

	if (data->registry_data.led_dpm_enabled)
		data->smu_features[GNLD_LED_DISPLAY].supported = true;

	if (data->registry_data.vr1hot_enabled)
		data->smu_features[GNLD_VR1HOT].supported = true;

	if (data->registry_data.vr0hot_enabled)
		data->smu_features[GNLD_VR0HOT].supported = true;

}
382
383#ifdef PPLIB_VEGA10_EVV_SUPPORT
384static int vega10_get_socclk_for_voltage_evv(struct pp_hwmgr *hwmgr,
385 phm_ppt_v1_voltage_lookup_table *lookup_table,
386 uint16_t virtual_voltage_id, int32_t *socclk)
387{
388 uint8_t entry_id;
389 uint8_t voltage_id;
390 struct phm_ppt_v2_information *table_info =
391 (struct phm_ppt_v2_information *)(hwmgr->pptable);
392
393 PP_ASSERT_WITH_CODE(lookup_table->count != 0,
394 "Lookup table is empty",
395 return -EINVAL);
396
397 /* search for leakage voltage ID 0xff01 ~ 0xff08 and sclk */
398 for (entry_id = 0; entry_id < table_info->vdd_dep_on_sclk->count; entry_id++) {
399 voltage_id = table_info->vdd_dep_on_socclk->entries[entry_id].vddInd;
400 if (lookup_table->entries[voltage_id].us_vdd == virtual_voltage_id)
401 break;
402 }
403
404 PP_ASSERT_WITH_CODE(entry_id < table_info->vdd_dep_on_socclk->count,
405 "Can't find requested voltage id in vdd_dep_on_socclk table!",
406 return -EINVAL);
407
408 *socclk = table_info->vdd_dep_on_socclk->entries[entry_id].clk;
409
410 return 0;
411}
412
413#define ATOM_VIRTUAL_VOLTAGE_ID0 0xff01
/**
 * Get leakage VDDC based on leakage ID (EVV).
 *
 * For each virtual leakage ID (0xff01..0xff08) found in the VDDC lookup
 * table, query ATOM for the real voltage and record the
 * (actual_voltage, leakage_id) pair in data->vddc_leakage.
 *
 * @param hwmgr the address of the powerplay hardware manager.
 * @return always 0.
 */
static int vega10_get_evv_voltages(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
	uint16_t vv_id;
	uint32_t vddc = 0;
	uint16_t i, j;
	uint32_t sclk = 0;
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)hwmgr->pptable;
	struct phm_ppt_v1_clock_voltage_dependency_table *socclk_table =
			table_info->vdd_dep_on_socclk;
	int result;

	for (i = 0; i < VEGA10_MAX_LEAKAGE_COUNT; i++) {
		vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;

		/* NOTE(review): sclk is uint32_t but the callee's out
		 * parameter is int32_t * — confirm the prototypes agree. */
		if (!vega10_get_socclk_for_voltage_evv(hwmgr,
				table_info->vddc_lookup_table, vv_id, &sclk)) {
			/* With clock stretching enabled, bump sclk by 5 MHz
			 * (units of 10 kHz) when the matching socclk entry
			 * has CKS disabled. */
			if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_ClockStretcher)) {
				for (j = 1; j < socclk_table->count; j++) {
					if (socclk_table->entries[j].clk == sclk &&
							socclk_table->entries[j].cks_enable == 0) {
						sclk += 5000;
						break;
					}
				}
			}

			PP_ASSERT_WITH_CODE(!atomctrl_get_voltage_evv_on_sclk_ai(hwmgr,
					VOLTAGE_TYPE_VDDC, sclk, vv_id, &vddc),
					"Error retrieving EVV voltage value!",
					continue);


			/* need to make sure vddc is less than 2v or else, it could burn the ASIC. */
			/* NOTE(review): result is assigned here but never
			 * returned — an invalid VDDC is logged and skipped,
			 * and the function still reports success. */
			PP_ASSERT_WITH_CODE((vddc < 2000 && vddc != 0),
					"Invalid VDDC value", result = -EINVAL;);

			/* the voltage should not be zero nor equal to leakage ID */
			if (vddc != 0 && vddc != vv_id) {
				data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = (uint16_t)(vddc/100);
				data->vddc_leakage.leakage_id[data->vddc_leakage.count] = vv_id;
				data->vddc_leakage.count++;
			}
		}
	}

	return 0;
}
470
471/**
472 * Change virtual leakage voltage to actual value.
473 *
474 * @param hwmgr the address of the powerplay hardware manager.
475 * @param pointer to changing voltage
476 * @param pointer to leakage table
477 */
478static void vega10_patch_with_vdd_leakage(struct pp_hwmgr *hwmgr,
479 uint16_t *voltage, struct vega10_leakage_voltage *leakage_table)
480{
481 uint32_t index;
482
483 /* search for leakage voltage ID 0xff01 ~ 0xff08 */
484 for (index = 0; index < leakage_table->count; index++) {
485 /* if this voltage matches a leakage voltage ID */
486 /* patch with actual leakage voltage */
487 if (leakage_table->leakage_id[index] == *voltage) {
488 *voltage = leakage_table->actual_voltage[index];
489 break;
490 }
491 }
492
493 if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
494 pr_info("Voltage value looks like a Leakage ID \
495 but it's not patched\n");
496}
497
498/**
499* Patch voltage lookup table by EVV leakages.
500*
501* @param hwmgr the address of the powerplay hardware manager.
502* @param pointer to voltage lookup table
503* @param pointer to leakage table
504* @return always 0
505*/
506static int vega10_patch_lookup_table_with_leakage(struct pp_hwmgr *hwmgr,
507 phm_ppt_v1_voltage_lookup_table *lookup_table,
508 struct vega10_leakage_voltage *leakage_table)
509{
510 uint32_t i;
511
512 for (i = 0; i < lookup_table->count; i++)
513 vega10_patch_with_vdd_leakage(hwmgr,
514 &lookup_table->entries[i].us_vdd, leakage_table);
515
516 return 0;
517}
518
519static int vega10_patch_clock_voltage_limits_with_vddc_leakage(
520 struct pp_hwmgr *hwmgr, struct vega10_leakage_voltage *leakage_table,
521 uint16_t *vddc)
522{
523 vega10_patch_with_vdd_leakage(hwmgr, (uint16_t *)vddc, leakage_table);
524
525 return 0;
526}
527#endif
528
529static int vega10_patch_voltage_dependency_tables_with_lookup_table(
530 struct pp_hwmgr *hwmgr)
531{
532 uint8_t entry_id;
533 uint8_t voltage_id;
534 struct phm_ppt_v2_information *table_info =
535 (struct phm_ppt_v2_information *)(hwmgr->pptable);
536 struct phm_ppt_v1_clock_voltage_dependency_table *socclk_table =
537 table_info->vdd_dep_on_socclk;
538 struct phm_ppt_v1_clock_voltage_dependency_table *gfxclk_table =
539 table_info->vdd_dep_on_sclk;
540 struct phm_ppt_v1_clock_voltage_dependency_table *dcefclk_table =
541 table_info->vdd_dep_on_dcefclk;
542 struct phm_ppt_v1_clock_voltage_dependency_table *pixclk_table =
543 table_info->vdd_dep_on_pixclk;
544 struct phm_ppt_v1_clock_voltage_dependency_table *dspclk_table =
545 table_info->vdd_dep_on_dispclk;
546 struct phm_ppt_v1_clock_voltage_dependency_table *phyclk_table =
547 table_info->vdd_dep_on_phyclk;
548 struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table =
549 table_info->vdd_dep_on_mclk;
550 struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
551 table_info->mm_dep_table;
552
553 for (entry_id = 0; entry_id < socclk_table->count; entry_id++) {
554 voltage_id = socclk_table->entries[entry_id].vddInd;
555 socclk_table->entries[entry_id].vddc =
556 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
557 }
558
559 for (entry_id = 0; entry_id < gfxclk_table->count; entry_id++) {
560 voltage_id = gfxclk_table->entries[entry_id].vddInd;
561 gfxclk_table->entries[entry_id].vddc =
562 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
563 }
564
565 for (entry_id = 0; entry_id < dcefclk_table->count; entry_id++) {
566 voltage_id = dcefclk_table->entries[entry_id].vddInd;
567 dcefclk_table->entries[entry_id].vddc =
568 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
569 }
570
571 for (entry_id = 0; entry_id < pixclk_table->count; entry_id++) {
572 voltage_id = pixclk_table->entries[entry_id].vddInd;
573 pixclk_table->entries[entry_id].vddc =
574 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
575 }
576
577 for (entry_id = 0; entry_id < dspclk_table->count; entry_id++) {
578 voltage_id = dspclk_table->entries[entry_id].vddInd;
579 dspclk_table->entries[entry_id].vddc =
580 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
581 }
582
583 for (entry_id = 0; entry_id < phyclk_table->count; entry_id++) {
584 voltage_id = phyclk_table->entries[entry_id].vddInd;
585 phyclk_table->entries[entry_id].vddc =
586 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
587 }
588
589 for (entry_id = 0; entry_id < mclk_table->count; ++entry_id) {
590 voltage_id = mclk_table->entries[entry_id].vddInd;
591 mclk_table->entries[entry_id].vddc =
592 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
593 voltage_id = mclk_table->entries[entry_id].vddciInd;
594 mclk_table->entries[entry_id].vddci =
595 table_info->vddci_lookup_table->entries[voltage_id].us_vdd;
596 voltage_id = mclk_table->entries[entry_id].mvddInd;
597 mclk_table->entries[entry_id].mvdd =
598 table_info->vddmem_lookup_table->entries[voltage_id].us_vdd;
599 }
600
601 for (entry_id = 0; entry_id < mm_table->count; ++entry_id) {
602 voltage_id = mm_table->entries[entry_id].vddcInd;
603 mm_table->entries[entry_id].vddc =
604 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
605 }
606
607 return 0;
608
609}
610
611static int vega10_sort_lookup_table(struct pp_hwmgr *hwmgr,
612 struct phm_ppt_v1_voltage_lookup_table *lookup_table)
613{
614 uint32_t table_size, i, j;
615 struct phm_ppt_v1_voltage_lookup_record tmp_voltage_lookup_record;
616
617 PP_ASSERT_WITH_CODE(lookup_table && lookup_table->count,
618 "Lookup table is empty", return -EINVAL);
619
620 table_size = lookup_table->count;
621
622 /* Sorting voltages */
623 for (i = 0; i < table_size - 1; i++) {
624 for (j = i + 1; j > 0; j--) {
625 if (lookup_table->entries[j].us_vdd <
626 lookup_table->entries[j - 1].us_vdd) {
627 tmp_voltage_lookup_record = lookup_table->entries[j - 1];
628 lookup_table->entries[j - 1] = lookup_table->entries[j];
629 lookup_table->entries[j] = tmp_voltage_lookup_record;
630 }
631 }
632 }
633
634 return 0;
635}
636
/* Run all post-parse fixups on the voltage tables: (with EVV support)
 * patch leakage IDs, then resolve lookup-table indices into actual
 * voltages, then sort the VDDC lookup table.  All steps are attempted;
 * the code of the last failing step is returned. */
static int vega10_complete_dependency_tables(struct pp_hwmgr *hwmgr)
{
	int result = 0;
	int tmp_result;
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)(hwmgr->pptable);
#ifdef PPLIB_VEGA10_EVV_SUPPORT
	struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);

	tmp_result = vega10_patch_lookup_table_with_leakage(hwmgr,
			table_info->vddc_lookup_table, &(data->vddc_leakage));
	if (tmp_result)
		result = tmp_result;

	tmp_result = vega10_patch_clock_voltage_limits_with_vddc_leakage(hwmgr,
			&(data->vddc_leakage), &table_info->max_clock_voltage_on_dc.vddc);
	if (tmp_result)
		result = tmp_result;
#endif

	tmp_result = vega10_patch_voltage_dependency_tables_with_lookup_table(hwmgr);
	if (tmp_result)
		result = tmp_result;

	tmp_result = vega10_sort_lookup_table(hwmgr, table_info->vddc_lookup_table);
	if (tmp_result)
		result = tmp_result;

	return result;
}
667
668static int vega10_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr)
669{
670 struct phm_ppt_v2_information *table_info =
671 (struct phm_ppt_v2_information *)(hwmgr->pptable);
672 struct phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table =
673 table_info->vdd_dep_on_socclk;
674 struct phm_ppt_v1_clock_voltage_dependency_table *allowed_mclk_vdd_table =
675 table_info->vdd_dep_on_mclk;
676
677 PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table,
678 "VDD dependency on SCLK table is missing. \
679 This table is mandatory", return -EINVAL);
680 PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1,
681 "VDD dependency on SCLK table is empty. \
682 This table is mandatory", return -EINVAL);
683
684 PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table,
685 "VDD dependency on MCLK table is missing. \
686 This table is mandatory", return -EINVAL);
687 PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1,
688 "VDD dependency on MCLK table is empty. \
689 This table is mandatory", return -EINVAL);
690
691 table_info->max_clock_voltage_on_ac.sclk =
692 allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].clk;
693 table_info->max_clock_voltage_on_ac.mclk =
694 allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].clk;
695 table_info->max_clock_voltage_on_ac.vddc =
696 allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc;
697 table_info->max_clock_voltage_on_ac.vddci =
698 allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].vddci;
699
700 hwmgr->dyn_state.max_clock_voltage_on_ac.sclk =
701 table_info->max_clock_voltage_on_ac.sclk;
702 hwmgr->dyn_state.max_clock_voltage_on_ac.mclk =
703 table_info->max_clock_voltage_on_ac.mclk;
704 hwmgr->dyn_state.max_clock_voltage_on_ac.vddc =
705 table_info->max_clock_voltage_on_ac.vddc;
706 hwmgr->dyn_state.max_clock_voltage_on_ac.vddci =
707 table_info->max_clock_voltage_on_ac.vddci;
708
709 return 0;
710}
711
712static int vega10_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
713{
714 kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
715 hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL;
716
717 kfree(hwmgr->backend);
718 hwmgr->backend = NULL;
719
720 return 0;
721}
722
/* Allocate and initialize the Vega10 private backend: registry
 * defaults, voltage-control discovery (VDDCR_SOC must be SVID2),
 * platform caps, DPM feature defaults, pptable fixups and the default
 * overdrive fan table.  Returns 0 on success or a negative error. */
static int vega10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
{
	int result = 0;
	struct vega10_hwmgr *data;
	uint32_t config_telemetry = 0;
	struct pp_atomfwctrl_voltage_table vol_table;
	struct cgs_system_info sys_info = {0};

	data = kzalloc(sizeof(struct vega10_hwmgr), GFP_KERNEL);
	if (data == NULL)
		return -ENOMEM;

	hwmgr->backend = data;

	vega10_set_default_registry_data(hwmgr);

	data->disable_dpm_mask = 0xff;
	data->workload_mask = 0xff;

	/* need to set voltage control types before EVV patching */
	data->vddc_control = VEGA10_VOLTAGE_CONTROL_NONE;
	data->mvdd_control = VEGA10_VOLTAGE_CONTROL_NONE;
	data->vddci_control = VEGA10_VOLTAGE_CONTROL_NONE;

	/* VDDCR_SOC: must be SVID2-controlled; pack its telemetry slope
	 * and offset into the low 16 bits of config_telemetry. */
	if (pp_atomfwctrl_is_voltage_controlled_by_gpio_v4(hwmgr,
			VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2)) {
		if (!pp_atomfwctrl_get_voltage_table_v4(hwmgr,
				VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2,
				&vol_table)) {
			config_telemetry = ((vol_table.telemetry_slope << 8) & 0xff00) |
					(vol_table.telemetry_offset & 0xff);
			data->vddc_control = VEGA10_VOLTAGE_CONTROL_BY_SVID2;
		}
	} else {
		/* Non-SVID2 VDDCR_SOC is unsupported: free the backend and
		 * abort.  NOTE(review): returns -1 rather than an errno
		 * constant — consider -EINVAL. */
		kfree(hwmgr->backend);
		hwmgr->backend = NULL;
		PP_ASSERT_WITH_CODE(false,
				"VDDCR_SOC is not SVID2!",
				return -1);
	}

	/* MVDDC: optional SVID2 control; telemetry goes into the high
	 * 16 bits of config_telemetry. */
	if (pp_atomfwctrl_is_voltage_controlled_by_gpio_v4(hwmgr,
			VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2)) {
		if (!pp_atomfwctrl_get_voltage_table_v4(hwmgr,
				VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2,
				&vol_table)) {
			config_telemetry |=
					((vol_table.telemetry_slope << 24) & 0xff000000) |
					((vol_table.telemetry_offset << 16) & 0xff0000);
			data->mvdd_control = VEGA10_VOLTAGE_CONTROL_BY_SVID2;
		}
	}

	/* VDDCI_MEM: optional GPIO-LUT control, only when the platform
	 * cap allows VDDCI control at all. */
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_ControlVDDCI)) {
		if (pp_atomfwctrl_is_voltage_controlled_by_gpio_v4(hwmgr,
				VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
			data->vddci_control = VEGA10_VOLTAGE_CONTROL_BY_GPIO;
	}

	data->config_telemetry = config_telemetry;

	vega10_set_features_platform_caps(hwmgr);

	vega10_init_dpm_defaults(hwmgr);

#ifdef PPLIB_VEGA10_EVV_SUPPORT
	/* Get leakage voltage based on leakage ID. */
	PP_ASSERT_WITH_CODE(!vega10_get_evv_voltages(hwmgr),
			"Get EVV Voltage Failed.  Abort Driver loading!",
			return -1);
#endif

	/* Patch our voltage dependency table with actual leakage voltage
	 * We need to perform leakage translation before it's used by other functions
	 */
	vega10_complete_dependency_tables(hwmgr);

	/* Parse pptable data read from VBIOS */
	vega10_set_private_data_based_on_pptable(hwmgr);

	data->is_tlu_enabled = false;

	hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
			VEGA10_MAX_HARDWARE_POWERLEVELS;
	hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
	hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;

	hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */
	/* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. */
	hwmgr->platform_descriptor.clockStep.engineClock = 500;
	hwmgr->platform_descriptor.clockStep.memoryClock = 500;

	/* Active CU count; sys_info.value is zero-initialized, so a
	 * failed query leaves total_active_cus at 0 and the error code
	 * is propagated via the return below. */
	sys_info.size = sizeof(struct cgs_system_info);
	sys_info.info_id = CGS_SYSTEM_INFO_GFX_CU_INFO;
	result = cgs_query_system_info(hwmgr->device, &sys_info);
	data->total_active_cus = sys_info.value;
	/* Setup default Overdrive Fan control settings */
	data->odn_fan_table.target_fan_speed =
			hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM;
	data->odn_fan_table.target_temperature =
			hwmgr->thermal_controller.
			advanceFanControlParameters.ucTargetTemperature;
	data->odn_fan_table.min_performance_clock =
			hwmgr->thermal_controller.advanceFanControlParameters.
			ulMinFanSCLKAcousticLimit;
	data->odn_fan_table.min_fan_limit =
			hwmgr->thermal_controller.
			advanceFanControlParameters.usFanPWMMinLimit *
			hwmgr->thermal_controller.fanInfo.ulMaxRPM / 100;

	return result;
}
839
840static int vega10_init_sclk_threshold(struct pp_hwmgr *hwmgr)
841{
842 struct vega10_hwmgr *data =
843 (struct vega10_hwmgr *)(hwmgr->backend);
844
845 data->low_sclk_interrupt_threshold = 0;
846
847 return 0;
848}
849
850static int vega10_setup_dpm_led_config(struct pp_hwmgr *hwmgr)
851{
852 struct vega10_hwmgr *data =
853 (struct vega10_hwmgr *)(hwmgr->backend);
854 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
855
856 struct pp_atomfwctrl_voltage_table table;
857 uint8_t i, j;
858 uint32_t mask = 0;
859 uint32_t tmp;
860 int32_t ret = 0;
861
862 ret = pp_atomfwctrl_get_voltage_table_v4(hwmgr, VOLTAGE_TYPE_LEDDPM,
863 VOLTAGE_OBJ_GPIO_LUT, &table);
864
865 if (!ret) {
866 tmp = table.mask_low;
867 for (i = 0, j = 0; i < 32; i++) {
868 if (tmp & 1) {
869 mask |= (uint32_t)(i << (8 * j));
870 if (++j >= 3)
871 break;
872 }
873 tmp >>= 1;
874 }
875 }
876
877 pp_table->LedPin0 = (uint8_t)(mask & 0xff);
878 pp_table->LedPin1 = (uint8_t)((mask >> 8) & 0xff);
879 pp_table->LedPin2 = (uint8_t)((mask >> 16) & 0xff);
880 return 0;
881}
882
883static int vega10_setup_asic_task(struct pp_hwmgr *hwmgr)
884{
885 PP_ASSERT_WITH_CODE(!vega10_init_sclk_threshold(hwmgr),
886 "Failed to init sclk threshold!",
887 return -EINVAL);
888
889 PP_ASSERT_WITH_CODE(!vega10_setup_dpm_led_config(hwmgr),
890 "Failed to set up led dpm config!",
891 return -EINVAL);
892
893 return 0;
894}
895
896static bool vega10_is_dpm_running(struct pp_hwmgr *hwmgr)
897{
898 uint32_t features_enabled;
899
900 if (!vega10_get_smc_features(hwmgr->smumgr, &features_enabled)) {
901 if (features_enabled & SMC_DPM_FEATURES)
902 return true;
903 }
904 return false;
905}
906
907/**
908* Remove repeated voltage values and create table with unique values.
909*
910* @param hwmgr the address of the powerplay hardware manager.
911* @param vol_table the pointer to changing voltage table
912* @return 0 in success
913*/
914
915static int vega10_trim_voltage_table(struct pp_hwmgr *hwmgr,
916 struct pp_atomfwctrl_voltage_table *vol_table)
917{
918 uint32_t i, j;
919 uint16_t vvalue;
920 bool found = false;
921 struct pp_atomfwctrl_voltage_table *table;
922
923 PP_ASSERT_WITH_CODE(vol_table,
924 "Voltage Table empty.", return -EINVAL);
925 table = kzalloc(sizeof(struct pp_atomfwctrl_voltage_table),
926 GFP_KERNEL);
927
928 if (!table)
929 return -ENOMEM;
930
931 table->mask_low = vol_table->mask_low;
932 table->phase_delay = vol_table->phase_delay;
933
934 for (i = 0; i < vol_table->count; i++) {
935 vvalue = vol_table->entries[i].value;
936 found = false;
937
938 for (j = 0; j < table->count; j++) {
939 if (vvalue == table->entries[j].value) {
940 found = true;
941 break;
942 }
943 }
944
945 if (!found) {
946 table->entries[table->count].value = vvalue;
947 table->entries[table->count].smio_low =
948 vol_table->entries[i].smio_low;
949 table->count++;
950 }
951 }
952
953 memcpy(vol_table, table, sizeof(struct pp_atomfwctrl_voltage_table));
954 kfree(table);
955
956 return 0;
957}
958
959static int vega10_get_mvdd_voltage_table(struct pp_hwmgr *hwmgr,
960 phm_ppt_v1_clock_voltage_dependency_table *dep_table,
961 struct pp_atomfwctrl_voltage_table *vol_table)
962{
963 int i;
964
965 PP_ASSERT_WITH_CODE(dep_table->count,
966 "Voltage Dependency Table empty.",
967 return -EINVAL);
968
969 vol_table->mask_low = 0;
970 vol_table->phase_delay = 0;
971 vol_table->count = dep_table->count;
972
973 for (i = 0; i < vol_table->count; i++) {
974 vol_table->entries[i].value = dep_table->entries[i].mvdd;
975 vol_table->entries[i].smio_low = 0;
976 }
977
978 PP_ASSERT_WITH_CODE(!vega10_trim_voltage_table(hwmgr,
979 vol_table),
980 "Failed to trim MVDD Table!",
981 return -1);
982
983 return 0;
984}
985
986static int vega10_get_vddci_voltage_table(struct pp_hwmgr *hwmgr,
987 phm_ppt_v1_clock_voltage_dependency_table *dep_table,
988 struct pp_atomfwctrl_voltage_table *vol_table)
989{
990 uint32_t i;
991
992 PP_ASSERT_WITH_CODE(dep_table->count,
993 "Voltage Dependency Table empty.",
994 return -EINVAL);
995
996 vol_table->mask_low = 0;
997 vol_table->phase_delay = 0;
998 vol_table->count = dep_table->count;
999
1000 for (i = 0; i < dep_table->count; i++) {
1001 vol_table->entries[i].value = dep_table->entries[i].vddci;
1002 vol_table->entries[i].smio_low = 0;
1003 }
1004
1005 PP_ASSERT_WITH_CODE(!vega10_trim_voltage_table(hwmgr, vol_table),
1006 "Failed to trim VDDCI table.",
1007 return -1);
1008
1009 return 0;
1010}
1011
1012static int vega10_get_vdd_voltage_table(struct pp_hwmgr *hwmgr,
1013 phm_ppt_v1_clock_voltage_dependency_table *dep_table,
1014 struct pp_atomfwctrl_voltage_table *vol_table)
1015{
1016 int i;
1017
1018 PP_ASSERT_WITH_CODE(dep_table->count,
1019 "Voltage Dependency Table empty.",
1020 return -EINVAL);
1021
1022 vol_table->mask_low = 0;
1023 vol_table->phase_delay = 0;
1024 vol_table->count = dep_table->count;
1025
1026 for (i = 0; i < vol_table->count; i++) {
1027 vol_table->entries[i].value = dep_table->entries[i].vddc;
1028 vol_table->entries[i].smio_low = 0;
1029 }
1030
1031 return 0;
1032}
1033
1034/* ---- Voltage Tables ----
1035 * If the voltage table would be bigger than
1036 * what will fit into the state table on
1037 * the SMC keep only the higher entries.
1038 */
1039static void vega10_trim_voltage_table_to_fit_state_table(
1040 struct pp_hwmgr *hwmgr,
1041 uint32_t max_vol_steps,
1042 struct pp_atomfwctrl_voltage_table *vol_table)
1043{
1044 unsigned int i, diff;
1045
1046 if (vol_table->count <= max_vol_steps)
1047 return;
1048
1049 diff = vol_table->count - max_vol_steps;
1050
1051 for (i = 0; i < max_vol_steps; i++)
1052 vol_table->entries[i] = vol_table->entries[i + diff];
1053
1054 vol_table->count = max_vol_steps;
1055}
1056
1057/**
1058* Create Voltage Tables.
1059*
1060* @param hwmgr the address of the powerplay hardware manager.
1061* @return always 0
1062*/
static int vega10_construct_voltage_tables(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)hwmgr->pptable;
	int result;

	/* MVDD: built from the MCLK dependency table when MVDD is
	 * SVID2- or driver-controlled. */
	if (data->mvdd_control == VEGA10_VOLTAGE_CONTROL_BY_SVID2 ||
			data->mvdd_control == VEGA10_VOLTAGE_CONTROL_NONE) {
		result = vega10_get_mvdd_voltage_table(hwmgr,
				table_info->vdd_dep_on_mclk,
				&(data->mvdd_voltage_table));
		PP_ASSERT_WITH_CODE(!result,
				"Failed to retrieve MVDDC table!",
				return result);
	}

	/* VDDCI: only needed when there is no hardware voltage control. */
	if (data->vddci_control == VEGA10_VOLTAGE_CONTROL_NONE) {
		result = vega10_get_vddci_voltage_table(hwmgr,
				table_info->vdd_dep_on_mclk,
				&(data->vddci_voltage_table));
		PP_ASSERT_WITH_CODE(!result,
				"Failed to retrieve VDDCI_MEM table!",
				return result);
	}

	/* VDDC: built from the SCLK dependency table. */
	if (data->vddc_control == VEGA10_VOLTAGE_CONTROL_BY_SVID2 ||
			data->vddc_control == VEGA10_VOLTAGE_CONTROL_NONE) {
		result = vega10_get_vdd_voltage_table(hwmgr,
				table_info->vdd_dep_on_sclk,
				&(data->vddc_voltage_table));
		PP_ASSERT_WITH_CODE(!result,
				"Failed to retrieve VDDCR_SOC table!",
				return result);
	}

	/* If a table exceeds 16 entries, the assert's recovery action trims
	 * it down to the 16 highest voltages so it fits the SMC state table
	 * (the "failure" path here is the intended trimming, not an error). */
	PP_ASSERT_WITH_CODE(data->vddc_voltage_table.count <= 16,
			"Too many voltage values for VDDC. Trimming to fit state table.",
			vega10_trim_voltage_table_to_fit_state_table(hwmgr,
					16, &(data->vddc_voltage_table)));

	PP_ASSERT_WITH_CODE(data->vddci_voltage_table.count <= 16,
			"Too many voltage values for VDDCI. Trimming to fit state table.",
			vega10_trim_voltage_table_to_fit_state_table(hwmgr,
					16, &(data->vddci_voltage_table)));

	PP_ASSERT_WITH_CODE(data->mvdd_voltage_table.count <= 16,
			"Too many voltage values for MVDD. Trimming to fit state table.",
			vega10_trim_voltage_table_to_fit_state_table(hwmgr,
					16, &(data->mvdd_voltage_table)));


	return 0;
}
1117
1118/*
1119 * @fn vega10_init_dpm_state
1120 * @brief Function to initialize all Soft Min/Max and Hard Min/Max to 0xff.
1121 *
 * @param dpm_state - the address of the DPM Table to initialize.
1123 * @return None.
1124 */
1125static void vega10_init_dpm_state(struct vega10_dpm_state *dpm_state)
1126{
1127 dpm_state->soft_min_level = 0xff;
1128 dpm_state->soft_max_level = 0xff;
1129 dpm_state->hard_min_level = 0xff;
1130 dpm_state->hard_max_level = 0xff;
1131}
1132
1133static void vega10_setup_default_single_dpm_table(struct pp_hwmgr *hwmgr,
1134 struct vega10_single_dpm_table *dpm_table,
1135 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table)
1136{
1137 int i;
1138
1139 for (i = 0; i < dep_table->count; i++) {
1140 if (i == 0 || dpm_table->dpm_levels[dpm_table->count - 1].value !=
1141 dep_table->entries[i].clk) {
1142 dpm_table->dpm_levels[dpm_table->count].value =
1143 dep_table->entries[i].clk;
1144 dpm_table->dpm_levels[dpm_table->count].enabled = true;
1145 dpm_table->count++;
1146 }
1147 }
1148}
/* Build the default PCIe link table from VBIOS data, applying any
 * registry overrides for gen speed, lane width and LCLK. */
static int vega10_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data =
			(struct vega10_hwmgr *)(hwmgr->backend);
	struct vega10_pcie_table *pcie_table = &(data->dpm_table.pcie_table);
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)(hwmgr->pptable);
	struct phm_ppt_v1_pcie_table *bios_pcie_table =
			table_info->pcie_table;
	uint32_t i;

	PP_ASSERT_WITH_CODE(bios_pcie_table->count,
			"Incorrect number of PCIE States from VBIOS!",
			return -1);

	/* NOTE(review): only NUM_LINK_LEVELS - 1 entries are filled here,
	 * yet count is set to NUM_LINK_LEVELS below, so the last level may
	 * be left at whatever the table held before (zero if the backend
	 * was zero-allocated) -- confirm this is intentional. Also, the
	 * VBIOS table is indexed without checking i against
	 * bios_pcie_table->count -- presumably the VBIOS always supplies
	 * at least NUM_LINK_LEVELS - 1 entries; verify. */
	for (i = 0; i < NUM_LINK_LEVELS - 1; i++) {
		/* Registry overrides (if nonzero) win over VBIOS values. */
		if (data->registry_data.pcieSpeedOverride)
			pcie_table->pcie_gen[i] =
					data->registry_data.pcieSpeedOverride;
		else
			pcie_table->pcie_gen[i] =
					bios_pcie_table->entries[i].gen_speed;

		if (data->registry_data.pcieLaneOverride)
			pcie_table->pcie_lane[i] =
					data->registry_data.pcieLaneOverride;
		else
			pcie_table->pcie_lane[i] =
					bios_pcie_table->entries[i].lane_width;

		if (data->registry_data.pcieClockOverride)
			pcie_table->lclk[i] =
					data->registry_data.pcieClockOverride;
		else
			pcie_table->lclk[i] =
					bios_pcie_table->entries[i].pcie_sclk;
	}

	pcie_table->count = NUM_LINK_LEVELS;

	return 0;
}
1191
1192/*
1193 * This function is to initialize all DPM state tables
1194 * for SMU based on the dependency table.
1195 * Dynamic state patching function will then trim these
1196 * state tables to the allowed range based
1197 * on the power policy or external client requests,
1198 * such as UVD request, etc.
1199 */
1200static int vega10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
1201{
1202 struct vega10_hwmgr *data =
1203 (struct vega10_hwmgr *)(hwmgr->backend);
1204 struct phm_ppt_v2_information *table_info =
1205 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1206 struct vega10_single_dpm_table *dpm_table;
1207 uint32_t i;
1208
1209 struct phm_ppt_v1_clock_voltage_dependency_table *dep_soc_table =
1210 table_info->vdd_dep_on_socclk;
1211 struct phm_ppt_v1_clock_voltage_dependency_table *dep_gfx_table =
1212 table_info->vdd_dep_on_sclk;
1213 struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
1214 table_info->vdd_dep_on_mclk;
1215 struct phm_ppt_v1_mm_clock_voltage_dependency_table *dep_mm_table =
1216 table_info->mm_dep_table;
1217 struct phm_ppt_v1_clock_voltage_dependency_table *dep_dcef_table =
1218 table_info->vdd_dep_on_dcefclk;
1219 struct phm_ppt_v1_clock_voltage_dependency_table *dep_pix_table =
1220 table_info->vdd_dep_on_pixclk;
1221 struct phm_ppt_v1_clock_voltage_dependency_table *dep_disp_table =
1222 table_info->vdd_dep_on_dispclk;
1223 struct phm_ppt_v1_clock_voltage_dependency_table *dep_phy_table =
1224 table_info->vdd_dep_on_phyclk;
1225
1226 PP_ASSERT_WITH_CODE(dep_soc_table,
1227 "SOCCLK dependency table is missing. This table is mandatory",
1228 return -EINVAL);
1229 PP_ASSERT_WITH_CODE(dep_soc_table->count >= 1,
1230 "SOCCLK dependency table is empty. This table is mandatory",
1231 return -EINVAL);
1232
1233 PP_ASSERT_WITH_CODE(dep_gfx_table,
1234 "GFXCLK dependency table is missing. This table is mandatory",
1235 return -EINVAL);
1236 PP_ASSERT_WITH_CODE(dep_gfx_table->count >= 1,
1237 "GFXCLK dependency table is empty. This table is mandatory",
1238 return -EINVAL);
1239
1240 PP_ASSERT_WITH_CODE(dep_mclk_table,
1241 "MCLK dependency table is missing. This table is mandatory",
1242 return -EINVAL);
1243 PP_ASSERT_WITH_CODE(dep_mclk_table->count >= 1,
1244 "MCLK dependency table has to have is missing. This table is mandatory",
1245 return -EINVAL);
1246
1247 /* Initialize Sclk DPM table based on allow Sclk values */
1248 data->dpm_table.soc_table.count = 0;
1249 data->dpm_table.gfx_table.count = 0;
1250 data->dpm_table.dcef_table.count = 0;
1251
1252 dpm_table = &(data->dpm_table.soc_table);
1253 vega10_setup_default_single_dpm_table(hwmgr,
1254 dpm_table,
1255 dep_soc_table);
1256
1257 vega10_init_dpm_state(&(dpm_table->dpm_state));
1258
1259 dpm_table = &(data->dpm_table.gfx_table);
1260 vega10_setup_default_single_dpm_table(hwmgr,
1261 dpm_table,
1262 dep_gfx_table);
1263 vega10_init_dpm_state(&(dpm_table->dpm_state));
1264
1265 /* Initialize Mclk DPM table based on allow Mclk values */
1266 data->dpm_table.mem_table.count = 0;
1267 dpm_table = &(data->dpm_table.mem_table);
1268 vega10_setup_default_single_dpm_table(hwmgr,
1269 dpm_table,
1270 dep_mclk_table);
1271 vega10_init_dpm_state(&(dpm_table->dpm_state));
1272
1273 data->dpm_table.eclk_table.count = 0;
1274 dpm_table = &(data->dpm_table.eclk_table);
1275 for (i = 0; i < dep_mm_table->count; i++) {
1276 if (i == 0 || dpm_table->dpm_levels
1277 [dpm_table->count - 1].value !=
1278 dep_mm_table->entries[i].eclk) {
1279 dpm_table->dpm_levels[dpm_table->count].value =
1280 dep_mm_table->entries[i].eclk;
1281 dpm_table->dpm_levels[dpm_table->count].enabled =
1282 (i == 0) ? true : false;
1283 dpm_table->count++;
1284 }
1285 }
1286 vega10_init_dpm_state(&(dpm_table->dpm_state));
1287
1288 data->dpm_table.vclk_table.count = 0;
1289 data->dpm_table.dclk_table.count = 0;
1290 dpm_table = &(data->dpm_table.vclk_table);
1291 for (i = 0; i < dep_mm_table->count; i++) {
1292 if (i == 0 || dpm_table->dpm_levels
1293 [dpm_table->count - 1].value !=
1294 dep_mm_table->entries[i].vclk) {
1295 dpm_table->dpm_levels[dpm_table->count].value =
1296 dep_mm_table->entries[i].vclk;
1297 dpm_table->dpm_levels[dpm_table->count].enabled =
1298 (i == 0) ? true : false;
1299 dpm_table->count++;
1300 }
1301 }
1302 vega10_init_dpm_state(&(dpm_table->dpm_state));
1303
1304 dpm_table = &(data->dpm_table.dclk_table);
1305 for (i = 0; i < dep_mm_table->count; i++) {
1306 if (i == 0 || dpm_table->dpm_levels
1307 [dpm_table->count - 1].value !=
1308 dep_mm_table->entries[i].dclk) {
1309 dpm_table->dpm_levels[dpm_table->count].value =
1310 dep_mm_table->entries[i].dclk;
1311 dpm_table->dpm_levels[dpm_table->count].enabled =
1312 (i == 0) ? true : false;
1313 dpm_table->count++;
1314 }
1315 }
1316 vega10_init_dpm_state(&(dpm_table->dpm_state));
1317
1318 /* Assume there is no headless Vega10 for now */
1319 dpm_table = &(data->dpm_table.dcef_table);
1320 vega10_setup_default_single_dpm_table(hwmgr,
1321 dpm_table,
1322 dep_dcef_table);
1323
1324 vega10_init_dpm_state(&(dpm_table->dpm_state));
1325
1326 dpm_table = &(data->dpm_table.pixel_table);
1327 vega10_setup_default_single_dpm_table(hwmgr,
1328 dpm_table,
1329 dep_pix_table);
1330
1331 vega10_init_dpm_state(&(dpm_table->dpm_state));
1332
1333 dpm_table = &(data->dpm_table.display_table);
1334 vega10_setup_default_single_dpm_table(hwmgr,
1335 dpm_table,
1336 dep_disp_table);
1337
1338 vega10_init_dpm_state(&(dpm_table->dpm_state));
1339
1340 dpm_table = &(data->dpm_table.phy_table);
1341 vega10_setup_default_single_dpm_table(hwmgr,
1342 dpm_table,
1343 dep_phy_table);
1344
1345 vega10_init_dpm_state(&(dpm_table->dpm_state));
1346
1347 vega10_setup_default_pcie_table(hwmgr);
1348
1349 /* save a copy of the default DPM table */
1350 memcpy(&(data->golden_dpm_table), &(data->dpm_table),
1351 sizeof(struct vega10_dpm_table));
1352
1353 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1354 PHM_PlatformCaps_ODNinACSupport) ||
1355 phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1356 PHM_PlatformCaps_ODNinDCSupport)) {
1357 data->odn_dpm_table.odn_core_clock_dpm_levels.
1358 number_of_performance_levels = data->dpm_table.gfx_table.count;
1359 for (i = 0; i < data->dpm_table.gfx_table.count; i++) {
1360 data->odn_dpm_table.odn_core_clock_dpm_levels.
1361 performance_level_entries[i].clock =
1362 data->dpm_table.gfx_table.dpm_levels[i].value;
1363 data->odn_dpm_table.odn_core_clock_dpm_levels.
1364 performance_level_entries[i].enabled = true;
1365 }
1366
1367 data->odn_dpm_table.vdd_dependency_on_sclk.count =
1368 dep_gfx_table->count;
1369 for (i = 0; i < dep_gfx_table->count; i++) {
1370 data->odn_dpm_table.vdd_dependency_on_sclk.entries[i].clk =
1371 dep_gfx_table->entries[i].clk;
1372 data->odn_dpm_table.vdd_dependency_on_sclk.entries[i].vddInd =
1373 dep_gfx_table->entries[i].vddInd;
1374 data->odn_dpm_table.vdd_dependency_on_sclk.entries[i].cks_enable =
1375 dep_gfx_table->entries[i].cks_enable;
1376 data->odn_dpm_table.vdd_dependency_on_sclk.entries[i].cks_voffset =
1377 dep_gfx_table->entries[i].cks_voffset;
1378 }
1379
1380 data->odn_dpm_table.odn_memory_clock_dpm_levels.
1381 number_of_performance_levels = data->dpm_table.mem_table.count;
1382 for (i = 0; i < data->dpm_table.mem_table.count; i++) {
1383 data->odn_dpm_table.odn_memory_clock_dpm_levels.
1384 performance_level_entries[i].clock =
1385 data->dpm_table.mem_table.dpm_levels[i].value;
1386 data->odn_dpm_table.odn_memory_clock_dpm_levels.
1387 performance_level_entries[i].enabled = true;
1388 }
1389
1390 data->odn_dpm_table.vdd_dependency_on_mclk.count = dep_mclk_table->count;
1391 for (i = 0; i < dep_mclk_table->count; i++) {
1392 data->odn_dpm_table.vdd_dependency_on_mclk.entries[i].clk =
1393 dep_mclk_table->entries[i].clk;
1394 data->odn_dpm_table.vdd_dependency_on_mclk.entries[i].vddInd =
1395 dep_mclk_table->entries[i].vddInd;
1396 data->odn_dpm_table.vdd_dependency_on_mclk.entries[i].vddci =
1397 dep_mclk_table->entries[i].vddci;
1398 }
1399 }
1400
1401 return 0;
1402}
1403
1404/*
1405 * @fn vega10_populate_ulv_state
 * @brief Function to provide parameters for Ultra Low Voltage state to SMC.
1407 *
1408 * @param hwmgr - the address of the hardware manager.
1409 * @return Always 0.
1410 */
1411static int vega10_populate_ulv_state(struct pp_hwmgr *hwmgr)
1412{
1413 struct vega10_hwmgr *data =
1414 (struct vega10_hwmgr *)(hwmgr->backend);
1415 struct phm_ppt_v2_information *table_info =
1416 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1417
1418 data->smc_state_table.pp_table.UlvOffsetVid =
effa290c 1419 (uint8_t)table_info->us_ulv_voltage_offset;
f83a9991
EH
1420
1421 data->smc_state_table.pp_table.UlvSmnclkDid =
1422 (uint8_t)(table_info->us_ulv_smnclk_did);
1423 data->smc_state_table.pp_table.UlvMp1clkDid =
1424 (uint8_t)(table_info->us_ulv_mp1clk_did);
1425 data->smc_state_table.pp_table.UlvGfxclkBypass =
1426 (uint8_t)(table_info->us_ulv_gfxclk_bypass);
1427 data->smc_state_table.pp_table.UlvPhaseSheddingPsi0 =
1428 (uint8_t)(data->vddc_voltage_table.psi0_enable);
1429 data->smc_state_table.pp_table.UlvPhaseSheddingPsi1 =
1430 (uint8_t)(data->vddc_voltage_table.psi1_enable);
1431
1432 return 0;
1433}
1434
1435static int vega10_populate_single_lclk_level(struct pp_hwmgr *hwmgr,
1436 uint32_t lclock, uint8_t *curr_lclk_did)
1437{
1438 struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1439
1440 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(
1441 hwmgr,
1442 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
1443 lclock, &dividers),
1444 "Failed to get LCLK clock settings from VBIOS!",
1445 return -1);
1446
1447 *curr_lclk_did = dividers.ulDid;
1448
1449 return 0;
1450}
1451
1452static int vega10_populate_smc_link_levels(struct pp_hwmgr *hwmgr)
1453{
1454 int result = -1;
1455 struct vega10_hwmgr *data =
1456 (struct vega10_hwmgr *)(hwmgr->backend);
1457 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
1458 struct vega10_pcie_table *pcie_table =
1459 &(data->dpm_table.pcie_table);
1460 uint32_t i, j;
1461
1462 for (i = 0; i < pcie_table->count; i++) {
1463 pp_table->PcieGenSpeed[i] = pcie_table->pcie_gen[i];
1464 pp_table->PcieLaneCount[i] = pcie_table->pcie_lane[i];
1465
1466 result = vega10_populate_single_lclk_level(hwmgr,
1467 pcie_table->lclk[i], &(pp_table->LclkDid[i]));
1468 if (result) {
1469 pr_info("Populate LClock Level %d Failed!\n", i);
1470 return result;
1471 }
1472 }
1473
1474 j = i - 1;
1475 while (i < NUM_LINK_LEVELS) {
1476 pp_table->PcieGenSpeed[i] = pcie_table->pcie_gen[j];
1477 pp_table->PcieLaneCount[i] = pcie_table->pcie_lane[j];
1478
1479 result = vega10_populate_single_lclk_level(hwmgr,
1480 pcie_table->lclk[j], &(pp_table->LclkDid[i]));
1481 if (result) {
1482 pr_info("Populate LClock Level %d Failed!\n", i);
1483 return result;
1484 }
1485 i++;
1486 }
1487
1488 return result;
1489}
1490
1491/**
1492* Populates single SMC GFXSCLK structure using the provided engine clock
1493*
1494* @param hwmgr the address of the hardware manager
1495* @param gfx_clock the GFX clock to use to populate the structure.
1496* @param current_gfxclk_level location in PPTable for the SMC GFXCLK structure.
1497*/
1498
static int vega10_populate_single_gfx_level(struct pp_hwmgr *hwmgr,
		uint32_t gfx_clock, PllSetting_t *current_gfxclk_level)
{
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)(hwmgr->pptable);
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_sclk =
			table_info->vdd_dep_on_sclk;
	struct vega10_hwmgr *data =
			(struct vega10_hwmgr *)(hwmgr->backend);
	struct pp_atomfwctrl_clock_dividers_soc15 dividers;
	uint32_t i;

	/* With an overdrive VDDC update pending, use the user's ODN
	 * dependency table instead of the VBIOS one. */
	if (data->apply_overdrive_next_settings_mask &
			DPMTABLE_OD_UPDATE_VDDC)
		dep_on_sclk = (struct phm_ppt_v1_clock_voltage_dependency_table *)
				&(data->odn_dpm_table.vdd_dependency_on_sclk);

	PP_ASSERT_WITH_CODE(dep_on_sclk,
			"Invalid SOC_VDD-GFX_CLK Dependency Table!",
			return -EINVAL);

	/* The requested clock must match a dependency-table entry exactly. */
	for (i = 0; i < dep_on_sclk->count; i++) {
		if (dep_on_sclk->entries[i].clk == gfx_clock)
			break;
	}

	PP_ASSERT_WITH_CODE(dep_on_sclk->count > i,
			"Cannot find gfx_clk in SOC_VDD-GFX_CLK!",
			return -EINVAL);
	PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
			COMPUTE_GPUCLK_INPUT_FLAG_GFXCLK,
			gfx_clock, &dividers),
			"Failed to get GFX Clock settings from VBIOS!",
			return -EINVAL);

	/* Feedback Multiplier: bit 0:8 int, bit 15:12 post_div, bit 31:16 frac */
	current_gfxclk_level->FbMult =
			cpu_to_le32(dividers.ulPll_fb_mult);
	/* Spread FB Multiplier bit: bit 0:8 int, bit 31:16 frac */
	current_gfxclk_level->SsOn = dividers.ucPll_ss_enable;
	current_gfxclk_level->SsFbMult =
			cpu_to_le32(dividers.ulPll_ss_fbsmult);
	current_gfxclk_level->SsSlewFrac =
			cpu_to_le16(dividers.usPll_ss_slew_frac);
	current_gfxclk_level->Did = (uint8_t)(dividers.ulDid);

	return 0;
}
1547
1548/**
1549 * @brief Populates single SMC SOCCLK structure using the provided clock.
1550 *
1551 * @param hwmgr - the address of the hardware manager.
1552 * @param soc_clock - the SOC clock to use to populate the structure.
1553 * @param current_socclk_level - location in PPTable for the SMC SOCCLK structure.
1554 * @return 0 on success..
1555 */
static int vega10_populate_single_soc_level(struct pp_hwmgr *hwmgr,
		uint32_t soc_clock, uint8_t *current_soc_did,
		uint8_t *current_vol_index)
{
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)(hwmgr->pptable);
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_soc =
			table_info->vdd_dep_on_socclk;
	struct pp_atomfwctrl_clock_dividers_soc15 dividers;
	uint32_t i;

	PP_ASSERT_WITH_CODE(dep_on_soc,
			"Invalid SOC_VDD-SOC_CLK Dependency Table!",
			return -EINVAL);
	/* The requested clock must match a dependency-table entry exactly. */
	for (i = 0; i < dep_on_soc->count; i++) {
		if (dep_on_soc->entries[i].clk == soc_clock)
			break;
	}
	PP_ASSERT_WITH_CODE(dep_on_soc->count > i,
			"Cannot find SOC_CLK in SOC_VDD-SOC_CLK Dependency Table",
			return -EINVAL);
	PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
			COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
			soc_clock, &dividers),
			"Failed to get SOC Clock settings from VBIOS!",
			return -EINVAL);

	/* Divider ID from VBIOS; voltage index from the dependency entry. */
	*current_soc_did = (uint8_t)dividers.ulDid;
	*current_vol_index = (uint8_t)(dep_on_soc->entries[i].vddInd);

	return 0;
}
1588
1589uint16_t vega10_locate_vddc_given_clock(struct pp_hwmgr *hwmgr,
1590 uint32_t clk,
1591 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table)
1592{
1593 uint16_t i;
1594
1595 for (i = 0; i < dep_table->count; i++) {
1596 if (dep_table->entries[i].clk == clk)
1597 return dep_table->entries[i].vddc;
1598 }
1599
1600 pr_info("[LocateVddcGivenClock] Cannot locate SOC Vddc for this clock!");
1601 return 0;
1602}
1603
1604/**
1605* Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states
1606*
1607* @param hwmgr the address of the hardware manager
1608*/
static int vega10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data =
			(struct vega10_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)(hwmgr->pptable);
	/* SOCCLK dependency table: used below to look up the VDDC
	 * that pairs with each SOCCLK DPM level. */
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
			table_info->vdd_dep_on_socclk;
	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
	struct vega10_single_dpm_table *dpm_table = &(data->dpm_table.gfx_table);
	int result = 0;
	uint32_t i, j;

	/* Fill one SMC GFXCLK level per DPM level. */
	for (i = 0; i < dpm_table->count; i++) {
		result = vega10_populate_single_gfx_level(hwmgr,
				dpm_table->dpm_levels[i].value,
				&(pp_table->GfxclkLevel[i]));
		if (result)
			return result;
	}

	/* Pad the remaining SMC levels with the last valid level.
	 * NOTE(review): assumes dpm_table->count >= 1, otherwise 'j'
	 * underflows -- guaranteed upstream by the mandatory-table asserts
	 * in vega10_setup_default_dpm_tables; confirm no other caller path. */
	j = i - 1;
	while (i < NUM_GFXCLK_DPM_LEVELS) {
		result = vega10_populate_single_gfx_level(hwmgr,
				dpm_table->dpm_levels[j].value,
				&(pp_table->GfxclkLevel[i]));
		if (result)
			return result;
		i++;
	}

	pp_table->GfxclkSlewRate =
			cpu_to_le16(table_info->us_gfxclk_slew_rate);

	/* Same pattern for SOCCLK levels, including the voltage VID. */
	dpm_table = &(data->dpm_table.soc_table);
	for (i = 0; i < dpm_table->count; i++) {
		pp_table->SocVid[i] =
				(uint8_t)convert_to_vid(
				vega10_locate_vddc_given_clock(hwmgr,
						dpm_table->dpm_levels[i].value,
						dep_table));
		result = vega10_populate_single_soc_level(hwmgr,
				dpm_table->dpm_levels[i].value,
				&(pp_table->SocclkDid[i]),
				&(pp_table->SocDpmVoltageIndex[i]));
		if (result)
			return result;
	}

	j = i - 1;
	while (i < NUM_SOCCLK_DPM_LEVELS) {
		pp_table->SocVid[i] = pp_table->SocVid[j];
		result = vega10_populate_single_soc_level(hwmgr,
				dpm_table->dpm_levels[j].value,
				&(pp_table->SocclkDid[i]),
				&(pp_table->SocDpmVoltageIndex[i]));
		if (result)
			return result;
		i++;
	}

	return result;
}
1672
1673/**
 * @brief Populates single SMC memory clock (UCLK) structure using the provided clock.
1675 *
1676 * @param hwmgr - the address of the hardware manager.
1677 * @param mem_clock - the memory clock to use to populate the structure.
1678 * @return 0 on success..
1679 */
static int vega10_populate_single_memory_level(struct pp_hwmgr *hwmgr,
		uint32_t mem_clock, uint8_t *current_mem_vid,
		PllSetting_t *current_memclk_level, uint8_t *current_mem_soc_vind)
{
	struct vega10_hwmgr *data =
			(struct vega10_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)(hwmgr->pptable);
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_mclk =
			table_info->vdd_dep_on_mclk;
	struct pp_atomfwctrl_clock_dividers_soc15 dividers;
	uint32_t i;

	/* With an overdrive VDDC update pending, use the user's ODN
	 * dependency table instead of the VBIOS one. */
	if (data->apply_overdrive_next_settings_mask &
			DPMTABLE_OD_UPDATE_VDDC)
		dep_on_mclk = (struct phm_ppt_v1_clock_voltage_dependency_table *)
				&data->odn_dpm_table.vdd_dependency_on_mclk;

	PP_ASSERT_WITH_CODE(dep_on_mclk,
			"Invalid SOC_VDD-UCLK Dependency Table!",
			return -EINVAL);

	/* The requested clock must match a dependency-table entry exactly. */
	for (i = 0; i < dep_on_mclk->count; i++) {
		if (dep_on_mclk->entries[i].clk == mem_clock)
			break;
	}

	PP_ASSERT_WITH_CODE(dep_on_mclk->count > i,
			"Cannot find UCLK in SOC_VDD-UCLK Dependency Table!",
			return -EINVAL);

	PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(
			hwmgr, COMPUTE_GPUCLK_INPUT_FLAG_UCLK, mem_clock, &dividers),
			"Failed to get UCLK settings from VBIOS!",
			return -1);

	/* MVDD VID and SOC voltage index come from the dependency entry;
	 * feedback multiplier and DID come from the VBIOS divider query. */
	*current_mem_vid =
			(uint8_t)(convert_to_vid(dep_on_mclk->entries[i].mvdd));
	*current_mem_soc_vind =
			(uint8_t)(dep_on_mclk->entries[i].vddInd);
	current_memclk_level->FbMult = cpu_to_le32(dividers.ulPll_fb_mult);
	current_memclk_level->Did = (uint8_t)(dividers.ulDid);

	/* A divider ID of 0 would be an invalid VBIOS result. */
	PP_ASSERT_WITH_CODE(current_memclk_level->Did >= 1,
			"Invalid Divider ID!",
			return -EINVAL);

	return 0;
}
1729
1730/**
1731 * @brief Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states.
1732 *
1733 * @param pHwMgr - the address of the hardware manager.
1734 * @return PP_Result_OK on success.
1735 */
static int vega10_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data =
			(struct vega10_hwmgr *)(hwmgr->backend);
	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
	struct vega10_single_dpm_table *dpm_table =
			&(data->dpm_table.mem_table);
	int result = 0;
	uint32_t i, j, reg, mem_channels;

	/* Fill one SMC UCLK level per memory DPM level. */
	for (i = 0; i < dpm_table->count; i++) {
		result = vega10_populate_single_memory_level(hwmgr,
				dpm_table->dpm_levels[i].value,
				&(pp_table->MemVid[i]),
				&(pp_table->UclkLevel[i]),
				&(pp_table->MemSocVoltageIndex[i]));
		if (result)
			return result;
	}

	/* Pad the remaining SMC levels with the last valid level.
	 * NOTE(review): assumes dpm_table->count >= 1, otherwise 'j'
	 * underflows -- guaranteed by the mandatory MCLK table assert
	 * in vega10_setup_default_dpm_tables. */
	j = i - 1;
	while (i < NUM_UCLK_DPM_LEVELS) {
		result = vega10_populate_single_memory_level(hwmgr,
				dpm_table->dpm_levels[j].value,
				&(pp_table->MemVid[i]),
				&(pp_table->UclkLevel[i]),
				&(pp_table->MemSocVoltageIndex[i]));
		if (result)
			return result;
		i++;
	}

	/* Read the interleaved channel count from the Data Fabric's
	 * DramBaseAddress0 register and derive the total bus width. */
	reg = soc15_get_register_offset(DF_HWID, 0,
			mmDF_CS_AON0_DramBaseAddress0_BASE_IDX,
			mmDF_CS_AON0_DramBaseAddress0);
	mem_channels = (cgs_read_register(hwmgr->device, reg) &
			DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK) >>
			DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;
	pp_table->NumMemoryChannels = cpu_to_le16(mem_channels);
	pp_table->MemoryChannelWidth =
			cpu_to_le16(HBM_MEMORY_CHANNEL_WIDTH *
					channel_number[mem_channels]);

	pp_table->LowestUclkReservedForUlv =
			(uint8_t)(data->lowest_uclk_reserved_for_ulv);

	return result;
}
1784
/* Populate one display-clock column (DCEF/DISP/PIX/PHY) of the SMC
 * DisplayClockTable: frequency in 10 kHz units and the VDDC VID per level,
 * padding unused levels with the last valid pair. */
static int vega10_populate_single_display_type(struct pp_hwmgr *hwmgr,
		DSPCLK_e disp_clock)
{
	struct vega10_hwmgr *data =
			(struct vega10_hwmgr *)(hwmgr->backend);
	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)
			(hwmgr->pptable);
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
	uint32_t i;
	uint16_t clk = 0, vddc = 0;
	uint8_t vid = 0;

	/* Select the dependency table matching the requested display clock. */
	switch (disp_clock) {
	case DSPCLK_DCEFCLK:
		dep_table = table_info->vdd_dep_on_dcefclk;
		break;
	case DSPCLK_DISPCLK:
		dep_table = table_info->vdd_dep_on_dispclk;
		break;
	case DSPCLK_PIXCLK:
		dep_table = table_info->vdd_dep_on_pixclk;
		break;
	case DSPCLK_PHYCLK:
		dep_table = table_info->vdd_dep_on_phyclk;
		break;
	default:
		return -1;
	}

	PP_ASSERT_WITH_CODE(dep_table->count <= NUM_DSPCLK_LEVELS,
			"Number Of Entries Exceeded maximum!",
			return -1);

	for (i = 0; i < dep_table->count; i++) {
		/* clk is in 10 kHz units (dep table clocks are in 10 Hz). */
		clk = (uint16_t)(dep_table->entries[i].clk / 100);
		vddc = table_info->vddc_lookup_table->
				entries[dep_table->entries[i].vddInd].us_vdd;
		vid = (uint8_t)convert_to_vid(vddc);
		pp_table->DisplayClockTable[disp_clock][i].Freq =
				cpu_to_le16(clk);
		/* NOTE(review): Vid looks like a single-byte field elsewhere
		 * in this table; cpu_to_le16 here would misplace the value on
		 * big-endian builds -- confirm the field width in smu9's
		 * PPTable_t. */
		pp_table->DisplayClockTable[disp_clock][i].Vid =
				cpu_to_le16(vid);
	}

	/* Pad remaining levels with the last (clk, vid) pair; both stay
	 * zero if the dependency table was empty. */
	while (i < NUM_DSPCLK_LEVELS) {
		pp_table->DisplayClockTable[disp_clock][i].Freq =
				cpu_to_le16(clk);
		pp_table->DisplayClockTable[disp_clock][i].Vid =
				cpu_to_le16(vid);
		i++;
	}

	return 0;
}
1841
1842static int vega10_populate_all_display_clock_levels(struct pp_hwmgr *hwmgr)
1843{
1844 uint32_t i;
1845
1846 for (i = 0; i < DSPCLK_COUNT; i++) {
1847 PP_ASSERT_WITH_CODE(!vega10_populate_single_display_type(hwmgr, i),
1848 "Failed to populate Clock in DisplayClockTable!",
1849 return -1);
1850 }
1851
1852 return 0;
1853}
1854
1855static int vega10_populate_single_eclock_level(struct pp_hwmgr *hwmgr,
1856 uint32_t eclock, uint8_t *current_eclk_did,
1857 uint8_t *current_soc_vol)
1858{
1859 struct phm_ppt_v2_information *table_info =
1860 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1861 struct phm_ppt_v1_mm_clock_voltage_dependency_table *dep_table =
1862 table_info->mm_dep_table;
1863 struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1864 uint32_t i;
1865
1866 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
1867 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
1868 eclock, &dividers),
1869 "Failed to get ECLK clock settings from VBIOS!",
1870 return -1);
1871
1872 *current_eclk_did = (uint8_t)dividers.ulDid;
1873
1874 for (i = 0; i < dep_table->count; i++) {
1875 if (dep_table->entries[i].eclk == eclock)
1876 *current_soc_vol = dep_table->entries[i].vddcInd;
1877 }
1878
1879 return 0;
1880}
1881
1882static int vega10_populate_smc_vce_levels(struct pp_hwmgr *hwmgr)
1883{
1884 struct vega10_hwmgr *data =
1885 (struct vega10_hwmgr *)(hwmgr->backend);
1886 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
1887 struct vega10_single_dpm_table *dpm_table = &(data->dpm_table.eclk_table);
1888 int result = -EINVAL;
1889 uint32_t i, j;
1890
1891 for (i = 0; i < dpm_table->count; i++) {
1892 result = vega10_populate_single_eclock_level(hwmgr,
1893 dpm_table->dpm_levels[i].value,
1894 &(pp_table->EclkDid[i]),
1895 &(pp_table->VceDpmVoltageIndex[i]));
1896 if (result)
1897 return result;
1898 }
1899
1900 j = i - 1;
1901 while (i < NUM_VCE_DPM_LEVELS) {
1902 result = vega10_populate_single_eclock_level(hwmgr,
1903 dpm_table->dpm_levels[j].value,
1904 &(pp_table->EclkDid[i]),
1905 &(pp_table->VceDpmVoltageIndex[i]));
1906 if (result)
1907 return result;
1908 i++;
1909 }
1910
1911 return result;
1912}
1913
1914static int vega10_populate_single_vclock_level(struct pp_hwmgr *hwmgr,
1915 uint32_t vclock, uint8_t *current_vclk_did)
1916{
1917 struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1918
1919 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
1920 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
1921 vclock, &dividers),
1922 "Failed to get VCLK clock settings from VBIOS!",
1923 return -EINVAL);
1924
1925 *current_vclk_did = (uint8_t)dividers.ulDid;
1926
1927 return 0;
1928}
1929
1930static int vega10_populate_single_dclock_level(struct pp_hwmgr *hwmgr,
1931 uint32_t dclock, uint8_t *current_dclk_did)
1932{
1933 struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1934
1935 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
1936 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
1937 dclock, &dividers),
1938 "Failed to get DCLK clock settings from VBIOS!",
1939 return -EINVAL);
1940
1941 *current_dclk_did = (uint8_t)dividers.ulDid;
1942
1943 return 0;
1944}
1945
/* Fill the SMC UVD level tables: VCLK and DCLK dividers plus the per-level
 * voltage index, padding unused SMC slots with the highest populated level.
 * Returns 0 on success, a negative value on VBIOS query failure, or -1 if
 * the MM dependency table does not line up with the DPM tables.
 */
static int vega10_populate_smc_uvd_levels(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data =
			(struct vega10_hwmgr *)(hwmgr->backend);
	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
	struct vega10_single_dpm_table *vclk_dpm_table =
			&(data->dpm_table.vclk_table);
	struct vega10_single_dpm_table *dclk_dpm_table =
			&(data->dpm_table.dclk_table);
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)(hwmgr->pptable);
	struct phm_ppt_v1_mm_clock_voltage_dependency_table *dep_table =
			table_info->mm_dep_table;
	int result = -EINVAL;
	uint32_t i, j;

	/* One VBIOS divider lookup per VCLK DPM level. */
	for (i = 0; i < vclk_dpm_table->count; i++) {
		result = vega10_populate_single_vclock_level(hwmgr,
				vclk_dpm_table->dpm_levels[i].value,
				&(pp_table->VclkDid[i]));
		if (result)
			return result;
	}

	/* Pad remaining SMC slots by repeating the top VCLK level.
	 * (Assumes count >= 1, else j underflows — TODO confirm callers.) */
	j = i - 1;
	while (i < NUM_UVD_DPM_LEVELS) {
		result = vega10_populate_single_vclock_level(hwmgr,
				vclk_dpm_table->dpm_levels[j].value,
				&(pp_table->VclkDid[i]));
		if (result)
			return result;
		i++;
	}

	/* Same procedure for DCLK. */
	for (i = 0; i < dclk_dpm_table->count; i++) {
		result = vega10_populate_single_dclock_level(hwmgr,
				dclk_dpm_table->dpm_levels[i].value,
				&(pp_table->DclkDid[i]));
		if (result)
			return result;
	}

	j = i - 1;
	while (i < NUM_UVD_DPM_LEVELS) {
		result = vega10_populate_single_dclock_level(hwmgr,
				dclk_dpm_table->dpm_levels[j].value,
				&(pp_table->DclkDid[i]));
		if (result)
			return result;
		i++;
	}

	/* Voltage index per level; the MM dependency entries must match
	 * the VCLK/DCLK DPM levels 1:1 or the table is inconsistent. */
	for (i = 0; i < dep_table->count; i++) {
		if (dep_table->entries[i].vclk ==
				vclk_dpm_table->dpm_levels[i].value &&
			dep_table->entries[i].dclk ==
				dclk_dpm_table->dpm_levels[i].value)
			pp_table->UvdDpmVoltageIndex[i] =
					dep_table->entries[i].vddcInd;
		else
			return -1;
	}

	/* Pad the voltage indices with the last real entry as well. */
	j = i - 1;
	while (i < NUM_UVD_DPM_LEVELS) {
		pp_table->UvdDpmVoltageIndex[i] = dep_table->entries[j].vddcInd;
		i++;
	}

	return 0;
}
2017
2018static int vega10_populate_clock_stretcher_table(struct pp_hwmgr *hwmgr)
2019{
2020 struct vega10_hwmgr *data =
2021 (struct vega10_hwmgr *)(hwmgr->backend);
2022 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
2023 struct phm_ppt_v2_information *table_info =
2024 (struct phm_ppt_v2_information *)(hwmgr->pptable);
2025 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
2026 table_info->vdd_dep_on_sclk;
2027 uint32_t i;
2028
afc0255c 2029 for (i = 0; i < dep_table->count; i++) {
f83a9991 2030 pp_table->CksEnable[i] = dep_table->entries[i].cks_enable;
afc0255c
RZ
2031 pp_table->CksVidOffset[i] = (uint8_t)(dep_table->entries[i].cks_voffset
2032 * VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
f83a9991
EH
2033 }
2034
2035 return 0;
2036}
2037
2038static int vega10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
2039{
2040 struct vega10_hwmgr *data =
2041 (struct vega10_hwmgr *)(hwmgr->backend);
2042 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
2043 struct phm_ppt_v2_information *table_info =
2044 (struct phm_ppt_v2_information *)(hwmgr->pptable);
2045 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
2046 table_info->vdd_dep_on_sclk;
2047 struct pp_atomfwctrl_avfs_parameters avfs_params = {0};
2048 int result = 0;
2049 uint32_t i;
2050
2051 pp_table->MinVoltageVid = (uint8_t)0xff;
2052 pp_table->MaxVoltageVid = (uint8_t)0;
2053
2054 if (data->smu_features[GNLD_AVFS].supported) {
2055 result = pp_atomfwctrl_get_avfs_information(hwmgr, &avfs_params);
2056 if (!result) {
2057 pp_table->MinVoltageVid = (uint8_t)
f83a9991 2058 convert_to_vid((uint16_t)(avfs_params.ulMinVddc));
6524e494
RZ
2059 pp_table->MaxVoltageVid = (uint8_t)
2060 convert_to_vid((uint16_t)(avfs_params.ulMaxVddc));
2061
2062 pp_table->AConstant[0] = cpu_to_le32(avfs_params.ulMeanNsigmaAcontant0);
2063 pp_table->AConstant[1] = cpu_to_le32(avfs_params.ulMeanNsigmaAcontant1);
2064 pp_table->AConstant[2] = cpu_to_le32(avfs_params.ulMeanNsigmaAcontant2);
2065 pp_table->DC_tol_sigma = cpu_to_le16(avfs_params.usMeanNsigmaDcTolSigma);
2066 pp_table->Platform_mean = cpu_to_le16(avfs_params.usMeanNsigmaPlatformMean);
2067 pp_table->Platform_sigma = cpu_to_le16(avfs_params.usMeanNsigmaDcTolSigma);
2068 pp_table->PSM_Age_CompFactor = cpu_to_le16(avfs_params.usPsmAgeComfactor);
f83a9991
EH
2069
2070 pp_table->BtcGbVdroopTableCksOff.a0 =
2071 cpu_to_le32(avfs_params.ulGbVdroopTableCksoffA0);
6524e494 2072 pp_table->BtcGbVdroopTableCksOff.a0_shift = 20;
f83a9991
EH
2073 pp_table->BtcGbVdroopTableCksOff.a1 =
2074 cpu_to_le32(avfs_params.ulGbVdroopTableCksoffA1);
6524e494 2075 pp_table->BtcGbVdroopTableCksOff.a1_shift = 20;
f83a9991
EH
2076 pp_table->BtcGbVdroopTableCksOff.a2 =
2077 cpu_to_le32(avfs_params.ulGbVdroopTableCksoffA2);
6524e494
RZ
2078 pp_table->BtcGbVdroopTableCksOff.a2_shift = 20;
2079
2080 pp_table->OverrideBtcGbCksOn = avfs_params.ucEnableGbVdroopTableCkson;
2081 pp_table->BtcGbVdroopTableCksOn.a0 =
2082 cpu_to_le32(avfs_params.ulGbVdroopTableCksonA0);
2083 pp_table->BtcGbVdroopTableCksOn.a0_shift = 20;
2084 pp_table->BtcGbVdroopTableCksOn.a1 =
2085 cpu_to_le32(avfs_params.ulGbVdroopTableCksonA1);
2086 pp_table->BtcGbVdroopTableCksOn.a1_shift = 20;
2087 pp_table->BtcGbVdroopTableCksOn.a2 =
2088 cpu_to_le32(avfs_params.ulGbVdroopTableCksonA2);
2089 pp_table->BtcGbVdroopTableCksOn.a2_shift = 20;
f83a9991
EH
2090
2091 pp_table->AvfsGbCksOn.m1 =
2092 cpu_to_le32(avfs_params.ulGbFuseTableCksonM1);
2093 pp_table->AvfsGbCksOn.m2 =
6524e494 2094 cpu_to_le16(avfs_params.ulGbFuseTableCksonM2);
f83a9991
EH
2095 pp_table->AvfsGbCksOn.b =
2096 cpu_to_le32(avfs_params.ulGbFuseTableCksonB);
2097 pp_table->AvfsGbCksOn.m1_shift = 24;
2098 pp_table->AvfsGbCksOn.m2_shift = 12;
6524e494 2099 pp_table->AvfsGbCksOn.b_shift = 0;
f83a9991 2100
6524e494
RZ
2101 pp_table->OverrideAvfsGbCksOn =
2102 avfs_params.ucEnableGbFuseTableCkson;
f83a9991
EH
2103 pp_table->AvfsGbCksOff.m1 =
2104 cpu_to_le32(avfs_params.ulGbFuseTableCksoffM1);
2105 pp_table->AvfsGbCksOff.m2 =
6524e494 2106 cpu_to_le16(avfs_params.ulGbFuseTableCksoffM2);
f83a9991
EH
2107 pp_table->AvfsGbCksOff.b =
2108 cpu_to_le32(avfs_params.ulGbFuseTableCksoffB);
2109 pp_table->AvfsGbCksOff.m1_shift = 24;
2110 pp_table->AvfsGbCksOff.m2_shift = 12;
6524e494
RZ
2111 pp_table->AvfsGbCksOff.b_shift = 0;
2112
2113 for (i = 0; i < dep_table->count; i++) {
2114 if (dep_table->entries[i].sclk_offset == 0)
2115 pp_table->StaticVoltageOffsetVid[i] = 248;
2116 else
2117 pp_table->StaticVoltageOffsetVid[i] =
2118 (uint8_t)(dep_table->entries[i].sclk_offset *
f83a9991
EH
2119 VOLTAGE_VID_OFFSET_SCALE2 /
2120 VOLTAGE_VID_OFFSET_SCALE1);
6524e494 2121 }
f83a9991
EH
2122
2123 if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2124 data->disp_clk_quad_eqn_a) &&
2125 (PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2126 data->disp_clk_quad_eqn_b)) {
2127 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m1 =
2128 (int32_t)data->disp_clk_quad_eqn_a;
2129 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m2 =
6524e494 2130 (int32_t)data->disp_clk_quad_eqn_b;
f83a9991
EH
2131 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].b =
2132 (int32_t)data->disp_clk_quad_eqn_c;
2133 } else {
2134 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m1 =
2135 (int32_t)avfs_params.ulDispclk2GfxclkM1;
2136 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m2 =
6524e494 2137 (int32_t)avfs_params.ulDispclk2GfxclkM2;
f83a9991
EH
2138 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].b =
2139 (int32_t)avfs_params.ulDispclk2GfxclkB;
2140 }
2141
2142 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m1_shift = 24;
2143 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m2_shift = 12;
4bae05e1 2144 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].b_shift = 12;
f83a9991
EH
2145
2146 if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2147 data->dcef_clk_quad_eqn_a) &&
2148 (PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2149 data->dcef_clk_quad_eqn_b)) {
2150 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m1 =
2151 (int32_t)data->dcef_clk_quad_eqn_a;
2152 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m2 =
6524e494 2153 (int32_t)data->dcef_clk_quad_eqn_b;
f83a9991
EH
2154 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].b =
2155 (int32_t)data->dcef_clk_quad_eqn_c;
2156 } else {
2157 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m1 =
2158 (int32_t)avfs_params.ulDcefclk2GfxclkM1;
2159 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m2 =
6524e494 2160 (int32_t)avfs_params.ulDcefclk2GfxclkM2;
f83a9991
EH
2161 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].b =
2162 (int32_t)avfs_params.ulDcefclk2GfxclkB;
2163 }
2164
2165 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m1_shift = 24;
2166 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m2_shift = 12;
4bae05e1 2167 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].b_shift = 12;
f83a9991
EH
2168
2169 if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2170 data->pixel_clk_quad_eqn_a) &&
2171 (PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2172 data->pixel_clk_quad_eqn_b)) {
2173 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m1 =
2174 (int32_t)data->pixel_clk_quad_eqn_a;
2175 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m2 =
6524e494 2176 (int32_t)data->pixel_clk_quad_eqn_b;
f83a9991
EH
2177 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].b =
2178 (int32_t)data->pixel_clk_quad_eqn_c;
2179 } else {
2180 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m1 =
2181 (int32_t)avfs_params.ulPixelclk2GfxclkM1;
2182 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m2 =
6524e494 2183 (int32_t)avfs_params.ulPixelclk2GfxclkM2;
f83a9991
EH
2184 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].b =
2185 (int32_t)avfs_params.ulPixelclk2GfxclkB;
2186 }
2187
2188 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m1_shift = 24;
2189 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m2_shift = 12;
4bae05e1 2190 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].b_shift = 12;
f83a9991
EH
2191 if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2192 data->phy_clk_quad_eqn_a) &&
2193 (PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2194 data->phy_clk_quad_eqn_b)) {
2195 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m1 =
2196 (int32_t)data->phy_clk_quad_eqn_a;
2197 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m2 =
6524e494 2198 (int32_t)data->phy_clk_quad_eqn_b;
f83a9991
EH
2199 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].b =
2200 (int32_t)data->phy_clk_quad_eqn_c;
2201 } else {
2202 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m1 =
2203 (int32_t)avfs_params.ulPhyclk2GfxclkM1;
2204 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m2 =
6524e494 2205 (int32_t)avfs_params.ulPhyclk2GfxclkM2;
f83a9991
EH
2206 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].b =
2207 (int32_t)avfs_params.ulPhyclk2GfxclkB;
2208 }
2209
2210 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m1_shift = 24;
2211 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m2_shift = 12;
4bae05e1 2212 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].b_shift = 12;
f83a9991
EH
2213 } else {
2214 data->smu_features[GNLD_AVFS].supported = false;
2215 }
2216 }
2217
2218 return 0;
2219}
2220
2221static int vega10_populate_gpio_parameters(struct pp_hwmgr *hwmgr)
2222{
2223 struct vega10_hwmgr *data =
2224 (struct vega10_hwmgr *)(hwmgr->backend);
2225 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
2226 struct pp_atomfwctrl_gpio_parameters gpio_params = {0};
2227 int result;
2228
2229 result = pp_atomfwctrl_get_gpio_information(hwmgr, &gpio_params);
2230 if (!result) {
2231 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2232 PHM_PlatformCaps_RegulatorHot) &&
2233 (data->registry_data.regulator_hot_gpio_support)) {
2234 pp_table->VR0HotGpio = gpio_params.ucVR0HotGpio;
2235 pp_table->VR0HotPolarity = gpio_params.ucVR0HotPolarity;
2236 pp_table->VR1HotGpio = gpio_params.ucVR1HotGpio;
2237 pp_table->VR1HotPolarity = gpio_params.ucVR1HotPolarity;
2238 } else {
2239 pp_table->VR0HotGpio = 0;
2240 pp_table->VR0HotPolarity = 0;
2241 pp_table->VR1HotGpio = 0;
2242 pp_table->VR1HotPolarity = 0;
2243 }
2244
2245 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2246 PHM_PlatformCaps_AutomaticDCTransition) &&
2247 (data->registry_data.ac_dc_switch_gpio_support)) {
2248 pp_table->AcDcGpio = gpio_params.ucAcDcGpio;
2249 pp_table->AcDcPolarity = gpio_params.ucAcDcPolarity;
2250 } else {
2251 pp_table->AcDcGpio = 0;
2252 pp_table->AcDcPolarity = 0;
2253 }
2254 }
2255
2256 return result;
2257}
2258
2259static int vega10_avfs_enable(struct pp_hwmgr *hwmgr, bool enable)
2260{
2261 struct vega10_hwmgr *data =
2262 (struct vega10_hwmgr *)(hwmgr->backend);
2263
2264 if (data->smu_features[GNLD_AVFS].supported) {
2265 if (enable) {
2266 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
2267 true,
2268 data->smu_features[GNLD_AVFS].smu_feature_bitmap),
2269 "[avfs_control] Attempt to Enable AVFS feature Failed!",
2270 return -1);
2271 data->smu_features[GNLD_AVFS].enabled = true;
2272 } else {
2273 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
2274 false,
2275 data->smu_features[GNLD_AVFS].smu_feature_id),
2276 "[avfs_control] Attempt to Disable AVFS feature Failed!",
2277 return -1);
2278 data->smu_features[GNLD_AVFS].enabled = false;
2279 }
2280 }
2281
2282 return 0;
2283}
2284
/**
 * Initializes the SMC pptable and uploads it to the SMU.
 *
 * Builds the default DPM tables, fills every section of the in-memory
 * pptable (voltage modes, ULV, link/graphics/memory/display levels,
 * VCE/UVD levels, clock stretcher, AVFS, GPIOs, averaging alphas),
 * uploads the table, and enables AVFS.
 *
 * @param hwmgr the address of the powerplay hardware manager.
 * @return 0 on success, or the first failing step's error code.
 */
static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
{
	int result;
	struct vega10_hwmgr *data =
			(struct vega10_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)(hwmgr->pptable);
	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
	struct pp_atomfwctrl_voltage_table voltage_table;

	result = vega10_setup_default_dpm_tables(hwmgr);
	PP_ASSERT_WITH_CODE(!result,
			"Failed to setup default DPM tables!",
			return result);

	/* NOTE(review): the return value of this VBIOS query is not
	 * checked; max_vid_step may be stale on failure — TODO confirm. */
	pp_atomfwctrl_get_voltage_table_v4(hwmgr, VOLTAGE_TYPE_VDDC,
			VOLTAGE_OBJ_SVID2,  &voltage_table);
	pp_table->MaxVidStep = voltage_table.max_vid_step;

	/* Per-domain voltage control modes come straight from the pptable. */
	pp_table->GfxDpmVoltageMode =
			(uint8_t)(table_info->uc_gfx_dpm_voltage_mode);
	pp_table->SocDpmVoltageMode =
			(uint8_t)(table_info->uc_soc_dpm_voltage_mode);
	pp_table->UclkDpmVoltageMode =
			(uint8_t)(table_info->uc_uclk_dpm_voltage_mode);
	pp_table->UvdDpmVoltageMode =
			(uint8_t)(table_info->uc_uvd_dpm_voltage_mode);
	pp_table->VceDpmVoltageMode =
			(uint8_t)(table_info->uc_vce_dpm_voltage_mode);
	pp_table->Mp0DpmVoltageMode =
			(uint8_t)(table_info->uc_mp0_dpm_voltage_mode);

	pp_table->DisplayDpmVoltageMode =
			(uint8_t)(table_info->uc_dcef_dpm_voltage_mode);

	/* ULV is only meaningful with a non-zero voltage offset. */
	if (data->registry_data.ulv_support &&
			table_info->us_ulv_voltage_offset) {
		result = vega10_populate_ulv_state(hwmgr);
		PP_ASSERT_WITH_CODE(!result,
				"Failed to initialize ULV state!",
				return result);
	}

	result = vega10_populate_smc_link_levels(hwmgr);
	PP_ASSERT_WITH_CODE(!result,
			"Failed to initialize Link Level!",
			return result);

	result = vega10_populate_all_graphic_levels(hwmgr);
	PP_ASSERT_WITH_CODE(!result,
			"Failed to initialize Graphics Level!",
			return result);

	result = vega10_populate_all_memory_levels(hwmgr);
	PP_ASSERT_WITH_CODE(!result,
			"Failed to initialize Memory Level!",
			return result);

	result = vega10_populate_all_display_clock_levels(hwmgr);
	PP_ASSERT_WITH_CODE(!result,
			"Failed to initialize Display Level!",
			return result);

	result = vega10_populate_smc_vce_levels(hwmgr);
	PP_ASSERT_WITH_CODE(!result,
			"Failed to initialize VCE Level!",
			return result);

	result = vega10_populate_smc_uvd_levels(hwmgr);
	PP_ASSERT_WITH_CODE(!result,
			"Failed to initialize UVD Level!",
			return result);

	if (data->registry_data.clock_stretcher_support) {
		result = vega10_populate_clock_stretcher_table(hwmgr);
		PP_ASSERT_WITH_CODE(!result,
				"Failed to populate Clock Stretcher Table!",
				return result);
	}

	result = vega10_populate_avfs_parameters(hwmgr);
	PP_ASSERT_WITH_CODE(!result,
			"Failed to initialize AVFS Parameters!",
			return result);

	result = vega10_populate_gpio_parameters(hwmgr);
	PP_ASSERT_WITH_CODE(!result,
			"Failed to initialize GPIO Parameters!",
			return result);

	/* Telemetry smoothing factors for clock/activity averaging. */
	pp_table->GfxclkAverageAlpha = (uint8_t)
			(data->gfxclk_average_alpha);
	pp_table->SocclkAverageAlpha = (uint8_t)
			(data->socclk_average_alpha);
	pp_table->UclkAverageAlpha = (uint8_t)
			(data->uclk_average_alpha);
	pp_table->GfxActivityAverageAlpha = (uint8_t)
			(data->gfx_activity_average_alpha);

	result = vega10_copy_table_to_smc(hwmgr->smumgr,
			(uint8_t *)pp_table, PPTABLE);
	PP_ASSERT_WITH_CODE(!result,
			"Failed to upload PPtable!", return result);

	result = vega10_avfs_enable(hwmgr, true);
	PP_ASSERT_WITH_CODE(!result, "Attempt to enable AVFS feature Failed!",
					return result);

	return 0;
}
2402
2403static int vega10_enable_thermal_protection(struct pp_hwmgr *hwmgr)
2404{
2405 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
2406
2407 if (data->smu_features[GNLD_THERMAL].supported) {
2408 if (data->smu_features[GNLD_THERMAL].enabled)
2409 pr_info("THERMAL Feature Already enabled!");
2410
2411 PP_ASSERT_WITH_CODE(
2412 !vega10_enable_smc_features(hwmgr->smumgr,
2413 true,
2414 data->smu_features[GNLD_THERMAL].smu_feature_bitmap),
2415 "Enable THERMAL Feature Failed!",
2416 return -1);
2417 data->smu_features[GNLD_THERMAL].enabled = true;
2418 }
2419
2420 return 0;
2421}
2422
2423static int vega10_enable_vrhot_feature(struct pp_hwmgr *hwmgr)
2424{
2425 struct vega10_hwmgr *data =
2426 (struct vega10_hwmgr *)(hwmgr->backend);
2427
2428 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2429 PHM_PlatformCaps_RegulatorHot)) {
2430 if (data->smu_features[GNLD_VR0HOT].supported) {
2431 PP_ASSERT_WITH_CODE(
2432 !vega10_enable_smc_features(hwmgr->smumgr,
2433 true,
2434 data->smu_features[GNLD_VR0HOT].smu_feature_bitmap),
2435 "Attempt to Enable VR0 Hot feature Failed!",
2436 return -1);
2437 data->smu_features[GNLD_VR0HOT].enabled = true;
2438 } else {
2439 if (data->smu_features[GNLD_VR1HOT].supported) {
2440 PP_ASSERT_WITH_CODE(
2441 !vega10_enable_smc_features(hwmgr->smumgr,
2442 true,
2443 data->smu_features[GNLD_VR1HOT].smu_feature_bitmap),
2444 "Attempt to Enable VR0 Hot feature Failed!",
2445 return -1);
2446 data->smu_features[GNLD_VR1HOT].enabled = true;
2447 }
2448 }
2449 }
2450 return 0;
2451}
2452
2453static int vega10_enable_ulv(struct pp_hwmgr *hwmgr)
2454{
2455 struct vega10_hwmgr *data =
2456 (struct vega10_hwmgr *)(hwmgr->backend);
2457
2458 if (data->registry_data.ulv_support) {
2459 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
2460 true, data->smu_features[GNLD_ULV].smu_feature_bitmap),
2461 "Enable ULV Feature Failed!",
2462 return -1);
2463 data->smu_features[GNLD_ULV].enabled = true;
2464 }
2465
2466 return 0;
2467}
2468
2469static int vega10_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
2470{
2471 struct vega10_hwmgr *data =
2472 (struct vega10_hwmgr *)(hwmgr->backend);
2473
2474 if (data->smu_features[GNLD_DS_GFXCLK].supported) {
2475 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
2476 true, data->smu_features[GNLD_DS_GFXCLK].smu_feature_bitmap),
2477 "Attempt to Enable DS_GFXCLK Feature Failed!",
2478 return -1);
2479 data->smu_features[GNLD_DS_GFXCLK].enabled = true;
2480 }
2481
2482 if (data->smu_features[GNLD_DS_SOCCLK].supported) {
2483 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
2484 true, data->smu_features[GNLD_DS_SOCCLK].smu_feature_bitmap),
2485 "Attempt to Enable DS_GFXCLK Feature Failed!",
2486 return -1);
2487 data->smu_features[GNLD_DS_SOCCLK].enabled = true;
2488 }
2489
2490 if (data->smu_features[GNLD_DS_LCLK].supported) {
2491 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
2492 true, data->smu_features[GNLD_DS_LCLK].smu_feature_bitmap),
2493 "Attempt to Enable DS_GFXCLK Feature Failed!",
2494 return -1);
2495 data->smu_features[GNLD_DS_LCLK].enabled = true;
2496 }
2497
2498 return 0;
2499}
2500
2501/**
2502 * @brief Tell SMC to enabled the supported DPMs.
2503 *
2504 * @param hwmgr - the address of the powerplay hardware manager.
2505 * @Param bitmap - bitmap for the features to enabled.
2506 * @return 0 on at least one DPM is successfully enabled.
2507 */
2508static int vega10_start_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap)
2509{
2510 struct vega10_hwmgr *data =
2511 (struct vega10_hwmgr *)(hwmgr->backend);
2512 uint32_t i, feature_mask = 0;
2513
2514 for (i = 0; i < GNLD_DPM_MAX; i++) {
2515 if (data->smu_features[i].smu_feature_bitmap & bitmap) {
2516 if (data->smu_features[i].supported) {
2517 if (!data->smu_features[i].enabled) {
2518 feature_mask |= data->smu_features[i].
2519 smu_feature_bitmap;
2520 data->smu_features[i].enabled = true;
2521 }
2522 }
2523 }
2524 }
2525
2526 if (vega10_enable_smc_features(hwmgr->smumgr,
2527 true, feature_mask)) {
2528 for (i = 0; i < GNLD_DPM_MAX; i++) {
2529 if (data->smu_features[i].smu_feature_bitmap &
2530 feature_mask)
2531 data->smu_features[i].enabled = false;
2532 }
2533 }
2534
2535 if(data->smu_features[GNLD_LED_DISPLAY].supported == true){
2536 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
2537 true, data->smu_features[GNLD_LED_DISPLAY].smu_feature_bitmap),
2538 "Attempt to Enable LED DPM feature Failed!", return -EINVAL);
2539 data->smu_features[GNLD_LED_DISPLAY].enabled = true;
2540 }
2541
2542 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2543 PHM_PlatformCaps_Falcon_QuickTransition)) {
2544 if (data->smu_features[GNLD_ACDC].supported) {
2545 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
2546 true, data->smu_features[GNLD_ACDC].smu_feature_bitmap),
2547 "Attempt to Enable DS_GFXCLK Feature Failed!",
2548 return -1);
2549 data->smu_features[GNLD_ACDC].enabled = true;
2550 }
2551 }
2552
2553 return 0;
2554}
2555
/* Full DPM bring-up sequence: configure telemetry, build and upload the
 * SMC table, then enable thermal protection, VR hot, ULV, deep sleep,
 * the DPM features, power containment, and the power-control level.
 * Failures after the initial checks are collected into 'result' so later
 * steps still run.
 */
static int vega10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data =
			(struct vega10_hwmgr *)(hwmgr->backend);
	int tmp_result, result = 0;

	tmp_result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
			PPSMC_MSG_ConfigureTelemetry, data->config_telemetry);
	PP_ASSERT_WITH_CODE(!tmp_result,
			"Failed to configure telemetry!",
			return tmp_result);

	/* Display count is reported separately later; start from zero. */
	smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
			PPSMC_MSG_NumOfDisplays, 0);

	/* If DPM is already running, skip the whole re-enable sequence. */
	tmp_result = (!vega10_is_dpm_running(hwmgr)) ? 0 : -1;
	PP_ASSERT_WITH_CODE(!tmp_result,
			"DPM is already running right , skipping re-enablement!",
			return 0);

	tmp_result = vega10_construct_voltage_tables(hwmgr);
	PP_ASSERT_WITH_CODE(!tmp_result,
			"Failed to contruct voltage tables!",
			result = tmp_result);

	tmp_result = vega10_init_smc_table(hwmgr);
	PP_ASSERT_WITH_CODE(!tmp_result,
			"Failed to initialize SMC table!",
			result = tmp_result);

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_ThermalController)) {
		tmp_result = vega10_enable_thermal_protection(hwmgr);
		PP_ASSERT_WITH_CODE(!tmp_result,
				"Failed to enable thermal protection!",
				result = tmp_result);
	}

	tmp_result = vega10_enable_vrhot_feature(hwmgr);
	PP_ASSERT_WITH_CODE(!tmp_result,
			"Failed to enable VR hot feature!",
			result = tmp_result);

	tmp_result = vega10_enable_ulv(hwmgr);
	PP_ASSERT_WITH_CODE(!tmp_result,
			"Failed to enable ULV!",
			result = tmp_result);

	tmp_result = vega10_enable_deep_sleep_master_switch(hwmgr);
	PP_ASSERT_WITH_CODE(!tmp_result,
			"Failed to enable deep sleep master switch!",
			result = tmp_result);

	tmp_result = vega10_start_dpm(hwmgr, SMC_DPM_FEATURES);
	PP_ASSERT_WITH_CODE(!tmp_result,
			"Failed to start DPM!", result = tmp_result);

	tmp_result = vega10_enable_power_containment(hwmgr);
	PP_ASSERT_WITH_CODE(!tmp_result,
			"Failed to enable power containment!",
			result = tmp_result);

	tmp_result = vega10_power_control_set_level(hwmgr);
	PP_ASSERT_WITH_CODE(!tmp_result,
			"Failed to power control set level!",
			result = tmp_result);

	return result;
}
2625
/* Report the size of the vega10-specific hardware power-state struct so
 * the powerplay core can allocate state entries.
 */
static int vega10_get_power_state_size(struct pp_hwmgr *hwmgr)
{
	return sizeof(struct vega10_power_state);
}
2630
/* Per-entry callback for vega10_get_powerplay_table_entry: decodes one
 * ATOM power-state entry into the generic pp_power_state classification/
 * validation fields and appends its low and high performance levels
 * (soc/gfx/mem clocks) to the vega10 power state.
 */
static int vega10_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr,
		void *state, struct pp_power_state *power_state,
		void *pp_table, uint32_t classification_flag)
{
	struct vega10_power_state *vega10_power_state =
			cast_phw_vega10_power_state(&(power_state->hardware));
	struct vega10_performance_level *performance_level;
	ATOM_Vega10_State *state_entry = (ATOM_Vega10_State *)state;
	ATOM_Vega10_POWERPLAYTABLE *powerplay_table =
			(ATOM_Vega10_POWERPLAYTABLE *)pp_table;
	/* The dependency tables live at byte offsets inside the raw
	 * powerplay table blob. */
	ATOM_Vega10_SOCCLK_Dependency_Table *socclk_dep_table =
			(ATOM_Vega10_SOCCLK_Dependency_Table *)
			(((unsigned long)powerplay_table) +
			le16_to_cpu(powerplay_table->usSocclkDependencyTableOffset));
	ATOM_Vega10_GFXCLK_Dependency_Table *gfxclk_dep_table =
			(ATOM_Vega10_GFXCLK_Dependency_Table *)
			(((unsigned long)powerplay_table) +
			le16_to_cpu(powerplay_table->usGfxclkDependencyTableOffset));
	ATOM_Vega10_MCLK_Dependency_Table *mclk_dep_table =
			(ATOM_Vega10_MCLK_Dependency_Table *)
			(((unsigned long)powerplay_table) +
			le16_to_cpu(powerplay_table->usMclkDependencyTableOffset));


	/* The following fields are not initialized here:
	 * id orderedList allStatesList
	 */
	power_state->classification.ui_label =
			(le16_to_cpu(state_entry->usClassification) &
			ATOM_PPLIB_CLASSIFICATION_UI_MASK) >>
			ATOM_PPLIB_CLASSIFICATION_UI_SHIFT;
	power_state->classification.flags = classification_flag;
	/* NOTE: There is a classification2 flag in BIOS
	 * that is not being used right now
	 */
	power_state->classification.temporary_state = false;
	power_state->classification.to_be_deleted = false;

	power_state->validation.disallowOnDC =
			((le32_to_cpu(state_entry->ulCapsAndSettings) &
					ATOM_Vega10_DISALLOW_ON_DC) != 0);

	power_state->display.disableFrameModulation = false;
	power_state->display.limitRefreshrate = false;
	power_state->display.enableVariBright =
			((le32_to_cpu(state_entry->ulCapsAndSettings) &
					ATOM_Vega10_ENABLE_VARIBRIGHT) != 0);

	power_state->validation.supportedPowerLevels = 0;
	power_state->uvd_clocks.VCLK = 0;
	power_state->uvd_clocks.DCLK = 0;
	power_state->temperatures.min = 0;
	power_state->temperatures.max = 0;

	/* Note: the count is incremented before the bounds checks below,
	 * so the checks guard the *next* append, not this one. */
	performance_level = &(vega10_power_state->performance_levels
			[vega10_power_state->performance_level_count++]);

	PP_ASSERT_WITH_CODE(
			(vega10_power_state->performance_level_count <
					NUM_GFXCLK_DPM_LEVELS),
			"Performance levels exceeds SMC limit!",
			return -1);

	PP_ASSERT_WITH_CODE(
			(vega10_power_state->performance_level_count <=
					hwmgr->platform_descriptor.
					hardwareActivityPerformanceLevels),
			"Performance levels exceeds Driver limit!",
			return -1);

	/* Performance levels are arranged from low to high. */
	performance_level->soc_clock = socclk_dep_table->entries
			[state_entry->ucSocClockIndexLow].ulClk;
	performance_level->gfx_clock = gfxclk_dep_table->entries
			[state_entry->ucGfxClockIndexLow].ulClk;
	performance_level->mem_clock = mclk_dep_table->entries
			[state_entry->ucMemClockIndexLow].ulMemClk;

	performance_level = &(vega10_power_state->performance_levels
			[vega10_power_state->performance_level_count++]);

	performance_level->soc_clock = socclk_dep_table->entries
			[state_entry->ucSocClockIndexHigh].ulClk;
	performance_level->gfx_clock = gfxclk_dep_table->entries
			[state_entry->ucGfxClockIndexHigh].ulClk;
	performance_level->mem_clock = mclk_dep_table->entries
			[state_entry->ucMemClockIndexHigh].ulMemClk;
	return 0;
}
2720
2721static int vega10_get_pp_table_entry(struct pp_hwmgr *hwmgr,
2722 unsigned long entry_index, struct pp_power_state *state)
2723{
2724 int result;
2725 struct vega10_power_state *ps;
2726
2727 state->hardware.magic = PhwVega10_Magic;
2728
2729 ps = cast_phw_vega10_power_state(&state->hardware);
2730
2731 result = vega10_get_powerplay_table_entry(hwmgr, entry_index, state,
2732 vega10_get_pp_table_entry_callback_func);
2733
2734 /*
2735 * This is the earliest time we have all the dependency table
2736 * and the VBIOS boot state
2737 */
2738 /* set DC compatible flag if this state supports DC */
2739 if (!state->validation.disallowOnDC)
2740 ps->dc_compatible = true;
2741
2742 ps->uvd_clks.vclk = state->uvd_clocks.VCLK;
2743 ps->uvd_clks.dclk = state->uvd_clocks.DCLK;
2744
2745 return 0;
2746}
2747
/* Hook invoked to adjust the VBIOS boot state after parsing.
 * Intentionally a no-op on Vega10: the boot state needs no patching.
 */
static int vega10_patch_boot_state(struct pp_hwmgr *hwmgr,
		struct pp_hw_power_state *hw_ps)
{
	return 0;
}
2753
2754static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
2755 struct pp_power_state *request_ps,
2756 const struct pp_power_state *current_ps)
2757{
2758 struct vega10_power_state *vega10_ps =
2759 cast_phw_vega10_power_state(&request_ps->hardware);
2760 uint32_t sclk;
2761 uint32_t mclk;
2762 struct PP_Clocks minimum_clocks = {0};
2763 bool disable_mclk_switching;
2764 bool disable_mclk_switching_for_frame_lock;
2765 bool disable_mclk_switching_for_vr;
2766 bool force_mclk_high;
2767 struct cgs_display_info info = {0};
2768 const struct phm_clock_and_voltage_limits *max_limits;
2769 uint32_t i;
2770 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
2771 struct phm_ppt_v2_information *table_info =
2772 (struct phm_ppt_v2_information *)(hwmgr->pptable);
2773 int32_t count;
2774 uint32_t stable_pstate_sclk_dpm_percentage;
2775 uint32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0;
2776 uint32_t latency;
2777
2778 data->battery_state = (PP_StateUILabel_Battery ==
2779 request_ps->classification.ui_label);
2780
2781 if (vega10_ps->performance_level_count != 2)
2782 pr_info("VI should always have 2 performance levels");
2783
2784 max_limits = (PP_PowerSource_AC == hwmgr->power_source) ?
2785 &(hwmgr->dyn_state.max_clock_voltage_on_ac) :
2786 &(hwmgr->dyn_state.max_clock_voltage_on_dc);
2787
2788 /* Cap clock DPM tables at DC MAX if it is in DC. */
2789 if (PP_PowerSource_DC == hwmgr->power_source) {
2790 for (i = 0; i < vega10_ps->performance_level_count; i++) {
2791 if (vega10_ps->performance_levels[i].mem_clock >
2792 max_limits->mclk)
2793 vega10_ps->performance_levels[i].mem_clock =
2794 max_limits->mclk;
2795 if (vega10_ps->performance_levels[i].gfx_clock >
2796 max_limits->sclk)
2797 vega10_ps->performance_levels[i].gfx_clock =
2798 max_limits->sclk;
2799 }
2800 }
2801
2802 vega10_ps->vce_clks.evclk = hwmgr->vce_arbiter.evclk;
2803 vega10_ps->vce_clks.ecclk = hwmgr->vce_arbiter.ecclk;
2804
2805 cgs_get_active_displays_info(hwmgr->device, &info);
2806
2807 /* result = PHM_CheckVBlankTime(hwmgr, &vblankTooShort);*/
2808 minimum_clocks.engineClock = hwmgr->display_config.min_core_set_clock;
2809 /* minimum_clocks.memoryClock = hwmgr->display_config.min_mem_set_clock; */
2810
2811 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2812 PHM_PlatformCaps_StablePState)) {
2813 PP_ASSERT_WITH_CODE(
2814 data->registry_data.stable_pstate_sclk_dpm_percentage >= 1 &&
2815 data->registry_data.stable_pstate_sclk_dpm_percentage <= 100,
2816 "percent sclk value must range from 1% to 100%, setting default value",
2817 stable_pstate_sclk_dpm_percentage = 75);
2818
2819 max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac);
2820 stable_pstate_sclk = (max_limits->sclk *
2821 stable_pstate_sclk_dpm_percentage) / 100;
2822
2823 for (count = table_info->vdd_dep_on_sclk->count - 1;
2824 count >= 0; count--) {
2825 if (stable_pstate_sclk >=
2826 table_info->vdd_dep_on_sclk->entries[count].clk) {
2827 stable_pstate_sclk =
2828 table_info->vdd_dep_on_sclk->entries[count].clk;
2829 break;
2830 }
2831 }
2832
2833 if (count < 0)
2834 stable_pstate_sclk = table_info->vdd_dep_on_sclk->entries[0].clk;
2835
2836 stable_pstate_mclk = max_limits->mclk;
2837
2838 minimum_clocks.engineClock = stable_pstate_sclk;
2839 minimum_clocks.memoryClock = stable_pstate_mclk;
2840 }
2841
2842 if (minimum_clocks.engineClock < hwmgr->gfx_arbiter.sclk)
2843 minimum_clocks.engineClock = hwmgr->gfx_arbiter.sclk;
2844
2845 if (minimum_clocks.memoryClock < hwmgr->gfx_arbiter.mclk)
2846 minimum_clocks.memoryClock = hwmgr->gfx_arbiter.mclk;
2847
2848 vega10_ps->sclk_threshold = hwmgr->gfx_arbiter.sclk_threshold;
2849
2850 if (hwmgr->gfx_arbiter.sclk_over_drive) {
2851 PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.sclk_over_drive <=
2852 hwmgr->platform_descriptor.overdriveLimit.engineClock),
2853 "Overdrive sclk exceeds limit",
2854 hwmgr->gfx_arbiter.sclk_over_drive =
2855 hwmgr->platform_descriptor.overdriveLimit.engineClock);
2856
2857 if (hwmgr->gfx_arbiter.sclk_over_drive >= hwmgr->gfx_arbiter.sclk)
2858 vega10_ps->performance_levels[1].gfx_clock =
2859 hwmgr->gfx_arbiter.sclk_over_drive;
2860 }
2861
2862 if (hwmgr->gfx_arbiter.mclk_over_drive) {
2863 PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.mclk_over_drive <=
2864 hwmgr->platform_descriptor.overdriveLimit.memoryClock),
2865 "Overdrive mclk exceeds limit",
2866 hwmgr->gfx_arbiter.mclk_over_drive =
2867 hwmgr->platform_descriptor.overdriveLimit.memoryClock);
2868
2869 if (hwmgr->gfx_arbiter.mclk_over_drive >= hwmgr->gfx_arbiter.mclk)
2870 vega10_ps->performance_levels[1].mem_clock =
2871 hwmgr->gfx_arbiter.mclk_over_drive;
2872 }
2873
2874 disable_mclk_switching_for_frame_lock = phm_cap_enabled(
2875 hwmgr->platform_descriptor.platformCaps,
2876 PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
2877 disable_mclk_switching_for_vr = phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2878 PHM_PlatformCaps_DisableMclkSwitchForVR);
2879 force_mclk_high = phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2880 PHM_PlatformCaps_ForceMclkHigh);
2881
2882 disable_mclk_switching = (info.display_count > 1) ||
2883 disable_mclk_switching_for_frame_lock ||
2884 disable_mclk_switching_for_vr ||
2885 force_mclk_high;
2886
2887 sclk = vega10_ps->performance_levels[0].gfx_clock;
2888 mclk = vega10_ps->performance_levels[0].mem_clock;
2889
2890 if (sclk < minimum_clocks.engineClock)
2891 sclk = (minimum_clocks.engineClock > max_limits->sclk) ?
2892 max_limits->sclk : minimum_clocks.engineClock;
2893
2894 if (mclk < minimum_clocks.memoryClock)
2895 mclk = (minimum_clocks.memoryClock > max_limits->mclk) ?
2896 max_limits->mclk : minimum_clocks.memoryClock;
2897
2898 vega10_ps->performance_levels[0].gfx_clock = sclk;
2899 vega10_ps->performance_levels[0].mem_clock = mclk;
2900
2901 vega10_ps->performance_levels[1].gfx_clock =
2902 (vega10_ps->performance_levels[1].gfx_clock >=
2903 vega10_ps->performance_levels[0].gfx_clock) ?
2904 vega10_ps->performance_levels[1].gfx_clock :
2905 vega10_ps->performance_levels[0].gfx_clock;
2906
2907 if (disable_mclk_switching) {
2908 /* Set Mclk the max of level 0 and level 1 */
2909 if (mclk < vega10_ps->performance_levels[1].mem_clock)
2910 mclk = vega10_ps->performance_levels[1].mem_clock;
2911
2912 /* Find the lowest MCLK frequency that is within
2913 * the tolerable latency defined in DAL
2914 */
2915 latency = 0;
2916 for (i = 0; i < data->mclk_latency_table.count; i++) {
2917 if ((data->mclk_latency_table.entries[i].latency <= latency) &&
2918 (data->mclk_latency_table.entries[i].frequency >=
2919 vega10_ps->performance_levels[0].mem_clock) &&
2920 (data->mclk_latency_table.entries[i].frequency <=
2921 vega10_ps->performance_levels[1].mem_clock))
2922 mclk = data->mclk_latency_table.entries[i].frequency;
2923 }
2924 vega10_ps->performance_levels[0].mem_clock = mclk;
2925 } else {
2926 if (vega10_ps->performance_levels[1].mem_clock <
2927 vega10_ps->performance_levels[0].mem_clock)
2928 vega10_ps->performance_levels[1].mem_clock =
2929 vega10_ps->performance_levels[0].mem_clock;
2930 }
2931
2932 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2933 PHM_PlatformCaps_StablePState)) {
2934 for (i = 0; i < vega10_ps->performance_level_count; i++) {
2935 vega10_ps->performance_levels[i].gfx_clock = stable_pstate_sclk;
2936 vega10_ps->performance_levels[i].mem_clock = stable_pstate_mclk;
2937 }
2938 }
2939
2940 return 0;
2941}
2942
/* Inspect the new power state's top-level sclk/mclk against the current
 * DPM tables and record which tables need re-population/upload in
 * data->need_update_dpm_table (DPMTABLE_UPDATE_* / DPMTABLE_OD_UPDATE_*
 * flags).  The ODN-capable path only tracks display/deep-sleep driven
 * updates; the legacy path also flags overdrive clock changes.
 *
 * NOTE(review): min_clocks is zero-initialized and never populated, so
 * every comparison against min_clocks.engineClockInSR is effectively
 * against 0 — confirm whether it should be filled from display_config.
 */
static int vega10_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input)
{
	const struct phm_set_power_state_input *states =
			(const struct phm_set_power_state_input *)input;
	const struct vega10_power_state *vega10_ps =
			cast_const_phw_vega10_power_state(states->pnew_state);
	struct vega10_hwmgr *data =
			(struct vega10_hwmgr *)(hwmgr->backend);
	struct vega10_single_dpm_table *sclk_table =
			&(data->dpm_table.gfx_table);
	/* Highest performance level's clocks are the ones compared. */
	uint32_t sclk = vega10_ps->performance_levels
			[vega10_ps->performance_level_count - 1].gfx_clock;
	struct vega10_single_dpm_table *mclk_table =
			&(data->dpm_table.mem_table);
	uint32_t mclk = vega10_ps->performance_levels
			[vega10_ps->performance_level_count - 1].mem_clock;
	struct PP_Clocks min_clocks = {0};
	uint32_t i;
	struct cgs_display_info info = {0};

	data->need_update_dpm_table = 0;

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_ODNinACSupport) ||
		phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_ODNinDCSupport)) {
		/* ODN path: overdrive updates are driven by the ODN
		 * mask, so only deep-sleep and display-count changes
		 * are detected here.
		 */
		for (i = 0; i < sclk_table->count; i++) {
			if (sclk == sclk_table->dpm_levels[i].value)
				break;
		}

		if (!(data->apply_overdrive_next_settings_mask &
				DPMTABLE_OD_UPDATE_SCLK) && i >= sclk_table->count) {
			/* Check SCLK in DAL's minimum clocks
			 * in case DeepSleep divider update is required.
			 */
			if (data->display_timing.min_clock_in_sr !=
					min_clocks.engineClockInSR &&
				(min_clocks.engineClockInSR >=
						VEGA10_MINIMUM_ENGINE_CLOCK ||
					data->display_timing.min_clock_in_sr >=
						VEGA10_MINIMUM_ENGINE_CLOCK))
				data->need_update_dpm_table |= DPMTABLE_UPDATE_SCLK;
		}

		cgs_get_active_displays_info(hwmgr->device, &info);

		if (data->display_timing.num_existing_displays !=
				info.display_count)
			data->need_update_dpm_table |= DPMTABLE_UPDATE_MCLK;
	} else {
		/* Legacy path: a requested clock that is absent from
		 * the table means the user overdrove it.
		 */
		for (i = 0; i < sclk_table->count; i++) {
			if (sclk == sclk_table->dpm_levels[i].value)
				break;
		}

		if (i >= sclk_table->count)
			data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
		else {
			/* Check SCLK in DAL's minimum clocks
			 * in case DeepSleep divider update is required.
			 */
			if (data->display_timing.min_clock_in_sr !=
				min_clocks.engineClockInSR &&
				(min_clocks.engineClockInSR >=
						VEGA10_MINIMUM_ENGINE_CLOCK ||
					data->display_timing.min_clock_in_sr >=
						VEGA10_MINIMUM_ENGINE_CLOCK))
				data->need_update_dpm_table |= DPMTABLE_UPDATE_SCLK;
		}

		for (i = 0; i < mclk_table->count; i++) {
			if (mclk == mclk_table->dpm_levels[i].value)
				break;
		}

		cgs_get_active_displays_info(hwmgr->device, &info);

		if (i >= mclk_table->count)
			data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;

		/* A display-count change or an off-table mclk both force
		 * a memory-table refresh.
		 */
		if (data->display_timing.num_existing_displays !=
				info.display_count ||
				i >= mclk_table->count)
			data->need_update_dpm_table |= DPMTABLE_UPDATE_MCLK;
	}
	return 0;
}
3031
/* Rebuild the gfx/mem DPM tables for the new power state and push them
 * to the SMC.  Two regimes:
 *  - ODN supported: copy user-edited ODN levels into the live tables,
 *    then re-populate whichever tables the update/overdrive masks flag.
 *  - Legacy overdrive: overwrite the top level with the requested
 *    clock and, for OD6+, rescale the intermediate levels
 *    proportionally against the golden (default) table.
 * Returns 0 or the first populate error.
 */
static int vega10_populate_and_upload_sclk_mclk_dpm_levels(
		struct pp_hwmgr *hwmgr, const void *input)
{
	int result = 0;
	const struct phm_set_power_state_input *states =
			(const struct phm_set_power_state_input *)input;
	const struct vega10_power_state *vega10_ps =
			cast_const_phw_vega10_power_state(states->pnew_state);
	struct vega10_hwmgr *data =
			(struct vega10_hwmgr *)(hwmgr->backend);
	/* Requested top-level clocks. */
	uint32_t sclk = vega10_ps->performance_levels
			[vega10_ps->performance_level_count - 1].gfx_clock;
	uint32_t mclk = vega10_ps->performance_levels
			[vega10_ps->performance_level_count - 1].mem_clock;
	struct vega10_dpm_table *dpm_table = &data->dpm_table;
	struct vega10_dpm_table *golden_dpm_table =
			&data->golden_dpm_table;
	uint32_t dpm_count, clock_percent;
	uint32_t i;

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_ODNinACSupport) ||
		phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_ODNinDCSupport)) {

		/* Nothing flagged: the SMC tables are already current. */
		if (!data->need_update_dpm_table &&
			!data->apply_optimized_settings &&
			!data->apply_overdrive_next_settings_mask)
			return 0;

		/* Mirror the user's ODN core-clock levels into the live table. */
		if (data->apply_overdrive_next_settings_mask &
				DPMTABLE_OD_UPDATE_SCLK) {
			for (dpm_count = 0;
					dpm_count < dpm_table->gfx_table.count;
					dpm_count++) {
				dpm_table->gfx_table.dpm_levels[dpm_count].enabled =
					data->odn_dpm_table.odn_core_clock_dpm_levels.
					performance_level_entries[dpm_count].enabled;
				dpm_table->gfx_table.dpm_levels[dpm_count].value =
					data->odn_dpm_table.odn_core_clock_dpm_levels.
					performance_level_entries[dpm_count].clock;
			}
		}

		/* Mirror the user's ODN memory-clock levels likewise. */
		if (data->apply_overdrive_next_settings_mask &
				DPMTABLE_OD_UPDATE_MCLK) {
			for (dpm_count = 0;
					dpm_count < dpm_table->mem_table.count;
					dpm_count++) {
				dpm_table->mem_table.dpm_levels[dpm_count].enabled =
					data->odn_dpm_table.odn_memory_clock_dpm_levels.
					performance_level_entries[dpm_count].enabled;
				dpm_table->mem_table.dpm_levels[dpm_count].value =
					data->odn_dpm_table.odn_memory_clock_dpm_levels.
					performance_level_entries[dpm_count].clock;
			}
		}

		if ((data->need_update_dpm_table & DPMTABLE_UPDATE_SCLK) ||
			data->apply_optimized_settings ||
			(data->apply_overdrive_next_settings_mask &
					DPMTABLE_OD_UPDATE_SCLK)) {
			result = vega10_populate_all_graphic_levels(hwmgr);
			PP_ASSERT_WITH_CODE(!result,
					"Failed to populate SCLK during \
					PopulateNewDPMClocksStates Function!",
					return result);
		}

		if ((data->need_update_dpm_table & DPMTABLE_UPDATE_MCLK) ||
			(data->apply_overdrive_next_settings_mask &
					DPMTABLE_OD_UPDATE_MCLK)){
			result = vega10_populate_all_memory_levels(hwmgr);
			PP_ASSERT_WITH_CODE(!result,
					"Failed to populate MCLK during \
					PopulateNewDPMClocksStates Function!",
					return result);
		}
	} else {
		if (!data->need_update_dpm_table &&
				!data->apply_optimized_settings)
			return 0;

		if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_SCLK &&
				data->smu_features[GNLD_DPM_GFXCLK].supported) {
			/* Top level takes the overdriven sclk verbatim. */
			dpm_table->
			gfx_table.dpm_levels[dpm_table->gfx_table.count - 1].
			value = sclk;

			if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_OD6PlusinACSupport) ||
				phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
						PHM_PlatformCaps_OD6PlusinDCSupport)) {
				/* Need to do calculation based on the golden DPM table
				 * as the Heatmap GPU Clock axis is also based on
				 * the default values
				 */
				PP_ASSERT_WITH_CODE(
						golden_dpm_table->gfx_table.dpm_levels
						[golden_dpm_table->gfx_table.count - 1].value,
						"Divide by 0!",
						return -1);

				/* Rescale intermediate levels (skipping 0 and 1)
				 * by the same percentage the top level moved
				 * relative to the golden table.
				 */
				dpm_count = dpm_table->gfx_table.count < 2 ?
						0 : dpm_table->gfx_table.count - 2;
				for (i = dpm_count; i > 1; i--) {
					if (sclk > golden_dpm_table->gfx_table.dpm_levels
						[golden_dpm_table->gfx_table.count - 1].value) {
						clock_percent =
							((sclk - golden_dpm_table->gfx_table.dpm_levels
							[golden_dpm_table->gfx_table.count - 1].value) *
							100) /
							golden_dpm_table->gfx_table.dpm_levels
							[golden_dpm_table->gfx_table.count - 1].value;

						dpm_table->gfx_table.dpm_levels[i].value =
							golden_dpm_table->gfx_table.dpm_levels[i].value +
							(golden_dpm_table->gfx_table.dpm_levels[i].value *
							clock_percent) / 100;
					} else if (golden_dpm_table->
							gfx_table.dpm_levels[dpm_table->gfx_table.count-1].value >
							sclk) {
						clock_percent =
							((golden_dpm_table->gfx_table.dpm_levels
							[golden_dpm_table->gfx_table.count - 1].value -
							sclk) * 100) /
							golden_dpm_table->gfx_table.dpm_levels
							[golden_dpm_table->gfx_table.count-1].value;

						dpm_table->gfx_table.dpm_levels[i].value =
							golden_dpm_table->gfx_table.dpm_levels[i].value -
							(golden_dpm_table->gfx_table.dpm_levels[i].value *
							clock_percent) / 100;
					} else
						dpm_table->gfx_table.dpm_levels[i].value =
							golden_dpm_table->gfx_table.dpm_levels[i].value;
				}
			}
		}

		if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_MCLK &&
				data->smu_features[GNLD_DPM_UCLK].supported) {
			/* Same scheme for the memory table. */
			dpm_table->
			mem_table.dpm_levels[dpm_table->mem_table.count - 1].
			value = mclk;

			if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_OD6PlusinACSupport) ||
				phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
						PHM_PlatformCaps_OD6PlusinDCSupport)) {

				PP_ASSERT_WITH_CODE(
						golden_dpm_table->mem_table.dpm_levels
						[golden_dpm_table->mem_table.count - 1].value,
						"Divide by 0!",
						return -1);

				dpm_count = dpm_table->mem_table.count < 2 ?
						0 : dpm_table->mem_table.count - 2;
				for (i = dpm_count; i > 1; i--) {
					if (mclk > golden_dpm_table->mem_table.dpm_levels
						[golden_dpm_table->mem_table.count-1].value) {
						clock_percent = ((mclk -
							golden_dpm_table->mem_table.dpm_levels
							[golden_dpm_table->mem_table.count-1].value) *
							100) /
							golden_dpm_table->mem_table.dpm_levels
							[golden_dpm_table->mem_table.count-1].value;

						dpm_table->mem_table.dpm_levels[i].value =
							golden_dpm_table->mem_table.dpm_levels[i].value +
							(golden_dpm_table->mem_table.dpm_levels[i].value *
							clock_percent) / 100;
					} else if (golden_dpm_table->mem_table.dpm_levels
							[dpm_table->mem_table.count-1].value > mclk) {
						clock_percent = ((golden_dpm_table->mem_table.dpm_levels
							[golden_dpm_table->mem_table.count-1].value - mclk) *
							100) /
							golden_dpm_table->mem_table.dpm_levels
							[golden_dpm_table->mem_table.count-1].value;

						dpm_table->mem_table.dpm_levels[i].value =
							golden_dpm_table->mem_table.dpm_levels[i].value -
							(golden_dpm_table->mem_table.dpm_levels[i].value *
							clock_percent) / 100;
					} else
						dpm_table->mem_table.dpm_levels[i].value =
							golden_dpm_table->mem_table.dpm_levels[i].value;
				}
			}
		}

		if ((data->need_update_dpm_table &
			(DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) ||
			data->apply_optimized_settings) {
			result = vega10_populate_all_graphic_levels(hwmgr);
			PP_ASSERT_WITH_CODE(!result,
					"Failed to populate SCLK during \
					PopulateNewDPMClocksStates Function!",
					return result);
		}

		if (data->need_update_dpm_table &
			(DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) {
			result = vega10_populate_all_memory_levels(hwmgr);
			PP_ASSERT_WITH_CODE(!result,
					"Failed to populate MCLK during \
					PopulateNewDPMClocksStates Function!",
					return result);
		}
	}

	return result;
}
3246
3247static int vega10_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
3248 struct vega10_single_dpm_table *dpm_table,
3249 uint32_t low_limit, uint32_t high_limit)
3250{
3251 uint32_t i;
3252
3253 for (i = 0; i < dpm_table->count; i++) {
3254 if ((dpm_table->dpm_levels[i].value < low_limit) ||
3255 (dpm_table->dpm_levels[i].value > high_limit))
3256 dpm_table->dpm_levels[i].enabled = false;
3257 else
3258 dpm_table->dpm_levels[i].enabled = true;
3259 }
3260 return 0;
3261}
3262
3263static int vega10_trim_single_dpm_states_with_mask(struct pp_hwmgr *hwmgr,
3264 struct vega10_single_dpm_table *dpm_table,
3265 uint32_t low_limit, uint32_t high_limit,
3266 uint32_t disable_dpm_mask)
3267{
3268 uint32_t i;
3269
3270 for (i = 0; i < dpm_table->count; i++) {
3271 if ((dpm_table->dpm_levels[i].value < low_limit) ||
3272 (dpm_table->dpm_levels[i].value > high_limit))
3273 dpm_table->dpm_levels[i].enabled = false;
3274 else if (!((1 << i) & disable_dpm_mask))
3275 dpm_table->dpm_levels[i].enabled = false;
3276 else
3277 dpm_table->dpm_levels[i].enabled = true;
3278 }
3279 return 0;
3280}
3281
3282static int vega10_trim_dpm_states(struct pp_hwmgr *hwmgr,
3283 const struct vega10_power_state *vega10_ps)
3284{
3285 struct vega10_hwmgr *data =
3286 (struct vega10_hwmgr *)(hwmgr->backend);
3287 uint32_t high_limit_count;
3288
3289 PP_ASSERT_WITH_CODE((vega10_ps->performance_level_count >= 1),
3290 "power state did not have any performance level",
3291 return -1);
3292
3293 high_limit_count = (vega10_ps->performance_level_count == 1) ? 0 : 1;
3294
3295 vega10_trim_single_dpm_states(hwmgr,
3296 &(data->dpm_table.soc_table),
3297 vega10_ps->performance_levels[0].soc_clock,
3298 vega10_ps->performance_levels[high_limit_count].soc_clock);
3299
3300 vega10_trim_single_dpm_states_with_mask(hwmgr,
3301 &(data->dpm_table.gfx_table),
3302 vega10_ps->performance_levels[0].gfx_clock,
3303 vega10_ps->performance_levels[high_limit_count].gfx_clock,
3304 data->disable_dpm_mask);
3305
3306 vega10_trim_single_dpm_states(hwmgr,
3307 &(data->dpm_table.mem_table),
3308 vega10_ps->performance_levels[0].mem_clock,
3309 vega10_ps->performance_levels[high_limit_count].mem_clock);
3310
3311 return 0;
3312}
3313
3314static uint32_t vega10_find_lowest_dpm_level(
3315 struct vega10_single_dpm_table *table)
3316{
3317 uint32_t i;
3318
3319 for (i = 0; i < table->count; i++) {
3320 if (table->dpm_levels[i].enabled)
3321 break;
3322 }
3323
3324 return i;
3325}
3326
3327static uint32_t vega10_find_highest_dpm_level(
3328 struct vega10_single_dpm_table *table)
3329{
3330 uint32_t i = 0;
3331
3332 if (table->count <= MAX_REGULAR_DPM_NUMBER) {
3333 for (i = table->count; i > 0; i--) {
3334 if (table->dpm_levels[i - 1].enabled)
3335 return i - 1;
3336 }
3337 } else {
3338 pr_info("DPM Table Has Too Many Entries!");
3339 return MAX_REGULAR_DPM_NUMBER - 1;
3340 }
3341
3342 return i;
3343}
3344
/* Deliberate no-op on Vega10; kept because the DPM bootup/max upload
 * paths below call it unconditionally.
 */
static void vega10_apply_dal_minimum_voltage_request(
		struct pp_hwmgr *hwmgr)
{
}
3350
/* Push the soft-minimum gfx/mem DPM indices (the "bootup" levels) to
 * the SMC, skipping a message when the cached soft_min_level already
 * matches.  Returns 0 on success, -EINVAL if an SMC message fails.
 */
static int vega10_upload_dpm_bootup_level(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data =
			(struct vega10_hwmgr *)(hwmgr->backend);

	vega10_apply_dal_minimum_voltage_request(hwmgr);

	if (!data->registry_data.sclk_dpm_key_disabled) {
		/* Only message the SMC when the level actually changed. */
		if (data->smc_state_table.gfx_boot_level !=
				data->dpm_table.gfx_table.dpm_state.soft_min_level) {
			PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
				hwmgr->smumgr,
				PPSMC_MSG_SetSoftMinGfxclkByIndex,
				data->smc_state_table.gfx_boot_level),
				"Failed to set soft min sclk index!",
				return -EINVAL);
			data->dpm_table.gfx_table.dpm_state.soft_min_level =
					data->smc_state_table.gfx_boot_level;
		}
	}

	if (!data->registry_data.mclk_dpm_key_disabled) {
		if (data->smc_state_table.mem_boot_level !=
				data->dpm_table.mem_table.dpm_state.soft_min_level) {
			PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
				hwmgr->smumgr,
				PPSMC_MSG_SetSoftMinUclkByIndex,
				data->smc_state_table.mem_boot_level),
				"Failed to set soft min mclk index!",
				return -EINVAL);

			data->dpm_table.mem_table.dpm_state.soft_min_level =
					data->smc_state_table.mem_boot_level;
		}
	}

	return 0;
}
3389
/* Push the soft-maximum gfx/mem DPM indices to the SMC, mirroring
 * vega10_upload_dpm_bootup_level() but for the upper bound.  Returns 0
 * on success, -EINVAL if an SMC message fails.
 */
static int vega10_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data =
			(struct vega10_hwmgr *)(hwmgr->backend);

	vega10_apply_dal_minimum_voltage_request(hwmgr);

	if (!data->registry_data.sclk_dpm_key_disabled) {
		/* Only message the SMC when the level actually changed. */
		if (data->smc_state_table.gfx_max_level !=
			data->dpm_table.gfx_table.dpm_state.soft_max_level) {
			PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
				hwmgr->smumgr,
				PPSMC_MSG_SetSoftMaxGfxclkByIndex,
				data->smc_state_table.gfx_max_level),
				"Failed to set soft max sclk index!",
				return -EINVAL);
			data->dpm_table.gfx_table.dpm_state.soft_max_level =
					data->smc_state_table.gfx_max_level;
		}
	}

	if (!data->registry_data.mclk_dpm_key_disabled) {
		if (data->smc_state_table.mem_max_level !=
			data->dpm_table.mem_table.dpm_state.soft_max_level) {
			PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
				hwmgr->smumgr,
				PPSMC_MSG_SetSoftMaxUclkByIndex,
				data->smc_state_table.mem_max_level),
				"Failed to set soft max mclk index!",
				return -EINVAL);
			data->dpm_table.mem_table.dpm_state.soft_max_level =
					data->smc_state_table.mem_max_level;
		}
	}

	return 0;
}
3427
3428static int vega10_generate_dpm_level_enable_mask(
3429 struct pp_hwmgr *hwmgr, const void *input)
3430{
3431 struct vega10_hwmgr *data =
3432 (struct vega10_hwmgr *)(hwmgr->backend);
3433 const struct phm_set_power_state_input *states =
3434 (const struct phm_set_power_state_input *)input;
3435 const struct vega10_power_state *vega10_ps =
3436 cast_const_phw_vega10_power_state(states->pnew_state);
3437 int i;
3438
3439 PP_ASSERT_WITH_CODE(!vega10_trim_dpm_states(hwmgr, vega10_ps),
3440 "Attempt to Trim DPM States Failed!",
3441 return -1);
3442
3443 data->smc_state_table.gfx_boot_level =
3444 vega10_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
3445 data->smc_state_table.gfx_max_level =
3446 vega10_find_highest_dpm_level(&(data->dpm_table.gfx_table));
3447 data->smc_state_table.mem_boot_level =
3448 vega10_find_lowest_dpm_level(&(data->dpm_table.mem_table));
3449 data->smc_state_table.mem_max_level =
3450 vega10_find_highest_dpm_level(&(data->dpm_table.mem_table));
3451
3452 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
3453 "Attempt to upload DPM Bootup Levels Failed!",
3454 return -1);
3455 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
3456 "Attempt to upload DPM Max Levels Failed!",
3457 return -1);
3458 for(i = data->smc_state_table.gfx_boot_level; i < data->smc_state_table.gfx_max_level; i++)
3459 data->dpm_table.gfx_table.dpm_levels[i].enabled = true;
3460
3461
3462 for(i = data->smc_state_table.mem_boot_level; i < data->smc_state_table.mem_max_level; i++)
3463 data->dpm_table.mem_table.dpm_levels[i].enabled = true;
3464
3465 return 0;
3466}
3467
3468int vega10_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
3469{
3470 struct vega10_hwmgr *data =
3471 (struct vega10_hwmgr *)(hwmgr->backend);
3472
3473 if (data->smu_features[GNLD_DPM_VCE].supported) {
3474 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
3475 enable,
3476 data->smu_features[GNLD_DPM_VCE].smu_feature_bitmap),
3477 "Attempt to Enable/Disable DPM VCE Failed!",
3478 return -1);
3479 data->smu_features[GNLD_DPM_VCE].enabled = enable;
3480 }
3481
3482 return 0;
3483}
3484
3485static int vega10_update_sclk_threshold(struct pp_hwmgr *hwmgr)
3486{
3487 struct vega10_hwmgr *data =
3488 (struct vega10_hwmgr *)(hwmgr->backend);
3489 int result = 0;
3490 uint32_t low_sclk_interrupt_threshold = 0;
3491
3492 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3493 PHM_PlatformCaps_SclkThrottleLowNotification)
3494 && (hwmgr->gfx_arbiter.sclk_threshold !=
3495 data->low_sclk_interrupt_threshold)) {
3496 data->low_sclk_interrupt_threshold =
3497 hwmgr->gfx_arbiter.sclk_threshold;
3498 low_sclk_interrupt_threshold =
3499 data->low_sclk_interrupt_threshold;
3500
3501 data->smc_state_table.pp_table.LowGfxclkInterruptThreshold =
3502 cpu_to_le32(low_sclk_interrupt_threshold);
3503
3504 /* This message will also enable SmcToHost Interrupt */
3505 result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
3506 PPSMC_MSG_SetLowGfxclkInterruptThreshold,
3507 (uint32_t)low_sclk_interrupt_threshold);
3508 }
3509
3510 return result;
3511}
3512
/* Apply a new power state: detect needed DPM table updates, rebuild
 * and upload the levels, regenerate the enable mask, refresh the sclk
 * threshold, and finally push the whole PPTable to the SMC.  The first
 * failing step is remembered in @result but later steps still run;
 * only a PPTable upload failure aborts early.
 */
static int vega10_set_power_state_tasks(struct pp_hwmgr *hwmgr,
		const void *input)
{
	int tmp_result, result = 0;
	struct vega10_hwmgr *data =
			(struct vega10_hwmgr *)(hwmgr->backend);
	PPTable_t *pp_table = &(data->smc_state_table.pp_table);

	tmp_result = vega10_find_dpm_states_clocks_in_dpm_table(hwmgr, input);
	PP_ASSERT_WITH_CODE(!tmp_result,
			"Failed to find DPM states clocks in DPM table!",
			result = tmp_result);

	tmp_result = vega10_populate_and_upload_sclk_mclk_dpm_levels(hwmgr, input);
	PP_ASSERT_WITH_CODE(!tmp_result,
			"Failed to populate and upload SCLK MCLK DPM levels!",
			result = tmp_result);

	tmp_result = vega10_generate_dpm_level_enable_mask(hwmgr, input);
	PP_ASSERT_WITH_CODE(!tmp_result,
			"Failed to generate DPM level enabled mask!",
			result = tmp_result);

	tmp_result = vega10_update_sclk_threshold(hwmgr);
	PP_ASSERT_WITH_CODE(!tmp_result,
			"Failed to update SCLK threshold!",
			result = tmp_result);

	result = vega10_copy_table_to_smc(hwmgr->smumgr,
			(uint8_t *)pp_table, PPTABLE);
	PP_ASSERT_WITH_CODE(!result,
			"Failed to upload PPtable!", return result);

	/* Pending optimized/overdrive settings are now consumed.
	 * NOTE(review): on success the accumulated @result of earlier
	 * failures is discarded (0 is returned) — confirm intended.
	 */
	data->apply_optimized_settings = false;
	data->apply_overdrive_next_settings_mask = 0;

	return 0;
}
3551
3552static int vega10_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
3553{
3554 struct pp_power_state *ps;
3555 struct vega10_power_state *vega10_ps;
3556
3557 if (hwmgr == NULL)
3558 return -EINVAL;
3559
3560 ps = hwmgr->request_ps;
3561
3562 if (ps == NULL)
3563 return -EINVAL;
3564
3565 vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
3566
3567 if (low)
3568 return vega10_ps->performance_levels[0].gfx_clock;
3569 else
3570 return vega10_ps->performance_levels
3571 [vega10_ps->performance_level_count - 1].gfx_clock;
3572}
3573
3574static int vega10_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
3575{
3576 struct pp_power_state *ps;
3577 struct vega10_power_state *vega10_ps;
3578
3579 if (hwmgr == NULL)
3580 return -EINVAL;
3581
3582 ps = hwmgr->request_ps;
3583
3584 if (ps == NULL)
3585 return -EINVAL;
3586
3587 vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
3588
3589 if (low)
3590 return vega10_ps->performance_levels[0].mem_clock;
3591 else
3592 return vega10_ps->performance_levels
3593 [vega10_ps->performance_level_count-1].mem_clock;
3594}
3595
/* Read one AMDGPU_PP_SENSOR_* value into @value (all results are
 * 32-bit, so *size is set to 4).  Clock sensors query the SMC for the
 * current DPM index and translate it through the driver's DPM tables.
 * Returns 0 on success, -EINVAL for unknown sensors, or the SMC error.
 *
 * NOTE(review): the vega10_read_arg_from_smc() return values are
 * ignored; if that call can fail, sclk_idx/mclk_idx/activity_percent
 * would be used uninitialized — confirm its contract.
 */
static int vega10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
			      void *value, int *size)
{
	uint32_t sclk_idx, mclk_idx, activity_percent = 0;
	struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
	struct vega10_dpm_table *dpm_table = &data->dpm_table;
	int ret = 0;

	switch (idx) {
	case AMDGPU_PP_SENSOR_GFX_SCLK:
		ret = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetCurrentGfxclkIndex);
		if (!ret) {
			vega10_read_arg_from_smc(hwmgr->smumgr, &sclk_idx);
			*((uint32_t *)value) = dpm_table->gfx_table.dpm_levels[sclk_idx].value;
			*size = 4;
		}
		break;
	case AMDGPU_PP_SENSOR_GFX_MCLK:
		ret = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetCurrentUclkIndex);
		if (!ret) {
			vega10_read_arg_from_smc(hwmgr->smumgr, &mclk_idx);
			*((uint32_t *)value) = dpm_table->mem_table.dpm_levels[mclk_idx].value;
			*size = 4;
		}
		break;
	case AMDGPU_PP_SENSOR_GPU_LOAD:
		ret = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_GetAverageGfxActivity, 0);
		if (!ret) {
			vega10_read_arg_from_smc(hwmgr->smumgr, &activity_percent);
			/* Clamp firmware-reported activity to 100%. */
			*((uint32_t *)value) = activity_percent > 100 ? 100 : activity_percent;
			*size = 4;
		}
		break;
	case AMDGPU_PP_SENSOR_GPU_TEMP:
		*((uint32_t *)value) = vega10_thermal_get_temperature(hwmgr);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_UVD_POWER:
		/* Report 1 when the block is powered (not gated). */
		*((uint32_t *)value) = data->uvd_power_gated ? 0 : 1;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VCE_POWER:
		*((uint32_t *)value) = data->vce_power_gated ? 0 : 1;
		*size = 4;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}
3647
3648static int vega10_notify_smc_display_change(struct pp_hwmgr *hwmgr,
3649 bool has_disp)
3650{
3651 return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
3652 PPSMC_MSG_SetUclkFastSwitch,
3653 has_disp ? 0 : 1);
3654}
3655
3656int vega10_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
3657 struct pp_display_clock_request *clock_req)
3658{
3659 int result = 0;
3660 enum amd_pp_clock_type clk_type = clock_req->clock_type;
3661 uint32_t clk_freq = clock_req->clock_freq_in_khz / 100;
3662 DSPCLK_e clk_select = 0;
3663 uint32_t clk_request = 0;
3664
3665 switch (clk_type) {
3666 case amd_pp_dcef_clock:
3667 clk_select = DSPCLK_DCEFCLK;
3668 break;
3669 case amd_pp_disp_clock:
3670 clk_select = DSPCLK_DISPCLK;
3671 break;
3672 case amd_pp_pixel_clock:
3673 clk_select = DSPCLK_PIXCLK;
3674 break;
3675 case amd_pp_phy_clock:
3676 clk_select = DSPCLK_PHYCLK;
3677 break;
3678 default:
3679 pr_info("[DisplayClockVoltageRequest]Invalid Clock Type!");
3680 result = -1;
3681 break;
3682 }
3683
3684 if (!result) {
3685 clk_request = (clk_freq << 16) | clk_select;
3686 result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
3687 PPSMC_MSG_RequestDisplayClockByFreq,
3688 clk_request);
3689 }
3690
3691 return result;
3692}
3693
/*
 * After a power-state adjustment, re-synchronize the SMC with the current
 * display configuration: update the UCLK fast-switch policy and program
 * the DCEF hard-minimum and deep-sleep clocks requested by the display
 * stack.  Always returns 0; failures are only logged.
 */
static int vega10_notify_smc_display_config_after_ps_adjustment(
		struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data =
			(struct vega10_hwmgr *)(hwmgr->backend);
	struct vega10_single_dpm_table *dpm_table =
			&data->dpm_table.dcef_table;
	uint32_t num_active_disps = 0;
	struct cgs_display_info info = {0};
	struct PP_Clocks min_clocks = {0};
	uint32_t i;
	struct pp_display_clock_request clock_req;

	info.mode_info = NULL;

	cgs_get_active_displays_info(hwmgr->device, &info);

	num_active_disps = info.display_count;

	/* UCLK fast switching is only allowed with at most one display. */
	if (num_active_disps > 1)
		vega10_notify_smc_display_change(hwmgr, false);
	else
		vega10_notify_smc_display_change(hwmgr, true);

	min_clocks.dcefClock = hwmgr->display_config.min_dcef_set_clk;
	min_clocks.dcefClockInSR = hwmgr->display_config.min_dcef_deep_sleep_set_clk;

	/* Find the DCEF DPM level that exactly matches the requested minimum. */
	for (i = 0; i < dpm_table->count; i++) {
		if (dpm_table->dpm_levels[i].value == min_clocks.dcefClock)
			break;
	}

	if (i < dpm_table->count) {
		clock_req.clock_type = amd_pp_dcef_clock;
		/* NOTE(review): dpm_levels[].value appears to be in 10 kHz units
		 * (it is divided by 100 to print MHz elsewhere) while this field
		 * is named clock_freq_in_khz — confirm the unit handling. */
		clock_req.clock_freq_in_khz = dpm_table->dpm_levels[i].value;
		if (!vega10_display_clock_voltage_request(hwmgr, &clock_req)) {
			PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
					hwmgr->smumgr, PPSMC_MSG_SetMinDeepSleepDcefclk,
					min_clocks.dcefClockInSR),
					"Attempt to set divider for DCEFCLK Failed!",);
		} else
			pr_info("Attempt to set Hard Min for DCEFCLK Failed!");
	} else
		pr_info("Cannot find requested DCEFCLK!");

	return 0;
}
3741
3742static int vega10_force_dpm_highest(struct pp_hwmgr *hwmgr)
3743{
3744 struct vega10_hwmgr *data =
3745 (struct vega10_hwmgr *)(hwmgr->backend);
3746
3747 data->smc_state_table.gfx_boot_level =
3748 data->smc_state_table.gfx_max_level =
3749 vega10_find_highest_dpm_level(&(data->dpm_table.gfx_table));
3750 data->smc_state_table.mem_boot_level =
3751 data->smc_state_table.mem_max_level =
3752 vega10_find_highest_dpm_level(&(data->dpm_table.mem_table));
3753
3754 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
3755 "Failed to upload boot level to highest!",
3756 return -1);
3757
3758 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
3759 "Failed to upload dpm max level to highest!",
3760 return -1);
3761
3762 return 0;
3763}
3764
3765static int vega10_force_dpm_lowest(struct pp_hwmgr *hwmgr)
3766{
3767 struct vega10_hwmgr *data =
3768 (struct vega10_hwmgr *)(hwmgr->backend);
3769
3770 data->smc_state_table.gfx_boot_level =
3771 data->smc_state_table.gfx_max_level =
3772 vega10_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
3773 data->smc_state_table.mem_boot_level =
3774 data->smc_state_table.mem_max_level =
3775 vega10_find_lowest_dpm_level(&(data->dpm_table.mem_table));
3776
3777 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
3778 "Failed to upload boot level to highest!",
3779 return -1);
3780
3781 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
3782 "Failed to upload dpm max level to highest!",
3783 return -1);
3784
3785 return 0;
3786
3787}
3788
3789static int vega10_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
3790{
3791 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
3792
3793 data->smc_state_table.gfx_boot_level =
3794 vega10_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
3795 data->smc_state_table.gfx_max_level =
3796 vega10_find_highest_dpm_level(&(data->dpm_table.gfx_table));
3797 data->smc_state_table.mem_boot_level =
3798 vega10_find_lowest_dpm_level(&(data->dpm_table.mem_table));
3799 data->smc_state_table.mem_max_level =
3800 vega10_find_highest_dpm_level(&(data->dpm_table.mem_table));
3801
3802 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
3803 "Failed to upload DPM Bootup Levels!",
3804 return -1);
3805
3806 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
3807 "Failed to upload DPM Max Levels!",
3808 return -1);
3809 return 0;
3810}
3811
3812static int vega10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
3813 enum amd_dpm_forced_level level)
3814{
3815 int ret = 0;
3816
3817 switch (level) {
3818 case AMD_DPM_FORCED_LEVEL_HIGH:
3819 ret = vega10_force_dpm_highest(hwmgr);
3820 if (ret)
3821 return ret;
3822 break;
3823 case AMD_DPM_FORCED_LEVEL_LOW:
3824 ret = vega10_force_dpm_lowest(hwmgr);
3825 if (ret)
3826 return ret;
3827 break;
3828 case AMD_DPM_FORCED_LEVEL_AUTO:
3829 ret = vega10_unforce_dpm_levels(hwmgr);
3830 if (ret)
3831 return ret;
3832 break;
3833 default:
3834 break;
3835 }
3836
3837 hwmgr->dpm_level = level;
3838
3839 return ret;
3840}
3841
3842static int vega10_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
3843{
3844 if (mode) {
3845 /* stop auto-manage */
3846 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3847 PHM_PlatformCaps_MicrocodeFanControl))
3848 vega10_fan_ctrl_stop_smc_fan_control(hwmgr);
3849 vega10_fan_ctrl_set_static_mode(hwmgr, mode);
3850 } else
3851 /* restart auto-manage */
3852 vega10_fan_ctrl_reset_fan_speed_to_default(hwmgr);
3853
3854 return 0;
3855}
3856
3857static int vega10_get_fan_control_mode(struct pp_hwmgr *hwmgr)
3858{
3859 uint32_t reg;
3860
3861 if (hwmgr->fan_ctrl_is_in_default_mode) {
3862 return hwmgr->fan_ctrl_default_mode;
3863 } else {
3864 reg = soc15_get_register_offset(THM_HWID, 0,
3865 mmCG_FDO_CTRL2_BASE_IDX, mmCG_FDO_CTRL2);
3866 return (cgs_read_register(hwmgr->device, reg) &
3867 CG_FDO_CTRL2__FDO_PWM_MODE_MASK) >>
3868 CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
3869 }
3870}
3871
3872static int vega10_get_dal_power_level(struct pp_hwmgr *hwmgr,
3873 struct amd_pp_simple_clock_info *info)
3874{
3875 struct phm_ppt_v2_information *table_info =
3876 (struct phm_ppt_v2_information *)hwmgr->pptable;
3877 struct phm_clock_and_voltage_limits *max_limits =
3878 &table_info->max_clock_voltage_on_ac;
3879
3880 info->engine_max_clock = max_limits->sclk;
3881 info->memory_max_clock = max_limits->mclk;
3882
3883 return 0;
3884}
3885
3886static void vega10_get_sclks(struct pp_hwmgr *hwmgr,
3887 struct pp_clock_levels_with_latency *clocks)
3888{
3889 struct phm_ppt_v2_information *table_info =
3890 (struct phm_ppt_v2_information *)hwmgr->pptable;
3891 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
3892 table_info->vdd_dep_on_sclk;
3893 uint32_t i;
3894
3895 for (i = 0; i < dep_table->count; i++) {
3896 if (dep_table->entries[i].clk) {
3897 clocks->data[clocks->num_levels].clocks_in_khz =
3898 dep_table->entries[i].clk;
3899 clocks->num_levels++;
3900 }
3901 }
3902
3903}
3904
3905static uint32_t vega10_get_mem_latency(struct pp_hwmgr *hwmgr,
3906 uint32_t clock)
3907{
3908 if (clock >= MEM_FREQ_LOW_LATENCY &&
3909 clock < MEM_FREQ_HIGH_LATENCY)
3910 return MEM_LATENCY_HIGH;
3911 else if (clock >= MEM_FREQ_HIGH_LATENCY)
3912 return MEM_LATENCY_LOW;
3913 else
3914 return MEM_LATENCY_ERR;
3915}
3916
3917static void vega10_get_memclocks(struct pp_hwmgr *hwmgr,
3918 struct pp_clock_levels_with_latency *clocks)
3919{
3920 struct phm_ppt_v2_information *table_info =
3921 (struct phm_ppt_v2_information *)hwmgr->pptable;
3922 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
3923 table_info->vdd_dep_on_mclk;
3924 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
3925 uint32_t i;
3926
3927 clocks->num_levels = 0;
3928 data->mclk_latency_table.count = 0;
3929
3930 for (i = 0; i < dep_table->count; i++) {
3931 if (dep_table->entries[i].clk) {
3932 clocks->data[clocks->num_levels].clocks_in_khz =
3933 data->mclk_latency_table.entries
3934 [data->mclk_latency_table.count].frequency =
3935 dep_table->entries[i].clk;
3936 clocks->data[clocks->num_levels].latency_in_us =
3937 data->mclk_latency_table.entries
3938 [data->mclk_latency_table.count].latency =
3939 vega10_get_mem_latency(hwmgr,
3940 dep_table->entries[i].clk);
3941 clocks->num_levels++;
3942 data->mclk_latency_table.count++;
3943 }
3944 }
3945}
3946
3947static void vega10_get_dcefclocks(struct pp_hwmgr *hwmgr,
3948 struct pp_clock_levels_with_latency *clocks)
3949{
3950 struct phm_ppt_v2_information *table_info =
3951 (struct phm_ppt_v2_information *)hwmgr->pptable;
3952 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
3953 table_info->vdd_dep_on_dcefclk;
3954 uint32_t i;
3955
3956 for (i = 0; i < dep_table->count; i++) {
3957 clocks->data[i].clocks_in_khz = dep_table->entries[i].clk;
3958 clocks->data[i].latency_in_us = 0;
3959 clocks->num_levels++;
3960 }
3961}
3962
3963static void vega10_get_socclocks(struct pp_hwmgr *hwmgr,
3964 struct pp_clock_levels_with_latency *clocks)
3965{
3966 struct phm_ppt_v2_information *table_info =
3967 (struct phm_ppt_v2_information *)hwmgr->pptable;
3968 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
3969 table_info->vdd_dep_on_socclk;
3970 uint32_t i;
3971
3972 for (i = 0; i < dep_table->count; i++) {
3973 clocks->data[i].clocks_in_khz = dep_table->entries[i].clk;
3974 clocks->data[i].latency_in_us = 0;
3975 clocks->num_levels++;
3976 }
3977}
3978
3979static int vega10_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
3980 enum amd_pp_clock_type type,
3981 struct pp_clock_levels_with_latency *clocks)
3982{
3983 switch (type) {
3984 case amd_pp_sys_clock:
3985 vega10_get_sclks(hwmgr, clocks);
3986 break;
3987 case amd_pp_mem_clock:
3988 vega10_get_memclocks(hwmgr, clocks);
3989 break;
3990 case amd_pp_dcef_clock:
3991 vega10_get_dcefclocks(hwmgr, clocks);
3992 break;
3993 case amd_pp_soc_clock:
3994 vega10_get_socclocks(hwmgr, clocks);
3995 break;
3996 default:
3997 return -1;
3998 }
3999
4000 return 0;
4001}
4002
4003static int vega10_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
4004 enum amd_pp_clock_type type,
4005 struct pp_clock_levels_with_voltage *clocks)
4006{
4007 struct phm_ppt_v2_information *table_info =
4008 (struct phm_ppt_v2_information *)hwmgr->pptable;
4009 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
4010 uint32_t i;
4011
4012 switch (type) {
4013 case amd_pp_mem_clock:
4014 dep_table = table_info->vdd_dep_on_mclk;
4015 break;
4016 case amd_pp_dcef_clock:
4017 dep_table = table_info->vdd_dep_on_dcefclk;
4018 break;
4019 case amd_pp_disp_clock:
4020 dep_table = table_info->vdd_dep_on_dispclk;
4021 break;
4022 case amd_pp_pixel_clock:
4023 dep_table = table_info->vdd_dep_on_pixclk;
4024 break;
4025 case amd_pp_phy_clock:
4026 dep_table = table_info->vdd_dep_on_phyclk;
4027 break;
4028 default:
4029 return -1;
4030 }
4031
4032 for (i = 0; i < dep_table->count; i++) {
4033 clocks->data[i].clocks_in_khz = dep_table->entries[i].clk;
4034 clocks->data[i].voltage_in_mv = (uint32_t)(table_info->vddc_lookup_table->
4035 entries[dep_table->entries[i].vddInd].us_vdd);
4036 clocks->num_levels++;
4037 }
4038
4039 if (i < dep_table->count)
4040 return -1;
4041
4042 return 0;
4043}
4044
/*
 * vega10_set_watermarks_for_clocks_ranges - build the SMC watermark table.
 * @hwmgr: hardware manager handle
 * @wm_with_clock_ranges: per-watermark-set DMIF and MCIF clock ranges
 *
 * Converts each display (DMIF -> WM_DCEFCLK row) and memory (MCIF ->
 * WM_SOCCLK row) watermark range from kHz into the SMC's units (the /100
 * converts kHz to 10 kHz steps) and stores it little-endian in the staged
 * Watermarks_t table.  Marks the table as existing so a later display
 * config task uploads it.  Always returns 0 (result is never modified).
 */
static int vega10_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
		struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
{
	struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
	Watermarks_t *table = &(data->smc_state_table.water_marks_table);
	int result = 0;
	uint32_t i;

	/* Skip entirely when watermark programming is disabled via registry. */
	if (!data->registry_data.disable_water_mark) {
		for (i = 0; i < wm_with_clock_ranges->num_wm_sets_dmif; i++) {
			table->WatermarkRow[WM_DCEFCLK][i].MinClock =
				cpu_to_le16((uint16_t)
				(wm_with_clock_ranges->wm_sets_dmif[i].wm_min_dcefclk_in_khz) /
				100);
			table->WatermarkRow[WM_DCEFCLK][i].MaxClock =
				cpu_to_le16((uint16_t)
				(wm_with_clock_ranges->wm_sets_dmif[i].wm_max_dcefclk_in_khz) /
				100);
			table->WatermarkRow[WM_DCEFCLK][i].MinUclk =
				cpu_to_le16((uint16_t)
				(wm_with_clock_ranges->wm_sets_dmif[i].wm_min_memclk_in_khz) /
				100);
			table->WatermarkRow[WM_DCEFCLK][i].MaxUclk =
				cpu_to_le16((uint16_t)
				(wm_with_clock_ranges->wm_sets_dmif[i].wm_max_memclk_in_khz) /
				100);
			table->WatermarkRow[WM_DCEFCLK][i].WmSetting = (uint8_t)
					wm_with_clock_ranges->wm_sets_dmif[i].wm_set_id;
		}

		for (i = 0; i < wm_with_clock_ranges->num_wm_sets_mcif; i++) {
			table->WatermarkRow[WM_SOCCLK][i].MinClock =
				cpu_to_le16((uint16_t)
				(wm_with_clock_ranges->wm_sets_mcif[i].wm_min_socclk_in_khz) /
				100);
			table->WatermarkRow[WM_SOCCLK][i].MaxClock =
				cpu_to_le16((uint16_t)
				(wm_with_clock_ranges->wm_sets_mcif[i].wm_max_socclk_in_khz) /
				100);
			table->WatermarkRow[WM_SOCCLK][i].MinUclk =
				cpu_to_le16((uint16_t)
				(wm_with_clock_ranges->wm_sets_mcif[i].wm_min_memclk_in_khz) /
				100);
			table->WatermarkRow[WM_SOCCLK][i].MaxUclk =
				cpu_to_le16((uint16_t)
				(wm_with_clock_ranges->wm_sets_mcif[i].wm_max_memclk_in_khz) /
				100);
			table->WatermarkRow[WM_SOCCLK][i].WmSetting = (uint8_t)
					wm_with_clock_ranges->wm_sets_mcif[i].wm_set_id;
		}
		/* Upload is deferred to vega10_display_configuration_changed_task(). */
		data->water_marks_bitmap = WaterMarksExist;
	}

	return result;
}
4100
/*
 * vega10_force_clock_level - set a soft minimum DPM level from a bitmask.
 * @hwmgr: hardware manager handle
 * @type:  clock domain (PP_SCLK / PP_MCLK / PP_PCIE)
 * @mask:  bitmask of allowed levels; the lowest set bit is used as the
 *         soft-minimum index sent to the SMC
 *
 * Only honoured in manual DPM mode.  Domains disabled via registry keys
 * are silently skipped.  Returns 0 on success, -EINVAL when not in
 * manual mode, or -1 on an SMC messaging failure.
 */
static int vega10_force_clock_level(struct pp_hwmgr *hwmgr,
		enum pp_clock_type type, uint32_t mask)
{
	struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
	uint32_t i;

	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
		return -EINVAL;

	switch (type) {
	case PP_SCLK:
		if (data->registry_data.sclk_dpm_key_disabled)
			break;

		/* Find the lowest set bit = lowest allowed level.
		 * NOTE(review): if mask == 0 the loop ends with i == 32 and
		 * that out-of-range index is still sent to the SMC — confirm
		 * callers never pass an empty mask. */
		for (i = 0; i < 32; i++) {
			if (mask & (1 << i))
				break;
		}

		PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
			hwmgr->smumgr,
			PPSMC_MSG_SetSoftMinGfxclkByIndex,
			i),
			"Failed to set soft min sclk index!",
			return -1);
		break;

	case PP_MCLK:
		if (data->registry_data.mclk_dpm_key_disabled)
			break;

		/* Lowest set bit = lowest allowed memory level. */
		for (i = 0; i < 32; i++) {
			if (mask & (1 << i))
				break;
		}

		PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
			hwmgr->smumgr,
			PPSMC_MSG_SetSoftMinUclkByIndex,
			i),
			"Failed to set soft min mclk index!",
			return -1);
		break;

	case PP_PCIE:
		if (data->registry_data.pcie_dpm_key_disabled)
			break;

		/* Lowest set bit = lowest allowed PCIe link level. */
		for (i = 0; i < 32; i++) {
			if (mask & (1 << i))
				break;
		}

		PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
			hwmgr->smumgr,
			PPSMC_MSG_SetMinLinkDpmByIndex,
			i),
			"Failed to set min pcie index!",
			return -1);
		break;
	default:
		break;
	}

	return 0;
}
4167
/*
 * vega10_print_clock_levels - format DPM levels of @type into @buf.
 * @hwmgr: hardware manager handle
 * @type:  clock domain to print (PP_SCLK / PP_MCLK / PP_PCIE)
 * @buf:   sysfs-style output buffer
 *
 * Queries the SMC for the currently active level index and marks it with
 * a '*'.  Clock values are stored in 10 kHz units, hence the /100 to
 * print MHz.  Returns the number of bytes written, or -1 on an SMC
 * messaging failure.
 */
static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
		enum pp_clock_type type, char *buf)
{
	struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
	struct vega10_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table);
	struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table);
	struct vega10_pcie_table *pcie_table = &(data->dpm_table.pcie_table);
	/* NOTE(review): 'now' is an int but is passed to
	 * vega10_read_arg_from_smc(), which elsewhere takes a uint32_t * —
	 * confirm the prototype; consider making 'now' uint32_t. */
	int i, now, size = 0;

	switch (type) {
	case PP_SCLK:
		if (data->registry_data.sclk_dpm_key_disabled)
			break;

		PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr->smumgr,
				PPSMC_MSG_GetCurrentGfxclkIndex),
				"Attempt to get current sclk index Failed!",
				return -1);
		PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(hwmgr->smumgr,
				&now),
				"Attempt to read sclk index Failed!",
				return -1);

		for (i = 0; i < sclk_table->count; i++)
			size += sprintf(buf + size, "%d: %uMhz %s\n",
					i, sclk_table->dpm_levels[i].value / 100,
					(i == now) ? "*" : "");
		break;
	case PP_MCLK:
		if (data->registry_data.mclk_dpm_key_disabled)
			break;

		PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr->smumgr,
				PPSMC_MSG_GetCurrentUclkIndex),
				"Attempt to get current mclk index Failed!",
				return -1);
		PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(hwmgr->smumgr,
				&now),
				"Attempt to read mclk index Failed!",
				return -1);

		for (i = 0; i < mclk_table->count; i++)
			size += sprintf(buf + size, "%d: %uMhz %s\n",
					i, mclk_table->dpm_levels[i].value / 100,
					(i == now) ? "*" : "");
		break;
	case PP_PCIE:
		/* NOTE(review): these two error strings say "mclk" — looks like
		 * a copy-paste from the PP_MCLK case; should read "pcie". */
		PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr->smumgr,
				PPSMC_MSG_GetCurrentLinkIndex),
				"Attempt to get current mclk index Failed!",
				return -1);
		PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(hwmgr->smumgr,
				&now),
				"Attempt to read mclk index Failed!",
				return -1);

		for (i = 0; i < pcie_table->count; i++)
			size += sprintf(buf + size, "%d: %s %s\n", i,
					(pcie_table->pcie_gen[i] == 0) ? "2.5GB, x1" :
					(pcie_table->pcie_gen[i] == 1) ? "5.0GB, x16" :
					(pcie_table->pcie_gen[i] == 2) ? "8.0GB, x16" : "",
					(i == now) ? "*" : "");
		break;
	default:
		break;
	}
	return size;
}
4236
4237static int vega10_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
4238{
4239 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4240 int result = 0;
4241 uint32_t num_turned_on_displays = 1;
4242 Watermarks_t *wm_table = &(data->smc_state_table.water_marks_table);
4243 struct cgs_display_info info = {0};
4244
4245 if ((data->water_marks_bitmap & WaterMarksExist) &&
4246 !(data->water_marks_bitmap & WaterMarksLoaded)) {
4247 result = vega10_copy_table_to_smc(hwmgr->smumgr,
4248 (uint8_t *)wm_table, WMTABLE);
4249 PP_ASSERT_WITH_CODE(result, "Failed to update WMTABLE!", return EINVAL);
4250 data->water_marks_bitmap |= WaterMarksLoaded;
4251 }
4252
4253 if (data->water_marks_bitmap & WaterMarksLoaded) {
4254 cgs_get_active_displays_info(hwmgr->device, &info);
4255 num_turned_on_displays = info.display_count;
4256 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
4257 PPSMC_MSG_NumOfDisplays, num_turned_on_displays);
4258 }
4259
4260 return result;
4261}
4262
4263int vega10_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
4264{
4265 struct vega10_hwmgr *data =
4266 (struct vega10_hwmgr *)(hwmgr->backend);
4267
4268 if (data->smu_features[GNLD_DPM_UVD].supported) {
4269 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
4270 enable,
4271 data->smu_features[GNLD_DPM_UVD].smu_feature_bitmap),
4272 "Attempt to Enable/Disable DPM UVD Failed!",
4273 return -1);
4274 data->smu_features[GNLD_DPM_UVD].enabled = enable;
4275 }
4276 return 0;
4277}
4278
4279static int vega10_power_gate_vce(struct pp_hwmgr *hwmgr, bool bgate)
4280{
4281 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4282
4283 data->vce_power_gated = bgate;
4284 return vega10_enable_disable_vce_dpm(hwmgr, !bgate);
4285}
4286
4287static int vega10_power_gate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
4288{
4289 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4290
4291 data->uvd_power_gated = bgate;
4292 return vega10_enable_disable_uvd_dpm(hwmgr, !bgate);
4293}
4294
4295static inline bool vega10_are_power_levels_equal(
4296 const struct vega10_performance_level *pl1,
4297 const struct vega10_performance_level *pl2)
4298{
4299 return ((pl1->soc_clock == pl2->soc_clock) &&
4300 (pl1->gfx_clock == pl2->gfx_clock) &&
4301 (pl1->mem_clock == pl2->mem_clock));
4302}
4303
/*
 * vega10_check_states_equal - compare two hardware power states.
 * @hwmgr:   hardware manager handle (unused in the comparison itself)
 * @pstate1: first state
 * @pstate2: second state
 * @equal:   out: true when the states are equivalent
 *
 * States are equal when they have the same number of performance levels,
 * every level matches clock-for-clock, and the UVD/VCE clocks and sclk
 * threshold agree.  Returns 0, or -EINVAL on a NULL argument.
 */
static int vega10_check_states_equal(struct pp_hwmgr *hwmgr,
				const struct pp_hw_power_state *pstate1,
			const struct pp_hw_power_state *pstate2, bool *equal)
{
	const struct vega10_power_state *psa;
	const struct vega10_power_state *psb;
	int i;

	if (pstate1 == NULL || pstate2 == NULL || equal == NULL)
		return -EINVAL;

	psa = cast_const_phw_vega10_power_state(pstate1);
	psb = cast_const_phw_vega10_power_state(pstate2);
	/* If the two states don't even have the same number of performance levels they cannot be the same state. */
	if (psa->performance_level_count != psb->performance_level_count) {
		*equal = false;
		return 0;
	}

	for (i = 0; i < psa->performance_level_count; i++) {
		if (!vega10_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) {
			/* If we have found even one performance level pair that is different the states are different. */
			*equal = false;
			return 0;
		}
	}

	/* If all performance levels are the same try to use the UVD clocks to break the tie.*/
	*equal = ((psa->uvd_clks.vclk == psb->uvd_clks.vclk) && (psa->uvd_clks.dclk == psb->uvd_clks.dclk));
	*equal &= ((psa->vce_clks.evclk == psb->vce_clks.evclk) && (psa->vce_clks.ecclk == psb->vce_clks.ecclk));
	*equal &= (psa->sclk_threshold == psb->sclk_threshold);

	return 0;
}
4338
4339static bool
4340vega10_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
4341{
4342 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4343 bool is_update_required = false;
4344 struct cgs_display_info info = {0, 0, NULL};
4345
4346 cgs_get_active_displays_info(hwmgr->device, &info);
4347
4348 if (data->display_timing.num_existing_displays != info.display_count)
4349 is_update_required = true;
4350
4351 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
4352 if (data->display_timing.min_clock_in_sr != hwmgr->display_config.min_core_set_clock_in_sr)
4353 is_update_required = true;
4354 }
4355
4356 return is_update_required;
4357}
4358
/*
 * Vega10 implementation of the generic pp_hwmgr_func callback table.
 * Groups: lifecycle/state management, clock queries, thermal/fan control,
 * sensors, display coupling, and power gating.
 */
static const struct pp_hwmgr_func vega10_hwmgr_funcs = {
	.backend_init = vega10_hwmgr_backend_init,
	.backend_fini = vega10_hwmgr_backend_fini,
	.asic_setup = vega10_setup_asic_task,
	.dynamic_state_management_enable = vega10_enable_dpm_tasks,
	.get_num_of_pp_table_entries =
			vega10_get_number_of_powerplay_table_entries,
	.get_power_state_size = vega10_get_power_state_size,
	.get_pp_table_entry = vega10_get_pp_table_entry,
	.patch_boot_state = vega10_patch_boot_state,
	.apply_state_adjust_rules = vega10_apply_state_adjust_rules,
	.power_state_set = vega10_set_power_state_tasks,
	.get_sclk = vega10_dpm_get_sclk,
	.get_mclk = vega10_dpm_get_mclk,
	.notify_smc_display_config_after_ps_adjustment =
			vega10_notify_smc_display_config_after_ps_adjustment,
	.force_dpm_level = vega10_dpm_force_dpm_level,
	/* Thermal and fan control. */
	.get_temperature = vega10_thermal_get_temperature,
	.stop_thermal_controller = vega10_thermal_stop_thermal_controller,
	.get_fan_speed_info = vega10_fan_ctrl_get_fan_speed_info,
	.get_fan_speed_percent = vega10_fan_ctrl_get_fan_speed_percent,
	.set_fan_speed_percent = vega10_fan_ctrl_set_fan_speed_percent,
	.reset_fan_speed_to_default =
			vega10_fan_ctrl_reset_fan_speed_to_default,
	.get_fan_speed_rpm = vega10_fan_ctrl_get_fan_speed_rpm,
	.set_fan_speed_rpm = vega10_fan_ctrl_set_fan_speed_rpm,
	.uninitialize_thermal_controller =
			vega10_thermal_ctrl_uninitialize_thermal_controller,
	.set_fan_control_mode = vega10_set_fan_control_mode,
	.get_fan_control_mode = vega10_get_fan_control_mode,
	/* Sensors and clock reporting. */
	.read_sensor = vega10_read_sensor,
	.get_dal_power_level = vega10_get_dal_power_level,
	.get_clock_by_type_with_latency = vega10_get_clock_by_type_with_latency,
	.get_clock_by_type_with_voltage = vega10_get_clock_by_type_with_voltage,
	.set_watermarks_for_clocks_ranges = vega10_set_watermarks_for_clocks_ranges,
	.display_clock_voltage_request = vega10_display_clock_voltage_request,
	.force_clock_level = vega10_force_clock_level,
	.print_clock_levels = vega10_print_clock_levels,
	.display_config_changed = vega10_display_configuration_changed_task,
	/* Power gating. */
	.powergate_uvd = vega10_power_gate_uvd,
	.powergate_vce = vega10_power_gate_vce,
	.check_states_equal = vega10_check_states_equal,
	.check_smc_update_required_for_display_configuration =
			vega10_check_smc_update_required_for_display_configuration,
};
4404
/*
 * vega10_hwmgr_init - wire up the Vega10 hwmgr.
 * @hwmgr: hardware manager handle to populate
 *
 * Installs the Vega10 callback and pptable function tables and runs the
 * thermal parameter initialization.  Always returns 0.
 */
int vega10_hwmgr_init(struct pp_hwmgr *hwmgr)
{
	hwmgr->hwmgr_func = &vega10_hwmgr_funcs;
	hwmgr->pptable_func = &vega10_pptable_funcs;
	pp_vega10_thermal_initialize(hwmgr);
	return 0;
}