]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blob - drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
mmc: core: prepend 0x to OCR entry in sysfs
[mirror_ubuntu-bionic-kernel.git] / drivers / gpu / drm / amd / powerplay / hwmgr / vega10_hwmgr.c
1 /*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24 #include <linux/delay.h>
25 #include <linux/fb.h>
26 #include <linux/module.h>
27 #include <linux/slab.h>
28
29 #include "hwmgr.h"
30 #include "amd_powerplay.h"
31 #include "vega10_smumgr.h"
32 #include "hardwaremanager.h"
33 #include "ppatomfwctrl.h"
34 #include "atomfirmware.h"
35 #include "cgs_common.h"
36 #include "vega10_powertune.h"
37 #include "smu9.h"
38 #include "smu9_driver_if.h"
39 #include "vega10_inc.h"
40 #include "pp_soc15.h"
41 #include "pppcielanes.h"
42 #include "vega10_hwmgr.h"
43 #include "vega10_processpptables.h"
44 #include "vega10_pptable.h"
45 #include "vega10_thermal.h"
46 #include "pp_debug.h"
47 #include "pp_acpi.h"
48 #include "amd_pcie_helpers.h"
49 #include "cgs_linux.h"
50 #include "ppinterrupt.h"
51 #include "pp_overdriver.h"
52
53 #define VOLTAGE_SCALE 4
54 #define VOLTAGE_VID_OFFSET_SCALE1 625
55 #define VOLTAGE_VID_OFFSET_SCALE2 100
56
57 #define HBM_MEMORY_CHANNEL_WIDTH 128
58
59 static const uint32_t channel_number[] = {1, 2, 0, 4, 0, 8, 0, 16, 2};
60
61 #define MEM_FREQ_LOW_LATENCY 25000
62 #define MEM_FREQ_HIGH_LATENCY 80000
63 #define MEM_LATENCY_HIGH 245
64 #define MEM_LATENCY_LOW 35
65 #define MEM_LATENCY_ERR 0xFFFF
66
67 #define mmDF_CS_AON0_DramBaseAddress0 0x0044
68 #define mmDF_CS_AON0_DramBaseAddress0_BASE_IDX 0
69
70 //DF_CS_AON0_DramBaseAddress0
71 #define DF_CS_AON0_DramBaseAddress0__AddrRngVal__SHIFT 0x0
72 #define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn__SHIFT 0x1
73 #define DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT 0x4
74 #define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel__SHIFT 0x8
75 #define DF_CS_AON0_DramBaseAddress0__DramBaseAddr__SHIFT 0xc
76 #define DF_CS_AON0_DramBaseAddress0__AddrRngVal_MASK 0x00000001L
77 #define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn_MASK 0x00000002L
78 #define DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK 0x000000F0L
79 #define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel_MASK 0x00000700L
80 #define DF_CS_AON0_DramBaseAddress0__DramBaseAddr_MASK 0xFFFFF000L
/* Forward declaration: implemented later in this file; needed by code
 * above its definition.
 */
static int vega10_force_clock_level(struct pp_hwmgr *hwmgr,
		enum pp_clock_type type, uint32_t mask);

/* Magic value stamped into pp_hw_power_state::magic so the cast helpers
 * below can verify that a state really belongs to this Vega10 backend.
 */
static const ULONG PhwVega10_Magic = (ULONG)(PHM_VIslands_Magic);
85
86 struct vega10_power_state *cast_phw_vega10_power_state(
87 struct pp_hw_power_state *hw_ps)
88 {
89 PP_ASSERT_WITH_CODE((PhwVega10_Magic == hw_ps->magic),
90 "Invalid Powerstate Type!",
91 return NULL;);
92
93 return (struct vega10_power_state *)hw_ps;
94 }
95
96 const struct vega10_power_state *cast_const_phw_vega10_power_state(
97 const struct pp_hw_power_state *hw_ps)
98 {
99 PP_ASSERT_WITH_CODE((PhwVega10_Magic == hw_ps->magic),
100 "Invalid Powerstate Type!",
101 return NULL;);
102
103 return (const struct vega10_power_state *)hw_ps;
104 }
105
106 static void vega10_set_default_registry_data(struct pp_hwmgr *hwmgr)
107 {
108 struct vega10_hwmgr *data =
109 (struct vega10_hwmgr *)(hwmgr->backend);
110
111 data->registry_data.sclk_dpm_key_disabled =
112 hwmgr->feature_mask & PP_SCLK_DPM_MASK ? false : true;
113 data->registry_data.socclk_dpm_key_disabled =
114 hwmgr->feature_mask & PP_SOCCLK_DPM_MASK ? false : true;
115 data->registry_data.mclk_dpm_key_disabled =
116 hwmgr->feature_mask & PP_MCLK_DPM_MASK ? false : true;
117 data->registry_data.pcie_dpm_key_disabled =
118 hwmgr->feature_mask & PP_PCIE_DPM_MASK ? false : true;
119
120 data->registry_data.dcefclk_dpm_key_disabled =
121 hwmgr->feature_mask & PP_DCEFCLK_DPM_MASK ? false : true;
122
123 if (hwmgr->feature_mask & PP_POWER_CONTAINMENT_MASK) {
124 data->registry_data.power_containment_support = 1;
125 data->registry_data.enable_pkg_pwr_tracking_feature = 1;
126 data->registry_data.enable_tdc_limit_feature = 1;
127 }
128
129 data->registry_data.clock_stretcher_support =
130 hwmgr->feature_mask & PP_CLOCK_STRETCH_MASK ? true : false;
131
132 data->registry_data.ulv_support =
133 hwmgr->feature_mask & PP_ULV_MASK ? true : false;
134
135 data->registry_data.sclk_deep_sleep_support =
136 hwmgr->feature_mask & PP_SCLK_DEEP_SLEEP_MASK ? true : false;
137
138 data->registry_data.disable_water_mark = 0;
139
140 data->registry_data.fan_control_support = 1;
141 data->registry_data.thermal_support = 1;
142 data->registry_data.fw_ctf_enabled = 1;
143
144 data->registry_data.avfs_support = 1;
145 data->registry_data.led_dpm_enabled = 1;
146
147 data->registry_data.vr0hot_enabled = 1;
148 data->registry_data.vr1hot_enabled = 1;
149 data->registry_data.regulator_hot_gpio_support = 1;
150
151 data->registry_data.didt_support = 1;
152 if (data->registry_data.didt_support) {
153 data->registry_data.didt_mode = 6;
154 data->registry_data.sq_ramping_support = 1;
155 data->registry_data.db_ramping_support = 0;
156 data->registry_data.td_ramping_support = 0;
157 data->registry_data.tcp_ramping_support = 0;
158 data->registry_data.dbr_ramping_support = 0;
159 data->registry_data.edc_didt_support = 1;
160 data->registry_data.gc_didt_support = 0;
161 data->registry_data.psm_didt_support = 0;
162 }
163
164 data->display_voltage_mode = PPVEGA10_VEGA10DISPLAYVOLTAGEMODE_DFLT;
165 data->dcef_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
166 data->dcef_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
167 data->dcef_clk_quad_eqn_c = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
168 data->disp_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
169 data->disp_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
170 data->disp_clk_quad_eqn_c = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
171 data->pixel_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
172 data->pixel_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
173 data->pixel_clk_quad_eqn_c = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
174 data->phy_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
175 data->phy_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
176 data->phy_clk_quad_eqn_c = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
177
178 data->gfxclk_average_alpha = PPVEGA10_VEGA10GFXCLKAVERAGEALPHA_DFLT;
179 data->socclk_average_alpha = PPVEGA10_VEGA10SOCCLKAVERAGEALPHA_DFLT;
180 data->uclk_average_alpha = PPVEGA10_VEGA10UCLKCLKAVERAGEALPHA_DFLT;
181 data->gfx_activity_average_alpha = PPVEGA10_VEGA10GFXACTIVITYAVERAGEALPHA_DFLT;
182 }
183
/**
 * vega10_set_features_platform_caps - populate the PHM platform capability bitmap.
 * @hwmgr: the address of the powerplay hardware manager.
 *
 * Sets the always-on baseline caps, enables UVD/VCE power gating when the
 * CGS power-gating flags report support, clears every power-tune cap and
 * then re-enables only those requested through the registry data built by
 * vega10_set_default_registry_data().
 *
 * Return: always 0.
 */
static int vega10_set_features_platform_caps(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data =
			(struct vega10_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)hwmgr->pptable;
	struct cgs_system_info sys_info = {0};
	int result;

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SclkDeepSleep);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DynamicPatchPowerState);

	/* VDDCI can only be controlled when a control method was detected. */
	if (data->vddci_control == VEGA10_VOLTAGE_CONTROL_NONE)
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_ControlVDDCI);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_EnableSMU7ThermalManagement);

	/* Query the power-gating flags supported by this ASIC from CGS. */
	sys_info.size = sizeof(struct cgs_system_info);
	sys_info.info_id = CGS_SYSTEM_INFO_PG_FLAGS;
	result = cgs_query_system_info(hwmgr->device, &sys_info);

	if (!result && (sys_info.value & AMD_PG_SUPPORT_UVD))
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_UVDPowerGating);

	if (!result && (sys_info.value & AMD_PG_SUPPORT_VCE))
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_VCEPowerGating);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_UnTabledHardwareInterface);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_FanSpeedInTableIsRPM);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_ODFuzzyFanControlSupport);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DynamicPowerManagement);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SMC);

	/* power tune caps */
	/* assume disabled */
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PowerContainment);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DiDtSupport);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SQRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DBRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_TDRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_TCPRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DBRRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DiDtEDCEnable);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_GCEDC);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PSM);

	/* Re-enable only the DiDt sub-features requested via registry data. */
	if (data->registry_data.didt_support) {
		phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DiDtSupport);
		if (data->registry_data.sq_ramping_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping);
		if (data->registry_data.db_ramping_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping);
		if (data->registry_data.td_ramping_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping);
		if (data->registry_data.tcp_ramping_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping);
		if (data->registry_data.dbr_ramping_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRRamping);
		if (data->registry_data.edc_didt_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DiDtEDCEnable);
		if (data->registry_data.gc_didt_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_GCEDC);
		if (data->registry_data.psm_didt_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PSM);
	}

	if (data->registry_data.power_containment_support)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_PowerContainment);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_CAC);

	/* NOTE(review): table_info->tdp_table is dereferenced without a NULL
	 * check here — presumably the pptable parser guarantees it; confirm.
	 */
	if (table_info->tdp_table->usClockStretchAmount &&
			data->registry_data.clock_stretcher_support)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_ClockStretcher);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_RegulatorHot);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_AutomaticDCTransition);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_UVDDPM);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_VCEDPM);

	return 0;
}
299
/**
 * vega10_init_dpm_defaults - map driver feature slots to SMU feature bits.
 * @hwmgr: the address of the powerplay hardware manager.
 *
 * Initializes every GNLD_* slot to "unknown/unsupported", assigns each
 * slot its SMU FEATURE_*_BIT id, then marks as supported those features
 * enabled through the registry data and platform caps. Finally probes the
 * SMU firmware version to decide ACG support.
 */
static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
	int i;

	vega10_initialize_power_tune_defaults(hwmgr);

	/* Reset all slots; bitmap is the slot's position in the local table.
	 * NOTE(review): "1 << i" assumes GNLD_FEATURES_MAX <= 31 — confirm.
	 */
	for (i = 0; i < GNLD_FEATURES_MAX; i++) {
		data->smu_features[i].smu_feature_id = 0xffff;
		data->smu_features[i].smu_feature_bitmap = 1 << i;
		data->smu_features[i].enabled = false;
		data->smu_features[i].supported = false;
	}

	/* Static mapping from driver slot to SMU firmware feature bit. */
	data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_id =
			FEATURE_DPM_PREFETCHER_BIT;
	data->smu_features[GNLD_DPM_GFXCLK].smu_feature_id =
			FEATURE_DPM_GFXCLK_BIT;
	data->smu_features[GNLD_DPM_UCLK].smu_feature_id =
			FEATURE_DPM_UCLK_BIT;
	data->smu_features[GNLD_DPM_SOCCLK].smu_feature_id =
			FEATURE_DPM_SOCCLK_BIT;
	data->smu_features[GNLD_DPM_UVD].smu_feature_id =
			FEATURE_DPM_UVD_BIT;
	data->smu_features[GNLD_DPM_VCE].smu_feature_id =
			FEATURE_DPM_VCE_BIT;
	data->smu_features[GNLD_DPM_MP0CLK].smu_feature_id =
			FEATURE_DPM_MP0CLK_BIT;
	data->smu_features[GNLD_DPM_LINK].smu_feature_id =
			FEATURE_DPM_LINK_BIT;
	data->smu_features[GNLD_DPM_DCEFCLK].smu_feature_id =
			FEATURE_DPM_DCEFCLK_BIT;
	data->smu_features[GNLD_ULV].smu_feature_id =
			FEATURE_ULV_BIT;
	data->smu_features[GNLD_AVFS].smu_feature_id =
			FEATURE_AVFS_BIT;
	data->smu_features[GNLD_DS_GFXCLK].smu_feature_id =
			FEATURE_DS_GFXCLK_BIT;
	data->smu_features[GNLD_DS_SOCCLK].smu_feature_id =
			FEATURE_DS_SOCCLK_BIT;
	data->smu_features[GNLD_DS_LCLK].smu_feature_id =
			FEATURE_DS_LCLK_BIT;
	data->smu_features[GNLD_PPT].smu_feature_id =
			FEATURE_PPT_BIT;
	data->smu_features[GNLD_TDC].smu_feature_id =
			FEATURE_TDC_BIT;
	data->smu_features[GNLD_THERMAL].smu_feature_id =
			FEATURE_THERMAL_BIT;
	data->smu_features[GNLD_GFX_PER_CU_CG].smu_feature_id =
			FEATURE_GFX_PER_CU_CG_BIT;
	data->smu_features[GNLD_RM].smu_feature_id =
			FEATURE_RM_BIT;
	data->smu_features[GNLD_DS_DCEFCLK].smu_feature_id =
			FEATURE_DS_DCEFCLK_BIT;
	data->smu_features[GNLD_ACDC].smu_feature_id =
			FEATURE_ACDC_BIT;
	data->smu_features[GNLD_VR0HOT].smu_feature_id =
			FEATURE_VR0HOT_BIT;
	data->smu_features[GNLD_VR1HOT].smu_feature_id =
			FEATURE_VR1HOT_BIT;
	data->smu_features[GNLD_FW_CTF].smu_feature_id =
			FEATURE_FW_CTF_BIT;
	data->smu_features[GNLD_LED_DISPLAY].smu_feature_id =
			FEATURE_LED_DISPLAY_BIT;
	data->smu_features[GNLD_FAN_CONTROL].smu_feature_id =
			FEATURE_FAN_CONTROL_BIT;
	data->smu_features[GNLD_ACG].smu_feature_id = FEATURE_ACG_BIT;
	data->smu_features[GNLD_DIDT].smu_feature_id = FEATURE_GFX_EDC_BIT;

	/* Mark supported features from registry data / platform caps. */
	if (!data->registry_data.prefetcher_dpm_key_disabled)
		data->smu_features[GNLD_DPM_PREFETCHER].supported = true;

	if (!data->registry_data.sclk_dpm_key_disabled)
		data->smu_features[GNLD_DPM_GFXCLK].supported = true;

	if (!data->registry_data.mclk_dpm_key_disabled)
		data->smu_features[GNLD_DPM_UCLK].supported = true;

	if (!data->registry_data.socclk_dpm_key_disabled)
		data->smu_features[GNLD_DPM_SOCCLK].supported = true;

	if (PP_CAP(PHM_PlatformCaps_UVDDPM))
		data->smu_features[GNLD_DPM_UVD].supported = true;

	if (PP_CAP(PHM_PlatformCaps_VCEDPM))
		data->smu_features[GNLD_DPM_VCE].supported = true;

	if (!data->registry_data.pcie_dpm_key_disabled)
		data->smu_features[GNLD_DPM_LINK].supported = true;

	if (!data->registry_data.dcefclk_dpm_key_disabled)
		data->smu_features[GNLD_DPM_DCEFCLK].supported = true;

	/* Deep-sleep support enables all four DS clock domains together. */
	if (PP_CAP(PHM_PlatformCaps_SclkDeepSleep) &&
	    data->registry_data.sclk_deep_sleep_support) {
		data->smu_features[GNLD_DS_GFXCLK].supported = true;
		data->smu_features[GNLD_DS_SOCCLK].supported = true;
		data->smu_features[GNLD_DS_LCLK].supported = true;
		data->smu_features[GNLD_DS_DCEFCLK].supported = true;
	}

	if (data->registry_data.enable_pkg_pwr_tracking_feature)
		data->smu_features[GNLD_PPT].supported = true;

	if (data->registry_data.enable_tdc_limit_feature)
		data->smu_features[GNLD_TDC].supported = true;

	if (data->registry_data.thermal_support)
		data->smu_features[GNLD_THERMAL].supported = true;

	if (data->registry_data.fan_control_support)
		data->smu_features[GNLD_FAN_CONTROL].supported = true;

	if (data->registry_data.fw_ctf_enabled)
		data->smu_features[GNLD_FW_CTF].supported = true;

	if (data->registry_data.avfs_support)
		data->smu_features[GNLD_AVFS].supported = true;

	if (data->registry_data.led_dpm_enabled)
		data->smu_features[GNLD_LED_DISPLAY].supported = true;

	if (data->registry_data.vr1hot_enabled)
		data->smu_features[GNLD_VR1HOT].supported = true;

	if (data->registry_data.vr0hot_enabled)
		data->smu_features[GNLD_VR0HOT].supported = true;

	/* NOTE(review): neither the message send nor the readback return
	 * value is checked; smu_version may be stale on failure — confirm.
	 */
	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion);
	vega10_read_arg_from_smc(hwmgr, &(data->smu_version));
	/* ACG firmware has major version 5 */
	if ((data->smu_version & 0xff000000) == 0x5000000)
		data->smu_features[GNLD_ACG].supported = true;

	if (data->registry_data.didt_support)
		data->smu_features[GNLD_DIDT].supported = true;

}
438
439 #ifdef PPLIB_VEGA10_EVV_SUPPORT
440 static int vega10_get_socclk_for_voltage_evv(struct pp_hwmgr *hwmgr,
441 phm_ppt_v1_voltage_lookup_table *lookup_table,
442 uint16_t virtual_voltage_id, int32_t *socclk)
443 {
444 uint8_t entry_id;
445 uint8_t voltage_id;
446 struct phm_ppt_v2_information *table_info =
447 (struct phm_ppt_v2_information *)(hwmgr->pptable);
448
449 PP_ASSERT_WITH_CODE(lookup_table->count != 0,
450 "Lookup table is empty",
451 return -EINVAL);
452
453 /* search for leakage voltage ID 0xff01 ~ 0xff08 and sclk */
454 for (entry_id = 0; entry_id < table_info->vdd_dep_on_sclk->count; entry_id++) {
455 voltage_id = table_info->vdd_dep_on_socclk->entries[entry_id].vddInd;
456 if (lookup_table->entries[voltage_id].us_vdd == virtual_voltage_id)
457 break;
458 }
459
460 PP_ASSERT_WITH_CODE(entry_id < table_info->vdd_dep_on_socclk->count,
461 "Can't find requested voltage id in vdd_dep_on_socclk table!",
462 return -EINVAL);
463
464 *socclk = table_info->vdd_dep_on_socclk->entries[entry_id].clk;
465
466 return 0;
467 }
468
469 #define ATOM_VIRTUAL_VOLTAGE_ID0 0xff01
470 /**
471 * Get Leakage VDDC based on leakage ID.
472 *
473 * @param hwmgr the address of the powerplay hardware manager.
474 * @return always 0.
475 */
/**
 * Get Leakage VDDC based on leakage ID.
 *
 * For each virtual leakage ID (0xff01 ~ 0xff08) that maps to a SOCCLK,
 * asks the VBIOS (EVV) for the real voltage and records the pair in
 * data->vddc_leakage for later table patching.
 *
 * @param hwmgr the address of the powerplay hardware manager.
 * @return always 0.
 */
static int vega10_get_evv_voltages(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
	uint16_t vv_id;
	uint32_t vddc = 0;
	uint16_t i, j;
	/* Despite the name, this holds a SOCCLK value (see helper above). */
	uint32_t sclk = 0;
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)hwmgr->pptable;
	struct phm_ppt_v1_clock_voltage_dependency_table *socclk_table =
			table_info->vdd_dep_on_socclk;
	/* NOTE(review): result is assigned on invalid VDDC but never
	 * returned — the function always reports success; confirm intent.
	 */
	int result;

	for (i = 0; i < VEGA10_MAX_LEAKAGE_COUNT; i++) {
		vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;

		if (!vega10_get_socclk_for_voltage_evv(hwmgr,
				table_info->vddc_lookup_table, vv_id, &sclk)) {
			/* With clock stretching, bump the clock slightly when
			 * the matching entry has stretching disabled.
			 */
			if (PP_CAP(PHM_PlatformCaps_ClockStretcher)) {
				for (j = 1; j < socclk_table->count; j++) {
					if (socclk_table->entries[j].clk == sclk &&
							socclk_table->entries[j].cks_enable == 0) {
						sclk += 5000;
						break;
					}
				}
			}

			PP_ASSERT_WITH_CODE(!atomctrl_get_voltage_evv_on_sclk_ai(hwmgr,
					VOLTAGE_TYPE_VDDC, sclk, vv_id, &vddc),
					"Error retrieving EVV voltage value!",
					continue);


			/* need to make sure vddc is less than 2v or else, it could burn the ASIC. */
			PP_ASSERT_WITH_CODE((vddc < 2000 && vddc != 0),
					"Invalid VDDC value", result = -EINVAL;);

			/* the voltage should not be zero nor equal to leakage ID */
			if (vddc != 0 && vddc != vv_id) {
				data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = (uint16_t)(vddc/100);
				data->vddc_leakage.leakage_id[data->vddc_leakage.count] = vv_id;
				data->vddc_leakage.count++;
			}
		}
	}

	return 0;
}
525
526 /**
527 * Change virtual leakage voltage to actual value.
528 *
529 * @param hwmgr the address of the powerplay hardware manager.
530 * @param pointer to changing voltage
531 * @param pointer to leakage table
532 */
533 static void vega10_patch_with_vdd_leakage(struct pp_hwmgr *hwmgr,
534 uint16_t *voltage, struct vega10_leakage_voltage *leakage_table)
535 {
536 uint32_t index;
537
538 /* search for leakage voltage ID 0xff01 ~ 0xff08 */
539 for (index = 0; index < leakage_table->count; index++) {
540 /* if this voltage matches a leakage voltage ID */
541 /* patch with actual leakage voltage */
542 if (leakage_table->leakage_id[index] == *voltage) {
543 *voltage = leakage_table->actual_voltage[index];
544 break;
545 }
546 }
547
548 if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
549 pr_info("Voltage value looks like a Leakage ID \
550 but it's not patched\n");
551 }
552
553 /**
554 * Patch voltage lookup table by EVV leakages.
555 *
556 * @param hwmgr the address of the powerplay hardware manager.
557 * @param pointer to voltage lookup table
558 * @param pointer to leakage table
559 * @return always 0
560 */
561 static int vega10_patch_lookup_table_with_leakage(struct pp_hwmgr *hwmgr,
562 phm_ppt_v1_voltage_lookup_table *lookup_table,
563 struct vega10_leakage_voltage *leakage_table)
564 {
565 uint32_t i;
566
567 for (i = 0; i < lookup_table->count; i++)
568 vega10_patch_with_vdd_leakage(hwmgr,
569 &lookup_table->entries[i].us_vdd, leakage_table);
570
571 return 0;
572 }
573
574 static int vega10_patch_clock_voltage_limits_with_vddc_leakage(
575 struct pp_hwmgr *hwmgr, struct vega10_leakage_voltage *leakage_table,
576 uint16_t *vddc)
577 {
578 vega10_patch_with_vdd_leakage(hwmgr, (uint16_t *)vddc, leakage_table);
579
580 return 0;
581 }
582 #endif
583
584 static int vega10_patch_voltage_dependency_tables_with_lookup_table(
585 struct pp_hwmgr *hwmgr)
586 {
587 uint8_t entry_id, voltage_id;
588 unsigned i;
589 struct phm_ppt_v2_information *table_info =
590 (struct phm_ppt_v2_information *)(hwmgr->pptable);
591 struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
592 table_info->mm_dep_table;
593 struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table =
594 table_info->vdd_dep_on_mclk;
595
596 for (i = 0; i < 6; i++) {
597 struct phm_ppt_v1_clock_voltage_dependency_table *vdt;
598 switch (i) {
599 case 0: vdt = table_info->vdd_dep_on_socclk; break;
600 case 1: vdt = table_info->vdd_dep_on_sclk; break;
601 case 2: vdt = table_info->vdd_dep_on_dcefclk; break;
602 case 3: vdt = table_info->vdd_dep_on_pixclk; break;
603 case 4: vdt = table_info->vdd_dep_on_dispclk; break;
604 case 5: vdt = table_info->vdd_dep_on_phyclk; break;
605 }
606
607 for (entry_id = 0; entry_id < vdt->count; entry_id++) {
608 voltage_id = vdt->entries[entry_id].vddInd;
609 vdt->entries[entry_id].vddc =
610 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
611 }
612 }
613
614 for (entry_id = 0; entry_id < mm_table->count; ++entry_id) {
615 voltage_id = mm_table->entries[entry_id].vddcInd;
616 mm_table->entries[entry_id].vddc =
617 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
618 }
619
620 for (entry_id = 0; entry_id < mclk_table->count; ++entry_id) {
621 voltage_id = mclk_table->entries[entry_id].vddInd;
622 mclk_table->entries[entry_id].vddc =
623 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
624 voltage_id = mclk_table->entries[entry_id].vddciInd;
625 mclk_table->entries[entry_id].vddci =
626 table_info->vddci_lookup_table->entries[voltage_id].us_vdd;
627 voltage_id = mclk_table->entries[entry_id].mvddInd;
628 mclk_table->entries[entry_id].mvdd =
629 table_info->vddmem_lookup_table->entries[voltage_id].us_vdd;
630 }
631
632
633 return 0;
634
635 }
636
637 static int vega10_sort_lookup_table(struct pp_hwmgr *hwmgr,
638 struct phm_ppt_v1_voltage_lookup_table *lookup_table)
639 {
640 uint32_t table_size, i, j;
641 struct phm_ppt_v1_voltage_lookup_record tmp_voltage_lookup_record;
642
643 PP_ASSERT_WITH_CODE(lookup_table && lookup_table->count,
644 "Lookup table is empty", return -EINVAL);
645
646 table_size = lookup_table->count;
647
648 /* Sorting voltages */
649 for (i = 0; i < table_size - 1; i++) {
650 for (j = i + 1; j > 0; j--) {
651 if (lookup_table->entries[j].us_vdd <
652 lookup_table->entries[j - 1].us_vdd) {
653 tmp_voltage_lookup_record = lookup_table->entries[j - 1];
654 lookup_table->entries[j - 1] = lookup_table->entries[j];
655 lookup_table->entries[j] = tmp_voltage_lookup_record;
656 }
657 }
658 }
659
660 return 0;
661 }
662
/* Run all dependency-table post-processing steps (leakage patching when
 * EVV support is compiled in, lookup resolution, sorting). Errors are
 * accumulated: each failing step overwrites `result`, so the last error
 * wins and all steps are always attempted.
 */
static int vega10_complete_dependency_tables(struct pp_hwmgr *hwmgr)
{
	int result = 0;
	int tmp_result;
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)(hwmgr->pptable);
#ifdef PPLIB_VEGA10_EVV_SUPPORT
	struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);

	/* Replace virtual leakage IDs with the measured EVV voltages. */
	tmp_result = vega10_patch_lookup_table_with_leakage(hwmgr,
			table_info->vddc_lookup_table, &(data->vddc_leakage));
	if (tmp_result)
		result = tmp_result;

	tmp_result = vega10_patch_clock_voltage_limits_with_vddc_leakage(hwmgr,
			&(data->vddc_leakage), &table_info->max_clock_voltage_on_dc.vddc);
	if (tmp_result)
		result = tmp_result;
#endif

	/* Resolve voltage-lookup indices into actual voltages. */
	tmp_result = vega10_patch_voltage_dependency_tables_with_lookup_table(hwmgr);
	if (tmp_result)
		result = tmp_result;

	/* Keep the VDDC lookup table sorted ascending for later binary use. */
	tmp_result = vega10_sort_lookup_table(hwmgr, table_info->vddc_lookup_table);
	if (tmp_result)
		result = tmp_result;

	return result;
}
693
694 static int vega10_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr)
695 {
696 struct phm_ppt_v2_information *table_info =
697 (struct phm_ppt_v2_information *)(hwmgr->pptable);
698 struct phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table =
699 table_info->vdd_dep_on_socclk;
700 struct phm_ppt_v1_clock_voltage_dependency_table *allowed_mclk_vdd_table =
701 table_info->vdd_dep_on_mclk;
702
703 PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table,
704 "VDD dependency on SCLK table is missing. \
705 This table is mandatory", return -EINVAL);
706 PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1,
707 "VDD dependency on SCLK table is empty. \
708 This table is mandatory", return -EINVAL);
709
710 PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table,
711 "VDD dependency on MCLK table is missing. \
712 This table is mandatory", return -EINVAL);
713 PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1,
714 "VDD dependency on MCLK table is empty. \
715 This table is mandatory", return -EINVAL);
716
717 table_info->max_clock_voltage_on_ac.sclk =
718 allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].clk;
719 table_info->max_clock_voltage_on_ac.mclk =
720 allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].clk;
721 table_info->max_clock_voltage_on_ac.vddc =
722 allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc;
723 table_info->max_clock_voltage_on_ac.vddci =
724 allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].vddci;
725
726 hwmgr->dyn_state.max_clock_voltage_on_ac.sclk =
727 table_info->max_clock_voltage_on_ac.sclk;
728 hwmgr->dyn_state.max_clock_voltage_on_ac.mclk =
729 table_info->max_clock_voltage_on_ac.mclk;
730 hwmgr->dyn_state.max_clock_voltage_on_ac.vddc =
731 table_info->max_clock_voltage_on_ac.vddc;
732 hwmgr->dyn_state.max_clock_voltage_on_ac.vddci =
733 table_info->max_clock_voltage_on_ac.vddci;
734
735 return 0;
736 }
737
738 static int vega10_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
739 {
740 kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
741 hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL;
742
743 kfree(hwmgr->backend);
744 hwmgr->backend = NULL;
745
746 return 0;
747 }
748
/**
 * vega10_hwmgr_backend_init - allocate and initialize the Vega10 backend.
 * @hwmgr: the address of the powerplay hardware manager.
 *
 * Allocates the private vega10_hwmgr, detects the voltage-control method
 * for each rail (VDDCR_SOC must be SVID2 or init fails), packs the SVID2
 * telemetry parameters, applies registry defaults / platform caps / DPM
 * defaults, post-processes the pptable and seeds the platform descriptor
 * and overdrive fan defaults.
 *
 * Return: 0 (or the CU-info query status) on success, -ENOMEM on
 * allocation failure, -1 when VDDCR_SOC is not SVID2-controlled.
 */
static int vega10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
{
	int result = 0;
	struct vega10_hwmgr *data;
	uint32_t config_telemetry = 0;
	struct pp_atomfwctrl_voltage_table vol_table;
	struct cgs_system_info sys_info = {0};

	data = kzalloc(sizeof(struct vega10_hwmgr), GFP_KERNEL);
	if (data == NULL)
		return -ENOMEM;

	hwmgr->backend = data;

	vega10_set_default_registry_data(hwmgr);

	data->disable_dpm_mask = 0xff;
	data->workload_mask = 0xff;

	/* need to set voltage control types before EVV patching */
	data->vddc_control = VEGA10_VOLTAGE_CONTROL_NONE;
	data->mvdd_control = VEGA10_VOLTAGE_CONTROL_NONE;
	data->vddci_control = VEGA10_VOLTAGE_CONTROL_NONE;

	/* VDDCR_SOC: mandatory SVID2 control; slope/offset go into the low
	 * 16 bits of config_telemetry.
	 */
	if (pp_atomfwctrl_is_voltage_controlled_by_gpio_v4(hwmgr,
			VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2)) {
		if (!pp_atomfwctrl_get_voltage_table_v4(hwmgr,
				VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2,
				&vol_table)) {
			config_telemetry = ((vol_table.telemetry_slope << 8) & 0xff00) |
					(vol_table.telemetry_offset & 0xff);
			data->vddc_control = VEGA10_VOLTAGE_CONTROL_BY_SVID2;
		}
	} else {
		/* NOTE(review): returns -1 rather than a -errno code —
		 * callers appear to only test for non-zero; confirm.
		 */
		kfree(hwmgr->backend);
		hwmgr->backend = NULL;
		PP_ASSERT_WITH_CODE(false,
				"VDDCR_SOC is not SVID2!",
				return -1);
	}

	/* MVDDC: optional SVID2 control; slope/offset occupy the high
	 * 16 bits of config_telemetry.
	 */
	if (pp_atomfwctrl_is_voltage_controlled_by_gpio_v4(hwmgr,
			VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2)) {
		if (!pp_atomfwctrl_get_voltage_table_v4(hwmgr,
				VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2,
				&vol_table)) {
			config_telemetry |=
					((vol_table.telemetry_slope << 24) & 0xff000000) |
					((vol_table.telemetry_offset << 16) & 0xff0000);
			data->mvdd_control = VEGA10_VOLTAGE_CONTROL_BY_SVID2;
		}
	}

	/* VDDCI_MEM: optional GPIO-LUT control, gated on the platform cap. */
	if (PP_CAP(PHM_PlatformCaps_ControlVDDCI)) {
		if (pp_atomfwctrl_is_voltage_controlled_by_gpio_v4(hwmgr,
				VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
			data->vddci_control = VEGA10_VOLTAGE_CONTROL_BY_GPIO;
	}

	data->config_telemetry = config_telemetry;

	vega10_set_features_platform_caps(hwmgr);

	vega10_init_dpm_defaults(hwmgr);

#ifdef PPLIB_VEGA10_EVV_SUPPORT
	/* Get leakage voltage based on leakage ID. */
	PP_ASSERT_WITH_CODE(!vega10_get_evv_voltages(hwmgr),
			"Get EVV Voltage Failed.  Abort Driver loading!",
			return -1);
#endif

	/* Patch our voltage dependency table with actual leakage voltage
	 * We need to perform leakage translation before it's used by other functions
	 */
	vega10_complete_dependency_tables(hwmgr);

	/* Parse pptable data read from VBIOS */
	vega10_set_private_data_based_on_pptable(hwmgr);

	data->is_tlu_enabled = false;

	hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
			VEGA10_MAX_HARDWARE_POWERLEVELS;
	hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
	hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;

	hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */
	/* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. */
	hwmgr->platform_descriptor.clockStep.engineClock = 500;
	hwmgr->platform_descriptor.clockStep.memoryClock = 500;

	/* Query the active CU count; on query failure total_active_cus ends
	 * up 0 and the non-zero status is propagated via `result`.
	 */
	sys_info.size = sizeof(struct cgs_system_info);
	sys_info.info_id = CGS_SYSTEM_INFO_GFX_CU_INFO;
	result = cgs_query_system_info(hwmgr->device, &sys_info);
	data->total_active_cus = sys_info.value;
	/* Setup default Overdrive Fan control settings */
	data->odn_fan_table.target_fan_speed =
			hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM;
	data->odn_fan_table.target_temperature =
			hwmgr->thermal_controller.
			advanceFanControlParameters.ucTargetTemperature;
	data->odn_fan_table.min_performance_clock =
			hwmgr->thermal_controller.advanceFanControlParameters.
			ulMinFanSCLKAcousticLimit;
	data->odn_fan_table.min_fan_limit =
			hwmgr->thermal_controller.
			advanceFanControlParameters.usFanPWMMinLimit *
			hwmgr->thermal_controller.fanInfo.ulMaxRPM / 100;

	return result;
}
864
865 static int vega10_init_sclk_threshold(struct pp_hwmgr *hwmgr)
866 {
867 struct vega10_hwmgr *data =
868 (struct vega10_hwmgr *)(hwmgr->backend);
869
870 data->low_sclk_interrupt_threshold = 0;
871
872 return 0;
873 }
874
875 static int vega10_setup_dpm_led_config(struct pp_hwmgr *hwmgr)
876 {
877 struct vega10_hwmgr *data =
878 (struct vega10_hwmgr *)(hwmgr->backend);
879 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
880
881 struct pp_atomfwctrl_voltage_table table;
882 uint8_t i, j;
883 uint32_t mask = 0;
884 uint32_t tmp;
885 int32_t ret = 0;
886
887 ret = pp_atomfwctrl_get_voltage_table_v4(hwmgr, VOLTAGE_TYPE_LEDDPM,
888 VOLTAGE_OBJ_GPIO_LUT, &table);
889
890 if (!ret) {
891 tmp = table.mask_low;
892 for (i = 0, j = 0; i < 32; i++) {
893 if (tmp & 1) {
894 mask |= (uint32_t)(i << (8 * j));
895 if (++j >= 3)
896 break;
897 }
898 tmp >>= 1;
899 }
900 }
901
902 pp_table->LedPin0 = (uint8_t)(mask & 0xff);
903 pp_table->LedPin1 = (uint8_t)((mask >> 8) & 0xff);
904 pp_table->LedPin2 = (uint8_t)((mask >> 16) & 0xff);
905 return 0;
906 }
907
/* Per-ASIC setup task: resets the low-SCLK interrupt threshold and
 * writes the DPM LED pin mapping into the SMC pptable.  On failure of
 * either step, PP_ASSERT_WITH_CODE logs the message and returns -EINVAL.
 */
static int vega10_setup_asic_task(struct pp_hwmgr *hwmgr)
{
	PP_ASSERT_WITH_CODE(!vega10_init_sclk_threshold(hwmgr),
			"Failed to init sclk threshold!",
			return -EINVAL);

	PP_ASSERT_WITH_CODE(!vega10_setup_dpm_led_config(hwmgr),
			"Failed to set up led dpm config!",
			return -EINVAL);

	return 0;
}
920
921 static bool vega10_is_dpm_running(struct pp_hwmgr *hwmgr)
922 {
923 uint32_t features_enabled;
924
925 if (!vega10_get_smc_features(hwmgr, &features_enabled)) {
926 if (features_enabled & SMC_DPM_FEATURES)
927 return true;
928 }
929 return false;
930 }
931
932 /**
933 * Remove repeated voltage values and create table with unique values.
934 *
935 * @param hwmgr the address of the powerplay hardware manager.
936 * @param vol_table the pointer to changing voltage table
937 * @return 0 in success
938 */
939
940 static int vega10_trim_voltage_table(struct pp_hwmgr *hwmgr,
941 struct pp_atomfwctrl_voltage_table *vol_table)
942 {
943 uint32_t i, j;
944 uint16_t vvalue;
945 bool found = false;
946 struct pp_atomfwctrl_voltage_table *table;
947
948 PP_ASSERT_WITH_CODE(vol_table,
949 "Voltage Table empty.", return -EINVAL);
950 table = kzalloc(sizeof(struct pp_atomfwctrl_voltage_table),
951 GFP_KERNEL);
952
953 if (!table)
954 return -ENOMEM;
955
956 table->mask_low = vol_table->mask_low;
957 table->phase_delay = vol_table->phase_delay;
958
959 for (i = 0; i < vol_table->count; i++) {
960 vvalue = vol_table->entries[i].value;
961 found = false;
962
963 for (j = 0; j < table->count; j++) {
964 if (vvalue == table->entries[j].value) {
965 found = true;
966 break;
967 }
968 }
969
970 if (!found) {
971 table->entries[table->count].value = vvalue;
972 table->entries[table->count].smio_low =
973 vol_table->entries[i].smio_low;
974 table->count++;
975 }
976 }
977
978 memcpy(vol_table, table, sizeof(struct pp_atomfwctrl_voltage_table));
979 kfree(table);
980
981 return 0;
982 }
983
984 static int vega10_get_mvdd_voltage_table(struct pp_hwmgr *hwmgr,
985 phm_ppt_v1_clock_voltage_dependency_table *dep_table,
986 struct pp_atomfwctrl_voltage_table *vol_table)
987 {
988 int i;
989
990 PP_ASSERT_WITH_CODE(dep_table->count,
991 "Voltage Dependency Table empty.",
992 return -EINVAL);
993
994 vol_table->mask_low = 0;
995 vol_table->phase_delay = 0;
996 vol_table->count = dep_table->count;
997
998 for (i = 0; i < vol_table->count; i++) {
999 vol_table->entries[i].value = dep_table->entries[i].mvdd;
1000 vol_table->entries[i].smio_low = 0;
1001 }
1002
1003 PP_ASSERT_WITH_CODE(!vega10_trim_voltage_table(hwmgr,
1004 vol_table),
1005 "Failed to trim MVDD Table!",
1006 return -1);
1007
1008 return 0;
1009 }
1010
1011 static int vega10_get_vddci_voltage_table(struct pp_hwmgr *hwmgr,
1012 phm_ppt_v1_clock_voltage_dependency_table *dep_table,
1013 struct pp_atomfwctrl_voltage_table *vol_table)
1014 {
1015 uint32_t i;
1016
1017 PP_ASSERT_WITH_CODE(dep_table->count,
1018 "Voltage Dependency Table empty.",
1019 return -EINVAL);
1020
1021 vol_table->mask_low = 0;
1022 vol_table->phase_delay = 0;
1023 vol_table->count = dep_table->count;
1024
1025 for (i = 0; i < dep_table->count; i++) {
1026 vol_table->entries[i].value = dep_table->entries[i].vddci;
1027 vol_table->entries[i].smio_low = 0;
1028 }
1029
1030 PP_ASSERT_WITH_CODE(!vega10_trim_voltage_table(hwmgr, vol_table),
1031 "Failed to trim VDDCI table.",
1032 return -1);
1033
1034 return 0;
1035 }
1036
1037 static int vega10_get_vdd_voltage_table(struct pp_hwmgr *hwmgr,
1038 phm_ppt_v1_clock_voltage_dependency_table *dep_table,
1039 struct pp_atomfwctrl_voltage_table *vol_table)
1040 {
1041 int i;
1042
1043 PP_ASSERT_WITH_CODE(dep_table->count,
1044 "Voltage Dependency Table empty.",
1045 return -EINVAL);
1046
1047 vol_table->mask_low = 0;
1048 vol_table->phase_delay = 0;
1049 vol_table->count = dep_table->count;
1050
1051 for (i = 0; i < vol_table->count; i++) {
1052 vol_table->entries[i].value = dep_table->entries[i].vddc;
1053 vol_table->entries[i].smio_low = 0;
1054 }
1055
1056 return 0;
1057 }
1058
1059 /* ---- Voltage Tables ----
1060 * If the voltage table would be bigger than
1061 * what will fit into the state table on
1062 * the SMC keep only the higher entries.
1063 */
1064 static void vega10_trim_voltage_table_to_fit_state_table(
1065 struct pp_hwmgr *hwmgr,
1066 uint32_t max_vol_steps,
1067 struct pp_atomfwctrl_voltage_table *vol_table)
1068 {
1069 unsigned int i, diff;
1070
1071 if (vol_table->count <= max_vol_steps)
1072 return;
1073
1074 diff = vol_table->count - max_vol_steps;
1075
1076 for (i = 0; i < max_vol_steps; i++)
1077 vol_table->entries[i] = vol_table->entries[i + diff];
1078
1079 vol_table->count = max_vol_steps;
1080 }
1081
/**
 * Create Voltage Tables.
 *
 * Builds the MVDD, VDDCI and VDDC voltage tables from the pptable
 * clock/voltage dependency tables, but only for rails that are not
 * GPIO-controlled (control mode SVID2 or NONE).  Each table is then
 * trimmed to at most 16 entries so it fits the SMC state table.
 *
 * @param hwmgr the address of the powerplay hardware manager.
 * @return always 0
 */
static int vega10_construct_voltage_tables(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)hwmgr->pptable;
	int result;

	if (data->mvdd_control == VEGA10_VOLTAGE_CONTROL_BY_SVID2 ||
			data->mvdd_control == VEGA10_VOLTAGE_CONTROL_NONE) {
		result = vega10_get_mvdd_voltage_table(hwmgr,
				table_info->vdd_dep_on_mclk,
				&(data->mvdd_voltage_table));
		PP_ASSERT_WITH_CODE(!result,
				"Failed to retrieve MVDDC table!",
				return result);
	}

	if (data->vddci_control == VEGA10_VOLTAGE_CONTROL_NONE) {
		result = vega10_get_vddci_voltage_table(hwmgr,
				table_info->vdd_dep_on_mclk,
				&(data->vddci_voltage_table));
		PP_ASSERT_WITH_CODE(!result,
				"Failed to retrieve VDDCI_MEM table!",
				return result);
	}

	if (data->vddc_control == VEGA10_VOLTAGE_CONTROL_BY_SVID2 ||
			data->vddc_control == VEGA10_VOLTAGE_CONTROL_NONE) {
		result = vega10_get_vdd_voltage_table(hwmgr,
				table_info->vdd_dep_on_sclk,
				&(data->vddc_voltage_table));
		PP_ASSERT_WITH_CODE(!result,
				"Failed to retrieve VDDCR_SOC table!",
				return result);
	}

	/* Below, the "code" argument of PP_ASSERT_WITH_CODE is the trim
	 * call itself: when a table exceeds 16 entries the warning is
	 * logged and the table is trimmed in place (not an error path).
	 */
	PP_ASSERT_WITH_CODE(data->vddc_voltage_table.count <= 16,
			"Too many voltage values for VDDC. Trimming to fit state table.",
			vega10_trim_voltage_table_to_fit_state_table(hwmgr,
					16, &(data->vddc_voltage_table)));

	PP_ASSERT_WITH_CODE(data->vddci_voltage_table.count <= 16,
			"Too many voltage values for VDDCI. Trimming to fit state table.",
			vega10_trim_voltage_table_to_fit_state_table(hwmgr,
					16, &(data->vddci_voltage_table)));

	PP_ASSERT_WITH_CODE(data->mvdd_voltage_table.count <= 16,
			"Too many voltage values for MVDD. Trimming to fit state table.",
			vega10_trim_voltage_table_to_fit_state_table(hwmgr,
					16, &(data->mvdd_voltage_table)));


	return 0;
}
1142
1143 /*
1144 * @fn vega10_init_dpm_state
1145 * @brief Function to initialize all Soft Min/Max and Hard Min/Max to 0xff.
1146 *
1147 * @param dpm_state - the address of the DPM Table to initiailize.
1148 * @return None.
1149 */
1150 static void vega10_init_dpm_state(struct vega10_dpm_state *dpm_state)
1151 {
1152 dpm_state->soft_min_level = 0xff;
1153 dpm_state->soft_max_level = 0xff;
1154 dpm_state->hard_min_level = 0xff;
1155 dpm_state->hard_max_level = 0xff;
1156 }
1157
1158 static void vega10_setup_default_single_dpm_table(struct pp_hwmgr *hwmgr,
1159 struct vega10_single_dpm_table *dpm_table,
1160 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table)
1161 {
1162 int i;
1163
1164 dpm_table->count = 0;
1165
1166 for (i = 0; i < dep_table->count; i++) {
1167 if (i == 0 || dpm_table->dpm_levels[dpm_table->count - 1].value <=
1168 dep_table->entries[i].clk) {
1169 dpm_table->dpm_levels[dpm_table->count].value =
1170 dep_table->entries[i].clk;
1171 dpm_table->dpm_levels[dpm_table->count].enabled = true;
1172 dpm_table->count++;
1173 }
1174 }
1175 }
1176 static int vega10_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
1177 {
1178 struct vega10_hwmgr *data =
1179 (struct vega10_hwmgr *)(hwmgr->backend);
1180 struct vega10_pcie_table *pcie_table = &(data->dpm_table.pcie_table);
1181 struct phm_ppt_v2_information *table_info =
1182 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1183 struct phm_ppt_v1_pcie_table *bios_pcie_table =
1184 table_info->pcie_table;
1185 uint32_t i;
1186
1187 PP_ASSERT_WITH_CODE(bios_pcie_table->count,
1188 "Incorrect number of PCIE States from VBIOS!",
1189 return -1);
1190
1191 for (i = 0; i < NUM_LINK_LEVELS; i++) {
1192 if (data->registry_data.pcieSpeedOverride)
1193 pcie_table->pcie_gen[i] =
1194 data->registry_data.pcieSpeedOverride;
1195 else
1196 pcie_table->pcie_gen[i] =
1197 bios_pcie_table->entries[i].gen_speed;
1198
1199 if (data->registry_data.pcieLaneOverride)
1200 pcie_table->pcie_lane[i] = (uint8_t)encode_pcie_lane_width(
1201 data->registry_data.pcieLaneOverride);
1202 else
1203 pcie_table->pcie_lane[i] = (uint8_t)encode_pcie_lane_width(
1204 bios_pcie_table->entries[i].lane_width);
1205 if (data->registry_data.pcieClockOverride)
1206 pcie_table->lclk[i] =
1207 data->registry_data.pcieClockOverride;
1208 else
1209 pcie_table->lclk[i] =
1210 bios_pcie_table->entries[i].pcie_sclk;
1211 }
1212
1213 pcie_table->count = NUM_LINK_LEVELS;
1214
1215 return 0;
1216 }
1217
1218 /*
1219 * This function is to initialize all DPM state tables
1220 * for SMU based on the dependency table.
1221 * Dynamic state patching function will then trim these
1222 * state tables to the allowed range based
1223 * on the power policy or external client requests,
1224 * such as UVD request, etc.
1225 */
1226 static int vega10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
1227 {
1228 struct vega10_hwmgr *data =
1229 (struct vega10_hwmgr *)(hwmgr->backend);
1230 struct phm_ppt_v2_information *table_info =
1231 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1232 struct vega10_single_dpm_table *dpm_table;
1233 uint32_t i;
1234
1235 struct phm_ppt_v1_clock_voltage_dependency_table *dep_soc_table =
1236 table_info->vdd_dep_on_socclk;
1237 struct phm_ppt_v1_clock_voltage_dependency_table *dep_gfx_table =
1238 table_info->vdd_dep_on_sclk;
1239 struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
1240 table_info->vdd_dep_on_mclk;
1241 struct phm_ppt_v1_mm_clock_voltage_dependency_table *dep_mm_table =
1242 table_info->mm_dep_table;
1243 struct phm_ppt_v1_clock_voltage_dependency_table *dep_dcef_table =
1244 table_info->vdd_dep_on_dcefclk;
1245 struct phm_ppt_v1_clock_voltage_dependency_table *dep_pix_table =
1246 table_info->vdd_dep_on_pixclk;
1247 struct phm_ppt_v1_clock_voltage_dependency_table *dep_disp_table =
1248 table_info->vdd_dep_on_dispclk;
1249 struct phm_ppt_v1_clock_voltage_dependency_table *dep_phy_table =
1250 table_info->vdd_dep_on_phyclk;
1251
1252 PP_ASSERT_WITH_CODE(dep_soc_table,
1253 "SOCCLK dependency table is missing. This table is mandatory",
1254 return -EINVAL);
1255 PP_ASSERT_WITH_CODE(dep_soc_table->count >= 1,
1256 "SOCCLK dependency table is empty. This table is mandatory",
1257 return -EINVAL);
1258
1259 PP_ASSERT_WITH_CODE(dep_gfx_table,
1260 "GFXCLK dependency table is missing. This table is mandatory",
1261 return -EINVAL);
1262 PP_ASSERT_WITH_CODE(dep_gfx_table->count >= 1,
1263 "GFXCLK dependency table is empty. This table is mandatory",
1264 return -EINVAL);
1265
1266 PP_ASSERT_WITH_CODE(dep_mclk_table,
1267 "MCLK dependency table is missing. This table is mandatory",
1268 return -EINVAL);
1269 PP_ASSERT_WITH_CODE(dep_mclk_table->count >= 1,
1270 "MCLK dependency table has to have is missing. This table is mandatory",
1271 return -EINVAL);
1272
1273 /* Initialize Sclk DPM table based on allow Sclk values */
1274 dpm_table = &(data->dpm_table.soc_table);
1275 vega10_setup_default_single_dpm_table(hwmgr,
1276 dpm_table,
1277 dep_soc_table);
1278
1279 vega10_init_dpm_state(&(dpm_table->dpm_state));
1280
1281 dpm_table = &(data->dpm_table.gfx_table);
1282 vega10_setup_default_single_dpm_table(hwmgr,
1283 dpm_table,
1284 dep_gfx_table);
1285 vega10_init_dpm_state(&(dpm_table->dpm_state));
1286
1287 /* Initialize Mclk DPM table based on allow Mclk values */
1288 data->dpm_table.mem_table.count = 0;
1289 dpm_table = &(data->dpm_table.mem_table);
1290 vega10_setup_default_single_dpm_table(hwmgr,
1291 dpm_table,
1292 dep_mclk_table);
1293 vega10_init_dpm_state(&(dpm_table->dpm_state));
1294
1295 data->dpm_table.eclk_table.count = 0;
1296 dpm_table = &(data->dpm_table.eclk_table);
1297 for (i = 0; i < dep_mm_table->count; i++) {
1298 if (i == 0 || dpm_table->dpm_levels
1299 [dpm_table->count - 1].value <=
1300 dep_mm_table->entries[i].eclk) {
1301 dpm_table->dpm_levels[dpm_table->count].value =
1302 dep_mm_table->entries[i].eclk;
1303 dpm_table->dpm_levels[dpm_table->count].enabled =
1304 (i == 0) ? true : false;
1305 dpm_table->count++;
1306 }
1307 }
1308 vega10_init_dpm_state(&(dpm_table->dpm_state));
1309
1310 data->dpm_table.vclk_table.count = 0;
1311 data->dpm_table.dclk_table.count = 0;
1312 dpm_table = &(data->dpm_table.vclk_table);
1313 for (i = 0; i < dep_mm_table->count; i++) {
1314 if (i == 0 || dpm_table->dpm_levels
1315 [dpm_table->count - 1].value <=
1316 dep_mm_table->entries[i].vclk) {
1317 dpm_table->dpm_levels[dpm_table->count].value =
1318 dep_mm_table->entries[i].vclk;
1319 dpm_table->dpm_levels[dpm_table->count].enabled =
1320 (i == 0) ? true : false;
1321 dpm_table->count++;
1322 }
1323 }
1324 vega10_init_dpm_state(&(dpm_table->dpm_state));
1325
1326 dpm_table = &(data->dpm_table.dclk_table);
1327 for (i = 0; i < dep_mm_table->count; i++) {
1328 if (i == 0 || dpm_table->dpm_levels
1329 [dpm_table->count - 1].value <=
1330 dep_mm_table->entries[i].dclk) {
1331 dpm_table->dpm_levels[dpm_table->count].value =
1332 dep_mm_table->entries[i].dclk;
1333 dpm_table->dpm_levels[dpm_table->count].enabled =
1334 (i == 0) ? true : false;
1335 dpm_table->count++;
1336 }
1337 }
1338 vega10_init_dpm_state(&(dpm_table->dpm_state));
1339
1340 /* Assume there is no headless Vega10 for now */
1341 dpm_table = &(data->dpm_table.dcef_table);
1342 vega10_setup_default_single_dpm_table(hwmgr,
1343 dpm_table,
1344 dep_dcef_table);
1345
1346 vega10_init_dpm_state(&(dpm_table->dpm_state));
1347
1348 dpm_table = &(data->dpm_table.pixel_table);
1349 vega10_setup_default_single_dpm_table(hwmgr,
1350 dpm_table,
1351 dep_pix_table);
1352
1353 vega10_init_dpm_state(&(dpm_table->dpm_state));
1354
1355 dpm_table = &(data->dpm_table.display_table);
1356 vega10_setup_default_single_dpm_table(hwmgr,
1357 dpm_table,
1358 dep_disp_table);
1359
1360 vega10_init_dpm_state(&(dpm_table->dpm_state));
1361
1362 dpm_table = &(data->dpm_table.phy_table);
1363 vega10_setup_default_single_dpm_table(hwmgr,
1364 dpm_table,
1365 dep_phy_table);
1366
1367 vega10_init_dpm_state(&(dpm_table->dpm_state));
1368
1369 vega10_setup_default_pcie_table(hwmgr);
1370
1371 /* save a copy of the default DPM table */
1372 memcpy(&(data->golden_dpm_table), &(data->dpm_table),
1373 sizeof(struct vega10_dpm_table));
1374
1375 if (PP_CAP(PHM_PlatformCaps_ODNinACSupport) ||
1376 PP_CAP(PHM_PlatformCaps_ODNinDCSupport)) {
1377 data->odn_dpm_table.odn_core_clock_dpm_levels.
1378 number_of_performance_levels = data->dpm_table.gfx_table.count;
1379 for (i = 0; i < data->dpm_table.gfx_table.count; i++) {
1380 data->odn_dpm_table.odn_core_clock_dpm_levels.
1381 performance_level_entries[i].clock =
1382 data->dpm_table.gfx_table.dpm_levels[i].value;
1383 data->odn_dpm_table.odn_core_clock_dpm_levels.
1384 performance_level_entries[i].enabled = true;
1385 }
1386
1387 data->odn_dpm_table.vdd_dependency_on_sclk.count =
1388 dep_gfx_table->count;
1389 for (i = 0; i < dep_gfx_table->count; i++) {
1390 data->odn_dpm_table.vdd_dependency_on_sclk.entries[i].clk =
1391 dep_gfx_table->entries[i].clk;
1392 data->odn_dpm_table.vdd_dependency_on_sclk.entries[i].vddInd =
1393 dep_gfx_table->entries[i].vddInd;
1394 data->odn_dpm_table.vdd_dependency_on_sclk.entries[i].cks_enable =
1395 dep_gfx_table->entries[i].cks_enable;
1396 data->odn_dpm_table.vdd_dependency_on_sclk.entries[i].cks_voffset =
1397 dep_gfx_table->entries[i].cks_voffset;
1398 }
1399
1400 data->odn_dpm_table.odn_memory_clock_dpm_levels.
1401 number_of_performance_levels = data->dpm_table.mem_table.count;
1402 for (i = 0; i < data->dpm_table.mem_table.count; i++) {
1403 data->odn_dpm_table.odn_memory_clock_dpm_levels.
1404 performance_level_entries[i].clock =
1405 data->dpm_table.mem_table.dpm_levels[i].value;
1406 data->odn_dpm_table.odn_memory_clock_dpm_levels.
1407 performance_level_entries[i].enabled = true;
1408 }
1409
1410 data->odn_dpm_table.vdd_dependency_on_mclk.count = dep_mclk_table->count;
1411 for (i = 0; i < dep_mclk_table->count; i++) {
1412 data->odn_dpm_table.vdd_dependency_on_mclk.entries[i].clk =
1413 dep_mclk_table->entries[i].clk;
1414 data->odn_dpm_table.vdd_dependency_on_mclk.entries[i].vddInd =
1415 dep_mclk_table->entries[i].vddInd;
1416 data->odn_dpm_table.vdd_dependency_on_mclk.entries[i].vddci =
1417 dep_mclk_table->entries[i].vddci;
1418 }
1419 }
1420
1421 return 0;
1422 }
1423
/*
 * @fn vega10_populate_ulv_state
 * @brief Function to provide parameters for Ultra Low Voltage state to SMC.
 *
 * @param hwmgr - the address of the hardware manager.
 * @return Always 0.
 */
static int vega10_populate_ulv_state(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data =
			(struct vega10_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)(hwmgr->pptable);

	/* ULV voltage offset and clock dividers come straight from the
	 * pptable. */
	data->smc_state_table.pp_table.UlvOffsetVid =
			(uint8_t)table_info->us_ulv_voltage_offset;

	data->smc_state_table.pp_table.UlvSmnclkDid =
			(uint8_t)(table_info->us_ulv_smnclk_did);
	data->smc_state_table.pp_table.UlvMp1clkDid =
			(uint8_t)(table_info->us_ulv_mp1clk_did);
	data->smc_state_table.pp_table.UlvGfxclkBypass =
			(uint8_t)(table_info->us_ulv_gfxclk_bypass);
	/* Phase-shedding PSI0/PSI1 mirror the VDDC voltage table flags. */
	data->smc_state_table.pp_table.UlvPhaseSheddingPsi0 =
			(uint8_t)(data->vddc_voltage_table.psi0_enable);
	data->smc_state_table.pp_table.UlvPhaseSheddingPsi1 =
			(uint8_t)(data->vddc_voltage_table.psi1_enable);

	return 0;
}
1454
1455 static int vega10_populate_single_lclk_level(struct pp_hwmgr *hwmgr,
1456 uint32_t lclock, uint8_t *curr_lclk_did)
1457 {
1458 struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1459
1460 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(
1461 hwmgr,
1462 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
1463 lclock, &dividers),
1464 "Failed to get LCLK clock settings from VBIOS!",
1465 return -1);
1466
1467 *curr_lclk_did = dividers.ulDid;
1468
1469 return 0;
1470 }
1471
1472 static int vega10_populate_smc_link_levels(struct pp_hwmgr *hwmgr)
1473 {
1474 int result = -1;
1475 struct vega10_hwmgr *data =
1476 (struct vega10_hwmgr *)(hwmgr->backend);
1477 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
1478 struct vega10_pcie_table *pcie_table =
1479 &(data->dpm_table.pcie_table);
1480 uint32_t i, j;
1481
1482 for (i = 0; i < pcie_table->count; i++) {
1483 pp_table->PcieGenSpeed[i] = pcie_table->pcie_gen[i];
1484 pp_table->PcieLaneCount[i] = pcie_table->pcie_lane[i];
1485
1486 result = vega10_populate_single_lclk_level(hwmgr,
1487 pcie_table->lclk[i], &(pp_table->LclkDid[i]));
1488 if (result) {
1489 pr_info("Populate LClock Level %d Failed!\n", i);
1490 return result;
1491 }
1492 }
1493
1494 j = i - 1;
1495 while (i < NUM_LINK_LEVELS) {
1496 pp_table->PcieGenSpeed[i] = pcie_table->pcie_gen[j];
1497 pp_table->PcieLaneCount[i] = pcie_table->pcie_lane[j];
1498
1499 result = vega10_populate_single_lclk_level(hwmgr,
1500 pcie_table->lclk[j], &(pp_table->LclkDid[i]));
1501 if (result) {
1502 pr_info("Populate LClock Level %d Failed!\n", i);
1503 return result;
1504 }
1505 i++;
1506 }
1507
1508 return result;
1509 }
1510
/**
 * Populates single SMC GFXSCLK structure using the provided engine clock
 *
 * @param hwmgr the address of the hardware manager
 * @param gfx_clock the GFX clock to use to populate the structure.
 * @param current_gfxclk_level location in PPTable for the SMC GFXCLK structure.
 * @param acg_freq output: the clock expressed for the ACG table.
 */

static int vega10_populate_single_gfx_level(struct pp_hwmgr *hwmgr,
		uint32_t gfx_clock, PllSetting_t *current_gfxclk_level,
		uint32_t *acg_freq)
{
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)(hwmgr->pptable);
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_sclk =
			table_info->vdd_dep_on_sclk;
	struct vega10_hwmgr *data =
			(struct vega10_hwmgr *)(hwmgr->backend);
	struct pp_atomfwctrl_clock_dividers_soc15 dividers;
	uint32_t gfx_max_clock =
			hwmgr->platform_descriptor.overdriveLimit.engineClock;
	uint32_t i = 0;

	/* With a pending OverDrive voltage update, use the ODN copy of
	 * the SCLK dependency table instead of the pptable one. */
	if (data->apply_overdrive_next_settings_mask &
			DPMTABLE_OD_UPDATE_VDDC)
		dep_on_sclk = (struct phm_ppt_v1_clock_voltage_dependency_table *)
				&(data->odn_dpm_table.vdd_dependency_on_sclk);

	PP_ASSERT_WITH_CODE(dep_on_sclk,
			"Invalid SOC_VDD-GFX_CLK Dependency Table!",
			return -EINVAL);

	/* On an OD SCLK update, clamp to the overdrive limit; otherwise
	 * the requested clock must exist in the dependency table. */
	if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_SCLK)
		gfx_clock = gfx_clock > gfx_max_clock ? gfx_max_clock : gfx_clock;
	else {
		for (i = 0; i < dep_on_sclk->count; i++) {
			if (dep_on_sclk->entries[i].clk == gfx_clock)
				break;
		}
		PP_ASSERT_WITH_CODE(dep_on_sclk->count > i,
				"Cannot find gfx_clk in SOC_VDD-GFX_CLK!",
				return -EINVAL);
	}

	PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
			COMPUTE_GPUCLK_INPUT_FLAG_GFXCLK,
			gfx_clock, &dividers),
			"Failed to get GFX Clock settings from VBIOS!",
			return -EINVAL);

	/* Feedback Multiplier: bit 0:8 int, bit 15:12 post_div, bit 31:16 frac */
	current_gfxclk_level->FbMult =
			cpu_to_le32(dividers.ulPll_fb_mult);
	/* Spread FB Multiplier bit: bit 0:8 int, bit 31:16 frac */
	current_gfxclk_level->SsOn = dividers.ucPll_ss_enable;
	current_gfxclk_level->SsFbMult =
			cpu_to_le32(dividers.ulPll_ss_fbsmult);
	current_gfxclk_level->SsSlewFrac =
			cpu_to_le16(dividers.usPll_ss_slew_frac);
	current_gfxclk_level->Did = (uint8_t)(dividers.ulDid);

	*acg_freq = gfx_clock / 100; /* 100 Khz to Mhz conversion */

	return 0;
}
1576
1577 /**
1578 * @brief Populates single SMC SOCCLK structure using the provided clock.
1579 *
1580 * @param hwmgr - the address of the hardware manager.
1581 * @param soc_clock - the SOC clock to use to populate the structure.
1582 * @param current_socclk_level - location in PPTable for the SMC SOCCLK structure.
1583 * @return 0 on success..
1584 */
1585 static int vega10_populate_single_soc_level(struct pp_hwmgr *hwmgr,
1586 uint32_t soc_clock, uint8_t *current_soc_did,
1587 uint8_t *current_vol_index)
1588 {
1589 struct phm_ppt_v2_information *table_info =
1590 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1591 struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_soc =
1592 table_info->vdd_dep_on_socclk;
1593 struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1594 uint32_t i;
1595
1596 PP_ASSERT_WITH_CODE(dep_on_soc,
1597 "Invalid SOC_VDD-SOC_CLK Dependency Table!",
1598 return -EINVAL);
1599 for (i = 0; i < dep_on_soc->count; i++) {
1600 if (dep_on_soc->entries[i].clk == soc_clock)
1601 break;
1602 }
1603 PP_ASSERT_WITH_CODE(dep_on_soc->count > i,
1604 "Cannot find SOC_CLK in SOC_VDD-SOC_CLK Dependency Table",
1605 return -EINVAL);
1606 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
1607 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
1608 soc_clock, &dividers),
1609 "Failed to get SOC Clock settings from VBIOS!",
1610 return -EINVAL);
1611
1612 *current_soc_did = (uint8_t)dividers.ulDid;
1613 *current_vol_index = (uint8_t)(dep_on_soc->entries[i].vddInd);
1614
1615 return 0;
1616 }
1617
1618 uint16_t vega10_locate_vddc_given_clock(struct pp_hwmgr *hwmgr,
1619 uint32_t clk,
1620 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table)
1621 {
1622 uint16_t i;
1623
1624 for (i = 0; i < dep_table->count; i++) {
1625 if (dep_table->entries[i].clk == clk)
1626 return dep_table->entries[i].vddc;
1627 }
1628
1629 pr_info("[LocateVddcGivenClock] Cannot locate SOC Vddc for this clock!");
1630 return 0;
1631 }
1632
1633 /**
1634 * Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states
1635 *
1636 * @param hwmgr the address of the hardware manager
1637 */
1638 static int vega10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
1639 {
1640 struct vega10_hwmgr *data =
1641 (struct vega10_hwmgr *)(hwmgr->backend);
1642 struct phm_ppt_v2_information *table_info =
1643 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1644 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
1645 table_info->vdd_dep_on_socclk;
1646 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
1647 struct vega10_single_dpm_table *dpm_table = &(data->dpm_table.gfx_table);
1648 int result = 0;
1649 uint32_t i, j;
1650
1651 for (i = 0; i < dpm_table->count; i++) {
1652 result = vega10_populate_single_gfx_level(hwmgr,
1653 dpm_table->dpm_levels[i].value,
1654 &(pp_table->GfxclkLevel[i]),
1655 &(pp_table->AcgFreqTable[i]));
1656 if (result)
1657 return result;
1658 }
1659
1660 j = i - 1;
1661 while (i < NUM_GFXCLK_DPM_LEVELS) {
1662 result = vega10_populate_single_gfx_level(hwmgr,
1663 dpm_table->dpm_levels[j].value,
1664 &(pp_table->GfxclkLevel[i]),
1665 &(pp_table->AcgFreqTable[i]));
1666 if (result)
1667 return result;
1668 i++;
1669 }
1670
1671 pp_table->GfxclkSlewRate =
1672 cpu_to_le16(table_info->us_gfxclk_slew_rate);
1673
1674 dpm_table = &(data->dpm_table.soc_table);
1675 for (i = 0; i < dpm_table->count; i++) {
1676 pp_table->SocVid[i] =
1677 (uint8_t)convert_to_vid(
1678 vega10_locate_vddc_given_clock(hwmgr,
1679 dpm_table->dpm_levels[i].value,
1680 dep_table));
1681 result = vega10_populate_single_soc_level(hwmgr,
1682 dpm_table->dpm_levels[i].value,
1683 &(pp_table->SocclkDid[i]),
1684 &(pp_table->SocDpmVoltageIndex[i]));
1685 if (result)
1686 return result;
1687 }
1688
1689 j = i - 1;
1690 while (i < NUM_SOCCLK_DPM_LEVELS) {
1691 pp_table->SocVid[i] = pp_table->SocVid[j];
1692 result = vega10_populate_single_soc_level(hwmgr,
1693 dpm_table->dpm_levels[j].value,
1694 &(pp_table->SocclkDid[i]),
1695 &(pp_table->SocDpmVoltageIndex[i]));
1696 if (result)
1697 return result;
1698 i++;
1699 }
1700
1701 return result;
1702 }
1703
/**
 * @brief Populates single SMC MCLK structure using the provided memory clock.
 *
 * @param hwmgr - the address of the hardware manager.
 * @param mem_clock - the memory clock to use to populate the structure.
 * @param current_mem_vid - output MVDD VID for this level.
 * @param current_memclk_level - output PLL settings (FbMult/Did) for this level.
 * @param current_mem_soc_vind - output SOC voltage index for this level.
 * @return 0 on success..
 */
static int vega10_populate_single_memory_level(struct pp_hwmgr *hwmgr,
		uint32_t mem_clock, uint8_t *current_mem_vid,
		PllSetting_t *current_memclk_level, uint8_t *current_mem_soc_vind)
{
	struct vega10_hwmgr *data =
			(struct vega10_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)(hwmgr->pptable);
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_mclk =
			table_info->vdd_dep_on_mclk;
	struct pp_atomfwctrl_clock_dividers_soc15 dividers;
	uint32_t mem_max_clock =
			hwmgr->platform_descriptor.overdriveLimit.memoryClock;
	uint32_t i = 0;

	/* With a pending OverDrive voltage update, use the ODN copy of
	 * the MCLK dependency table instead of the pptable one. */
	if (data->apply_overdrive_next_settings_mask &
			DPMTABLE_OD_UPDATE_VDDC)
		dep_on_mclk = (struct phm_ppt_v1_clock_voltage_dependency_table *)
				&data->odn_dpm_table.vdd_dependency_on_mclk;

	PP_ASSERT_WITH_CODE(dep_on_mclk,
			"Invalid SOC_VDD-UCLK Dependency Table!",
			return -EINVAL);

	/* On an OD MCLK update, clamp to the overdrive limit; otherwise
	 * the requested clock must exist in the dependency table. */
	if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_MCLK)
		mem_clock = mem_clock > mem_max_clock ? mem_max_clock : mem_clock;
	else {
		for (i = 0; i < dep_on_mclk->count; i++) {
			if (dep_on_mclk->entries[i].clk == mem_clock)
				break;
		}
		PP_ASSERT_WITH_CODE(dep_on_mclk->count > i,
				"Cannot find UCLK in SOC_VDD-UCLK Dependency Table!",
				return -EINVAL);
	}

	PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(
			hwmgr, COMPUTE_GPUCLK_INPUT_FLAG_UCLK, mem_clock, &dividers),
			"Failed to get UCLK settings from VBIOS!",
			return -1);

	*current_mem_vid =
			(uint8_t)(convert_to_vid(dep_on_mclk->entries[i].mvdd));
	*current_mem_soc_vind =
			(uint8_t)(dep_on_mclk->entries[i].vddInd);
	current_memclk_level->FbMult = cpu_to_le32(dividers.ulPll_fb_mult);
	current_memclk_level->Did = (uint8_t)(dividers.ulDid);

	/* A divider ID of 0 from the VBIOS would be invalid. */
	PP_ASSERT_WITH_CODE(current_memclk_level->Did >= 1,
			"Invalid Divider ID!",
			return -EINVAL);

	return 0;
}
1765
/**
 * @brief Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states.
 *
 * Also reads the HBM interleave configuration from the DF registers to
 * derive the memory channel count/width for the SMC pptable.
 *
 * @param hwmgr - the address of the hardware manager.
 * @return 0 on success.
 */
static int vega10_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data =
			(struct vega10_hwmgr *)(hwmgr->backend);
	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
	struct vega10_single_dpm_table *dpm_table =
			&(data->dpm_table.mem_table);
	int result = 0;
	uint32_t i, j, reg, mem_channels;

	/* One SMC UCLK entry per trimmed DPM level. */
	for (i = 0; i < dpm_table->count; i++) {
		result = vega10_populate_single_memory_level(hwmgr,
				dpm_table->dpm_levels[i].value,
				&(pp_table->MemVid[i]),
				&(pp_table->UclkLevel[i]),
				&(pp_table->MemSocVoltageIndex[i]));
		if (result)
			return result;
	}

	/* Replicate the top level into the remaining SMC slots. */
	j = i - 1;
	while (i < NUM_UCLK_DPM_LEVELS) {
		result = vega10_populate_single_memory_level(hwmgr,
				dpm_table->dpm_levels[j].value,
				&(pp_table->MemVid[i]),
				&(pp_table->UclkLevel[i]),
				&(pp_table->MemSocVoltageIndex[i]));
		if (result)
			return result;
		i++;
	}

	/* Channel count comes from the DF DramBaseAddress0 interleave
	 * field; it indexes the channel_number lookup table. */
	reg = soc15_get_register_offset(DF_HWID, 0,
			mmDF_CS_AON0_DramBaseAddress0_BASE_IDX,
			mmDF_CS_AON0_DramBaseAddress0);
	mem_channels = (cgs_read_register(hwmgr->device, reg) &
			DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK) >>
			DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;
	PP_ASSERT_WITH_CODE(mem_channels < ARRAY_SIZE(channel_number),
			"Mem Channel Index Exceeded maximum!",
			return -1);

	pp_table->NumMemoryChannels = cpu_to_le16(mem_channels);
	pp_table->MemoryChannelWidth =
			cpu_to_le16(HBM_MEMORY_CHANNEL_WIDTH *
					channel_number[mem_channels]);

	pp_table->LowestUclkReservedForUlv =
			(uint8_t)(data->lowest_uclk_reserved_for_ulv);

	return result;
}
1824
1825 static int vega10_populate_single_display_type(struct pp_hwmgr *hwmgr,
1826 DSPCLK_e disp_clock)
1827 {
1828 struct vega10_hwmgr *data =
1829 (struct vega10_hwmgr *)(hwmgr->backend);
1830 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
1831 struct phm_ppt_v2_information *table_info =
1832 (struct phm_ppt_v2_information *)
1833 (hwmgr->pptable);
1834 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
1835 uint32_t i;
1836 uint16_t clk = 0, vddc = 0;
1837 uint8_t vid = 0;
1838
1839 switch (disp_clock) {
1840 case DSPCLK_DCEFCLK:
1841 dep_table = table_info->vdd_dep_on_dcefclk;
1842 break;
1843 case DSPCLK_DISPCLK:
1844 dep_table = table_info->vdd_dep_on_dispclk;
1845 break;
1846 case DSPCLK_PIXCLK:
1847 dep_table = table_info->vdd_dep_on_pixclk;
1848 break;
1849 case DSPCLK_PHYCLK:
1850 dep_table = table_info->vdd_dep_on_phyclk;
1851 break;
1852 default:
1853 return -1;
1854 }
1855
1856 PP_ASSERT_WITH_CODE(dep_table->count <= NUM_DSPCLK_LEVELS,
1857 "Number Of Entries Exceeded maximum!",
1858 return -1);
1859
1860 for (i = 0; i < dep_table->count; i++) {
1861 clk = (uint16_t)(dep_table->entries[i].clk / 100);
1862 vddc = table_info->vddc_lookup_table->
1863 entries[dep_table->entries[i].vddInd].us_vdd;
1864 vid = (uint8_t)convert_to_vid(vddc);
1865 pp_table->DisplayClockTable[disp_clock][i].Freq =
1866 cpu_to_le16(clk);
1867 pp_table->DisplayClockTable[disp_clock][i].Vid =
1868 cpu_to_le16(vid);
1869 }
1870
1871 while (i < NUM_DSPCLK_LEVELS) {
1872 pp_table->DisplayClockTable[disp_clock][i].Freq =
1873 cpu_to_le16(clk);
1874 pp_table->DisplayClockTable[disp_clock][i].Vid =
1875 cpu_to_le16(vid);
1876 i++;
1877 }
1878
1879 return 0;
1880 }
1881
1882 static int vega10_populate_all_display_clock_levels(struct pp_hwmgr *hwmgr)
1883 {
1884 uint32_t i;
1885
1886 for (i = 0; i < DSPCLK_COUNT; i++) {
1887 PP_ASSERT_WITH_CODE(!vega10_populate_single_display_type(hwmgr, i),
1888 "Failed to populate Clock in DisplayClockTable!",
1889 return -1);
1890 }
1891
1892 return 0;
1893 }
1894
1895 static int vega10_populate_single_eclock_level(struct pp_hwmgr *hwmgr,
1896 uint32_t eclock, uint8_t *current_eclk_did,
1897 uint8_t *current_soc_vol)
1898 {
1899 struct phm_ppt_v2_information *table_info =
1900 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1901 struct phm_ppt_v1_mm_clock_voltage_dependency_table *dep_table =
1902 table_info->mm_dep_table;
1903 struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1904 uint32_t i;
1905
1906 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
1907 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
1908 eclock, &dividers),
1909 "Failed to get ECLK clock settings from VBIOS!",
1910 return -1);
1911
1912 *current_eclk_did = (uint8_t)dividers.ulDid;
1913
1914 for (i = 0; i < dep_table->count; i++) {
1915 if (dep_table->entries[i].eclk == eclock)
1916 *current_soc_vol = dep_table->entries[i].vddcInd;
1917 }
1918
1919 return 0;
1920 }
1921
1922 static int vega10_populate_smc_vce_levels(struct pp_hwmgr *hwmgr)
1923 {
1924 struct vega10_hwmgr *data =
1925 (struct vega10_hwmgr *)(hwmgr->backend);
1926 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
1927 struct vega10_single_dpm_table *dpm_table = &(data->dpm_table.eclk_table);
1928 int result = -EINVAL;
1929 uint32_t i, j;
1930
1931 for (i = 0; i < dpm_table->count; i++) {
1932 result = vega10_populate_single_eclock_level(hwmgr,
1933 dpm_table->dpm_levels[i].value,
1934 &(pp_table->EclkDid[i]),
1935 &(pp_table->VceDpmVoltageIndex[i]));
1936 if (result)
1937 return result;
1938 }
1939
1940 j = i - 1;
1941 while (i < NUM_VCE_DPM_LEVELS) {
1942 result = vega10_populate_single_eclock_level(hwmgr,
1943 dpm_table->dpm_levels[j].value,
1944 &(pp_table->EclkDid[i]),
1945 &(pp_table->VceDpmVoltageIndex[i]));
1946 if (result)
1947 return result;
1948 i++;
1949 }
1950
1951 return result;
1952 }
1953
1954 static int vega10_populate_single_vclock_level(struct pp_hwmgr *hwmgr,
1955 uint32_t vclock, uint8_t *current_vclk_did)
1956 {
1957 struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1958
1959 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
1960 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
1961 vclock, &dividers),
1962 "Failed to get VCLK clock settings from VBIOS!",
1963 return -EINVAL);
1964
1965 *current_vclk_did = (uint8_t)dividers.ulDid;
1966
1967 return 0;
1968 }
1969
1970 static int vega10_populate_single_dclock_level(struct pp_hwmgr *hwmgr,
1971 uint32_t dclock, uint8_t *current_dclk_did)
1972 {
1973 struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1974
1975 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
1976 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
1977 dclock, &dividers),
1978 "Failed to get DCLK clock settings from VBIOS!",
1979 return -EINVAL);
1980
1981 *current_dclk_did = (uint8_t)dividers.ulDid;
1982
1983 return 0;
1984 }
1985
1986 static int vega10_populate_smc_uvd_levels(struct pp_hwmgr *hwmgr)
1987 {
1988 struct vega10_hwmgr *data =
1989 (struct vega10_hwmgr *)(hwmgr->backend);
1990 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
1991 struct vega10_single_dpm_table *vclk_dpm_table =
1992 &(data->dpm_table.vclk_table);
1993 struct vega10_single_dpm_table *dclk_dpm_table =
1994 &(data->dpm_table.dclk_table);
1995 struct phm_ppt_v2_information *table_info =
1996 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1997 struct phm_ppt_v1_mm_clock_voltage_dependency_table *dep_table =
1998 table_info->mm_dep_table;
1999 int result = -EINVAL;
2000 uint32_t i, j;
2001
2002 for (i = 0; i < vclk_dpm_table->count; i++) {
2003 result = vega10_populate_single_vclock_level(hwmgr,
2004 vclk_dpm_table->dpm_levels[i].value,
2005 &(pp_table->VclkDid[i]));
2006 if (result)
2007 return result;
2008 }
2009
2010 j = i - 1;
2011 while (i < NUM_UVD_DPM_LEVELS) {
2012 result = vega10_populate_single_vclock_level(hwmgr,
2013 vclk_dpm_table->dpm_levels[j].value,
2014 &(pp_table->VclkDid[i]));
2015 if (result)
2016 return result;
2017 i++;
2018 }
2019
2020 for (i = 0; i < dclk_dpm_table->count; i++) {
2021 result = vega10_populate_single_dclock_level(hwmgr,
2022 dclk_dpm_table->dpm_levels[i].value,
2023 &(pp_table->DclkDid[i]));
2024 if (result)
2025 return result;
2026 }
2027
2028 j = i - 1;
2029 while (i < NUM_UVD_DPM_LEVELS) {
2030 result = vega10_populate_single_dclock_level(hwmgr,
2031 dclk_dpm_table->dpm_levels[j].value,
2032 &(pp_table->DclkDid[i]));
2033 if (result)
2034 return result;
2035 i++;
2036 }
2037
2038 for (i = 0; i < dep_table->count; i++) {
2039 if (dep_table->entries[i].vclk ==
2040 vclk_dpm_table->dpm_levels[i].value &&
2041 dep_table->entries[i].dclk ==
2042 dclk_dpm_table->dpm_levels[i].value)
2043 pp_table->UvdDpmVoltageIndex[i] =
2044 dep_table->entries[i].vddcInd;
2045 else
2046 return -1;
2047 }
2048
2049 j = i - 1;
2050 while (i < NUM_UVD_DPM_LEVELS) {
2051 pp_table->UvdDpmVoltageIndex[i] = dep_table->entries[j].vddcInd;
2052 i++;
2053 }
2054
2055 return 0;
2056 }
2057
2058 static int vega10_populate_clock_stretcher_table(struct pp_hwmgr *hwmgr)
2059 {
2060 struct vega10_hwmgr *data =
2061 (struct vega10_hwmgr *)(hwmgr->backend);
2062 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
2063 struct phm_ppt_v2_information *table_info =
2064 (struct phm_ppt_v2_information *)(hwmgr->pptable);
2065 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
2066 table_info->vdd_dep_on_sclk;
2067 uint32_t i;
2068
2069 for (i = 0; i < dep_table->count; i++) {
2070 pp_table->CksEnable[i] = dep_table->entries[i].cks_enable;
2071 pp_table->CksVidOffset[i] = (uint8_t)(dep_table->entries[i].cks_voffset
2072 * VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
2073 }
2074
2075 return 0;
2076 }
2077
/*
 * Fill the AVFS (adaptive voltage / frequency scaling) section of the
 * SMC pptable from the VBIOS AVFS parameter block, with registry-key
 * overrides for the display-clock-to-gfxclk quadratic equations.
 *
 * On any failure to read the VBIOS AVFS information, AVFS support is
 * cleared in the feature table and the function still returns 0, so
 * SMC-table init continues without AVFS.
 */
static int vega10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data =
			(struct vega10_hwmgr *)(hwmgr->backend);
	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)(hwmgr->pptable);
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
			table_info->vdd_dep_on_sclk;
	struct pp_atomfwctrl_avfs_parameters avfs_params = {0};
	int result = 0;
	uint32_t i;

	/* Defaults used when AVFS is unsupported or the VBIOS query fails. */
	pp_table->MinVoltageVid = (uint8_t)0xff;
	pp_table->MaxVoltageVid = (uint8_t)0;

	if (data->smu_features[GNLD_AVFS].supported) {
		result = pp_atomfwctrl_get_avfs_information(hwmgr, &avfs_params);
		if (!result) {
			pp_table->MinVoltageVid = (uint8_t)
					convert_to_vid((uint16_t)(avfs_params.ulMinVddc));
			pp_table->MaxVoltageVid = (uint8_t)
					convert_to_vid((uint16_t)(avfs_params.ulMaxVddc));

			/* Mean/N-sigma voltage model constants. */
			pp_table->AConstant[0] = cpu_to_le32(avfs_params.ulMeanNsigmaAcontant0);
			pp_table->AConstant[1] = cpu_to_le32(avfs_params.ulMeanNsigmaAcontant1);
			pp_table->AConstant[2] = cpu_to_le32(avfs_params.ulMeanNsigmaAcontant2);
			pp_table->DC_tol_sigma = cpu_to_le16(avfs_params.usMeanNsigmaDcTolSigma);
			pp_table->Platform_mean = cpu_to_le16(avfs_params.usMeanNsigmaPlatformMean);
			/* NOTE(review): Platform_sigma is loaded from
			 * usMeanNsigmaDcTolSigma - the same field as
			 * DC_tol_sigma above. Looks like a copy-paste slip
			 * (a PlatformSigma source field would be expected);
			 * confirm against pp_atomfwctrl before changing. */
			pp_table->Platform_sigma = cpu_to_le16(avfs_params.usMeanNsigmaDcTolSigma);
			pp_table->PSM_Age_CompFactor = cpu_to_le16(avfs_params.usPsmAgeComfactor);

			/* BTC voltage-droop tables, CKS off and on.
			 * a0/a1/a2 use fixed-point with a 20-bit shift. */
			pp_table->BtcGbVdroopTableCksOff.a0 =
					cpu_to_le32(avfs_params.ulGbVdroopTableCksoffA0);
			pp_table->BtcGbVdroopTableCksOff.a0_shift = 20;
			pp_table->BtcGbVdroopTableCksOff.a1 =
					cpu_to_le32(avfs_params.ulGbVdroopTableCksoffA1);
			pp_table->BtcGbVdroopTableCksOff.a1_shift = 20;
			pp_table->BtcGbVdroopTableCksOff.a2 =
					cpu_to_le32(avfs_params.ulGbVdroopTableCksoffA2);
			pp_table->BtcGbVdroopTableCksOff.a2_shift = 20;

			pp_table->OverrideBtcGbCksOn = avfs_params.ucEnableGbVdroopTableCkson;
			pp_table->BtcGbVdroopTableCksOn.a0 =
					cpu_to_le32(avfs_params.ulGbVdroopTableCksonA0);
			pp_table->BtcGbVdroopTableCksOn.a0_shift = 20;
			pp_table->BtcGbVdroopTableCksOn.a1 =
					cpu_to_le32(avfs_params.ulGbVdroopTableCksonA1);
			pp_table->BtcGbVdroopTableCksOn.a1_shift = 20;
			pp_table->BtcGbVdroopTableCksOn.a2 =
					cpu_to_le32(avfs_params.ulGbVdroopTableCksonA2);
			pp_table->BtcGbVdroopTableCksOn.a2_shift = 20;

			/* AVFS fuse quadratics (m1*x^2 + m2*x + b), CKS on/off;
			 * m1/m2/b carry 24/12/0-bit fixed-point shifts. */
			pp_table->AvfsGbCksOn.m1 =
					cpu_to_le32(avfs_params.ulGbFuseTableCksonM1);
			pp_table->AvfsGbCksOn.m2 =
					cpu_to_le32(avfs_params.ulGbFuseTableCksonM2);
			pp_table->AvfsGbCksOn.b =
					cpu_to_le32(avfs_params.ulGbFuseTableCksonB);
			pp_table->AvfsGbCksOn.m1_shift = 24;
			pp_table->AvfsGbCksOn.m2_shift = 12;
			pp_table->AvfsGbCksOn.b_shift = 0;

			pp_table->OverrideAvfsGbCksOn =
					avfs_params.ucEnableGbFuseTableCkson;
			pp_table->AvfsGbCksOff.m1 =
					cpu_to_le32(avfs_params.ulGbFuseTableCksoffM1);
			pp_table->AvfsGbCksOff.m2 =
					cpu_to_le32(avfs_params.ulGbFuseTableCksoffM2);
			pp_table->AvfsGbCksOff.b =
					cpu_to_le32(avfs_params.ulGbFuseTableCksoffB);
			pp_table->AvfsGbCksOff.m1_shift = 24;
			pp_table->AvfsGbCksOff.m2_shift = 12;
			pp_table->AvfsGbCksOff.b_shift = 0;

			/* Per-SCLK-level static voltage offsets. */
			for (i = 0; i < dep_table->count; i++)
				pp_table->StaticVoltageOffsetVid[i] =
						convert_to_vid((uint8_t)(dep_table->entries[i].sclk_offset));

			/* For each display clock domain: use the registry
			 * quadratic coefficients when both a and b were set
			 * (i.e. are not the "unset" default), else fall back
			 * to the VBIOS-provided equation. */
			if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
					data->disp_clk_quad_eqn_a) &&
				(PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
					data->disp_clk_quad_eqn_b)) {
				pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m1 =
						(int32_t)data->disp_clk_quad_eqn_a;
				pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m2 =
						(int32_t)data->disp_clk_quad_eqn_b;
				pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].b =
						(int32_t)data->disp_clk_quad_eqn_c;
			} else {
				pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m1 =
						(int32_t)avfs_params.ulDispclk2GfxclkM1;
				pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m2 =
						(int32_t)avfs_params.ulDispclk2GfxclkM2;
				pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].b =
						(int32_t)avfs_params.ulDispclk2GfxclkB;
			}

			pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m1_shift = 24;
			pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m2_shift = 12;
			pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].b_shift = 12;

			if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
					data->dcef_clk_quad_eqn_a) &&
				(PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
					data->dcef_clk_quad_eqn_b)) {
				pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m1 =
						(int32_t)data->dcef_clk_quad_eqn_a;
				pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m2 =
						(int32_t)data->dcef_clk_quad_eqn_b;
				pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].b =
						(int32_t)data->dcef_clk_quad_eqn_c;
			} else {
				pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m1 =
						(int32_t)avfs_params.ulDcefclk2GfxclkM1;
				pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m2 =
						(int32_t)avfs_params.ulDcefclk2GfxclkM2;
				pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].b =
						(int32_t)avfs_params.ulDcefclk2GfxclkB;
			}

			pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m1_shift = 24;
			pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m2_shift = 12;
			pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].b_shift = 12;

			if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
					data->pixel_clk_quad_eqn_a) &&
				(PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
					data->pixel_clk_quad_eqn_b)) {
				pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m1 =
						(int32_t)data->pixel_clk_quad_eqn_a;
				pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m2 =
						(int32_t)data->pixel_clk_quad_eqn_b;
				pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].b =
						(int32_t)data->pixel_clk_quad_eqn_c;
			} else {
				pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m1 =
						(int32_t)avfs_params.ulPixelclk2GfxclkM1;
				pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m2 =
						(int32_t)avfs_params.ulPixelclk2GfxclkM2;
				pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].b =
						(int32_t)avfs_params.ulPixelclk2GfxclkB;
			}

			pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m1_shift = 24;
			pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m2_shift = 12;
			pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].b_shift = 12;
			if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
					data->phy_clk_quad_eqn_a) &&
				(PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
					data->phy_clk_quad_eqn_b)) {
				pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m1 =
						(int32_t)data->phy_clk_quad_eqn_a;
				pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m2 =
						(int32_t)data->phy_clk_quad_eqn_b;
				pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].b =
						(int32_t)data->phy_clk_quad_eqn_c;
			} else {
				pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m1 =
						(int32_t)avfs_params.ulPhyclk2GfxclkM1;
				pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m2 =
						(int32_t)avfs_params.ulPhyclk2GfxclkM2;
				pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].b =
						(int32_t)avfs_params.ulPhyclk2GfxclkB;
			}

			pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m1_shift = 24;
			pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m2_shift = 12;
			pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].b_shift = 12;

			/* ACG tables.
			 * NOTE(review): unlike the fields above, these are
			 * stored without cpu_to_le*() conversion - confirm
			 * whether that is intentional. */
			pp_table->AcgBtcGbVdroopTable.a0 = avfs_params.ulAcgGbVdroopTableA0;
			pp_table->AcgBtcGbVdroopTable.a0_shift = 20;
			pp_table->AcgBtcGbVdroopTable.a1 = avfs_params.ulAcgGbVdroopTableA1;
			pp_table->AcgBtcGbVdroopTable.a1_shift = 20;
			pp_table->AcgBtcGbVdroopTable.a2 = avfs_params.ulAcgGbVdroopTableA2;
			pp_table->AcgBtcGbVdroopTable.a2_shift = 20;

			pp_table->AcgAvfsGb.m1 = avfs_params.ulAcgGbFuseTableM1;
			pp_table->AcgAvfsGb.m2 = avfs_params.ulAcgGbFuseTableM2;
			pp_table->AcgAvfsGb.b = avfs_params.ulAcgGbFuseTableB;
			pp_table->AcgAvfsGb.m1_shift = 0;
			pp_table->AcgAvfsGb.m2_shift = 0;
			pp_table->AcgAvfsGb.b_shift = 0;

		} else {
			/* VBIOS query failed: run without AVFS. */
			data->smu_features[GNLD_AVFS].supported = false;
		}
	}

	return 0;
}
2269
2270 static int vega10_acg_enable(struct pp_hwmgr *hwmgr)
2271 {
2272 struct vega10_hwmgr *data =
2273 (struct vega10_hwmgr *)(hwmgr->backend);
2274 uint32_t agc_btc_response;
2275
2276 if (data->smu_features[GNLD_ACG].supported) {
2277 if (0 == vega10_enable_smc_features(hwmgr, true,
2278 data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_bitmap))
2279 data->smu_features[GNLD_DPM_PREFETCHER].enabled = true;
2280
2281 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_InitializeAcg);
2282
2283 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgBtc);
2284 vega10_read_arg_from_smc(hwmgr, &agc_btc_response);
2285
2286 if (1 == agc_btc_response) {
2287 if (1 == data->acg_loop_state)
2288 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgInClosedLoop);
2289 else if (2 == data->acg_loop_state)
2290 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgInOpenLoop);
2291 if (0 == vega10_enable_smc_features(hwmgr, true,
2292 data->smu_features[GNLD_ACG].smu_feature_bitmap))
2293 data->smu_features[GNLD_ACG].enabled = true;
2294 } else {
2295 pr_info("[ACG_Enable] ACG BTC Returned Failed Status!\n");
2296 data->smu_features[GNLD_ACG].enabled = false;
2297 }
2298 }
2299
2300 return 0;
2301 }
2302
2303 static int vega10_acg_disable(struct pp_hwmgr *hwmgr)
2304 {
2305 struct vega10_hwmgr *data =
2306 (struct vega10_hwmgr *)(hwmgr->backend);
2307
2308 if (data->smu_features[GNLD_ACG].supported &&
2309 data->smu_features[GNLD_ACG].enabled)
2310 if (!vega10_enable_smc_features(hwmgr, false,
2311 data->smu_features[GNLD_ACG].smu_feature_bitmap))
2312 data->smu_features[GNLD_ACG].enabled = false;
2313
2314 return 0;
2315 }
2316
2317 static int vega10_populate_gpio_parameters(struct pp_hwmgr *hwmgr)
2318 {
2319 struct vega10_hwmgr *data =
2320 (struct vega10_hwmgr *)(hwmgr->backend);
2321 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
2322 struct pp_atomfwctrl_gpio_parameters gpio_params = {0};
2323 int result;
2324
2325 result = pp_atomfwctrl_get_gpio_information(hwmgr, &gpio_params);
2326 if (!result) {
2327 if (PP_CAP(PHM_PlatformCaps_RegulatorHot) &&
2328 data->registry_data.regulator_hot_gpio_support) {
2329 pp_table->VR0HotGpio = gpio_params.ucVR0HotGpio;
2330 pp_table->VR0HotPolarity = gpio_params.ucVR0HotPolarity;
2331 pp_table->VR1HotGpio = gpio_params.ucVR1HotGpio;
2332 pp_table->VR1HotPolarity = gpio_params.ucVR1HotPolarity;
2333 } else {
2334 pp_table->VR0HotGpio = 0;
2335 pp_table->VR0HotPolarity = 0;
2336 pp_table->VR1HotGpio = 0;
2337 pp_table->VR1HotPolarity = 0;
2338 }
2339
2340 if (PP_CAP(PHM_PlatformCaps_AutomaticDCTransition) &&
2341 data->registry_data.ac_dc_switch_gpio_support) {
2342 pp_table->AcDcGpio = gpio_params.ucAcDcGpio;
2343 pp_table->AcDcPolarity = gpio_params.ucAcDcPolarity;
2344 } else {
2345 pp_table->AcDcGpio = 0;
2346 pp_table->AcDcPolarity = 0;
2347 }
2348 }
2349
2350 return result;
2351 }
2352
2353 static int vega10_avfs_enable(struct pp_hwmgr *hwmgr, bool enable)
2354 {
2355 struct vega10_hwmgr *data =
2356 (struct vega10_hwmgr *)(hwmgr->backend);
2357
2358 if (data->smu_features[GNLD_AVFS].supported) {
2359 if (enable) {
2360 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2361 true,
2362 data->smu_features[GNLD_AVFS].smu_feature_bitmap),
2363 "[avfs_control] Attempt to Enable AVFS feature Failed!",
2364 return -1);
2365 data->smu_features[GNLD_AVFS].enabled = true;
2366 } else {
2367 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2368 false,
2369 data->smu_features[GNLD_AVFS].smu_feature_bitmap),
2370 "[avfs_control] Attempt to Disable AVFS feature Failed!",
2371 return -1);
2372 data->smu_features[GNLD_AVFS].enabled = false;
2373 }
2374 }
2375
2376 return 0;
2377 }
2378
/*
 * Look up a per-ASIC AVFS fuse override, keyed by the chip serial
 * number read from the SMU, and upload it to the SMC if one exists.
 *
 * Returns 0 when no override exists for this serial number or when the
 * upload succeeded; otherwise the table-upload error code.
 */
static int vega10_populate_and_upload_avfs_fuse_override(struct pp_hwmgr *hwmgr)
{
	int result = 0;

	uint64_t serial_number = 0;
	uint32_t top32, bottom32;
	struct phm_fuses_default fuse;

	struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
	AvfsFuseOverride_t *avfs_fuse_table = &(data->smc_state_table.avfs_fuse_override_table);

	/* Read the two serial-number halves from the SMU.
	 * NOTE(review): the return values of these SMC calls are not
	 * checked, so top32/bottom32 could be stale on failure. */
	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32);
	vega10_read_arg_from_smc(hwmgr, &top32);

	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32);
	vega10_read_arg_from_smc(hwmgr, &bottom32);

	/* NOTE(review): the "bottom" half is placed in the HIGH 32 bits.
	 * Looks inverted, but the fuse-override lookup keys must have
	 * been generated the same way - confirm before changing. */
	serial_number = ((uint64_t)bottom32 << 32) | top32;

	if (pp_override_get_default_fuse_value(serial_number, &fuse) == 0) {
		avfs_fuse_table->VFT0_b = fuse.VFT0_b;
		avfs_fuse_table->VFT0_m1 = fuse.VFT0_m1;
		avfs_fuse_table->VFT0_m2 = fuse.VFT0_m2;
		avfs_fuse_table->VFT1_b = fuse.VFT1_b;
		avfs_fuse_table->VFT1_m1 = fuse.VFT1_m1;
		avfs_fuse_table->VFT1_m2 = fuse.VFT1_m2;
		avfs_fuse_table->VFT2_b = fuse.VFT2_b;
		avfs_fuse_table->VFT2_m1 = fuse.VFT2_m1;
		avfs_fuse_table->VFT2_m2 = fuse.VFT2_m2;
		result = vega10_copy_table_to_smc(hwmgr,
				(uint8_t *)avfs_fuse_table, AVFSFUSETABLE);
		/* Empty recovery action: failure is logged, result returned. */
		PP_ASSERT_WITH_CODE(!result,
				"Failed to upload FuseOVerride!",
				);
	}

	return result;
}
2417
2418 static int vega10_save_default_power_profile(struct pp_hwmgr *hwmgr)
2419 {
2420 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
2421 struct vega10_single_dpm_table *dpm_table = &(data->dpm_table.gfx_table);
2422 uint32_t min_level;
2423
2424 hwmgr->default_gfx_power_profile.type = AMD_PP_GFX_PROFILE;
2425 hwmgr->default_compute_power_profile.type = AMD_PP_COMPUTE_PROFILE;
2426
2427 /* Optimize compute power profile: Use only highest
2428 * 2 power levels (if more than 2 are available)
2429 */
2430 if (dpm_table->count > 2)
2431 min_level = dpm_table->count - 2;
2432 else if (dpm_table->count == 2)
2433 min_level = 1;
2434 else
2435 min_level = 0;
2436
2437 hwmgr->default_compute_power_profile.min_sclk =
2438 dpm_table->dpm_levels[min_level].value;
2439
2440 hwmgr->gfx_power_profile = hwmgr->default_gfx_power_profile;
2441 hwmgr->compute_power_profile = hwmgr->default_compute_power_profile;
2442
2443 return 0;
2444 }
2445
/**
 * Initializes the SMC table and uploads it to the SMC.
 *
 * @param hwmgr the address of the powerplay hardware manager.
 * @return 0 on success, a negative error code otherwise.
 */
static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
{
	int result;
	struct vega10_hwmgr *data =
			(struct vega10_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)(hwmgr->pptable);
	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
	struct pp_atomfwctrl_voltage_table voltage_table;
	struct pp_atomfwctrl_bios_boot_up_values boot_up_values;

	/* Build the driver-side DPM tables first; everything below
	 * populates the SMC pptable from them. */
	result = vega10_setup_default_dpm_tables(hwmgr);
	PP_ASSERT_WITH_CODE(!result,
			"Failed to setup default DPM tables!",
			return result);

	/* NOTE(review): the return value of this VBIOS query is not
	 * checked; voltage_table would be uninitialized on failure. */
	pp_atomfwctrl_get_voltage_table_v4(hwmgr, VOLTAGE_TYPE_VDDC,
			VOLTAGE_OBJ_SVID2,  &voltage_table);
	pp_table->MaxVidStep = voltage_table.max_vid_step;

	/* Per-domain voltage-mode bytes copied from the powerplay table. */
	pp_table->GfxDpmVoltageMode =
			(uint8_t)(table_info->uc_gfx_dpm_voltage_mode);
	pp_table->SocDpmVoltageMode =
			(uint8_t)(table_info->uc_soc_dpm_voltage_mode);
	pp_table->UclkDpmVoltageMode =
			(uint8_t)(table_info->uc_uclk_dpm_voltage_mode);
	pp_table->UvdDpmVoltageMode =
			(uint8_t)(table_info->uc_uvd_dpm_voltage_mode);
	pp_table->VceDpmVoltageMode =
			(uint8_t)(table_info->uc_vce_dpm_voltage_mode);
	pp_table->Mp0DpmVoltageMode =
			(uint8_t)(table_info->uc_mp0_dpm_voltage_mode);

	pp_table->DisplayDpmVoltageMode =
			(uint8_t)(table_info->uc_dcef_dpm_voltage_mode);

	data->vddc_voltage_table.psi0_enable = voltage_table.psi0_enable;
	data->vddc_voltage_table.psi1_enable = voltage_table.psi1_enable;

	/* ULV only if both the registry allows it and the table carries
	 * a non-zero ULV voltage offset. */
	if (data->registry_data.ulv_support &&
			table_info->us_ulv_voltage_offset) {
		result = vega10_populate_ulv_state(hwmgr);
		PP_ASSERT_WITH_CODE(!result,
				"Failed to initialize ULV state!",
				return result);
	}

	result = vega10_populate_smc_link_levels(hwmgr);
	PP_ASSERT_WITH_CODE(!result,
			"Failed to initialize Link Level!",
			return result);

	result = vega10_populate_all_graphic_levels(hwmgr);
	PP_ASSERT_WITH_CODE(!result,
			"Failed to initialize Graphics Level!",
			return result);

	result = vega10_populate_all_memory_levels(hwmgr);
	PP_ASSERT_WITH_CODE(!result,
			"Failed to initialize Memory Level!",
			return result);

	result = vega10_populate_all_display_clock_levels(hwmgr);
	PP_ASSERT_WITH_CODE(!result,
			"Failed to initialize Display Level!",
			return result);

	result = vega10_populate_smc_vce_levels(hwmgr);
	PP_ASSERT_WITH_CODE(!result,
			"Failed to initialize VCE Level!",
			return result);

	result = vega10_populate_smc_uvd_levels(hwmgr);
	PP_ASSERT_WITH_CODE(!result,
			"Failed to initialize UVD Level!",
			return result);

	if (data->registry_data.clock_stretcher_support) {
		result = vega10_populate_clock_stretcher_table(hwmgr);
		PP_ASSERT_WITH_CODE(!result,
				"Failed to populate Clock Stretcher Table!",
				return result);
	}

	/* Cache the VBIOS boot-up state; failure here is non-fatal and
	 * simply skips the boot-voltage/deep-sleep messages below. */
	result = pp_atomfwctrl_get_vbios_bootup_values(hwmgr, &boot_up_values);
	if (!result) {
		data->vbios_boot_state.vddc     = boot_up_values.usVddc;
		data->vbios_boot_state.vddci    = boot_up_values.usVddci;
		data->vbios_boot_state.mvddc    = boot_up_values.usMvddc;
		data->vbios_boot_state.gfx_clock = boot_up_values.ulGfxClk;
		data->vbios_boot_state.mem_clock = boot_up_values.ulUClk;
		data->vbios_boot_state.soc_clock = boot_up_values.ulSocClk;
		data->vbios_boot_state.dcef_clock = boot_up_values.ulDCEFClk;
		if (0 != boot_up_values.usVddc) {
			/* usVddc * 4: presumably converts mV to the SMC's
			 * voltage message units - TODO confirm. */
			smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_SetFloorSocVoltage,
					(boot_up_values.usVddc * 4));
			data->vbios_boot_state.bsoc_vddc_lock = true;
		} else {
			data->vbios_boot_state.bsoc_vddc_lock = false;
		}
		/* DCEF clock is in 10 kHz; the message takes MHz. */
		smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_SetMinDeepSleepDcefclk,
			(uint32_t)(data->vbios_boot_state.dcef_clock / 100));
	}

	result = vega10_populate_avfs_parameters(hwmgr);
	PP_ASSERT_WITH_CODE(!result,
			"Failed to initialize AVFS Parameters!",
			return result);

	result = vega10_populate_gpio_parameters(hwmgr);
	PP_ASSERT_WITH_CODE(!result,
			"Failed to initialize GPIO Parameters!",
			return result);

	/* Averaging-window alpha values for the activity monitors. */
	pp_table->GfxclkAverageAlpha = (uint8_t)
			(data->gfxclk_average_alpha);
	pp_table->SocclkAverageAlpha = (uint8_t)
			(data->socclk_average_alpha);
	pp_table->UclkAverageAlpha = (uint8_t)
			(data->uclk_average_alpha);
	pp_table->GfxActivityAverageAlpha = (uint8_t)
			(data->gfx_activity_average_alpha);

	/* Best-effort: its own error handling logs and returns, but the
	 * result is deliberately ignored here. */
	vega10_populate_and_upload_avfs_fuse_override(hwmgr);

	/* Table is complete - push it to the SMC. */
	result = vega10_copy_table_to_smc(hwmgr,
			(uint8_t *)pp_table, PPTABLE);
	PP_ASSERT_WITH_CODE(!result,
			"Failed to upload PPtable!", return result);

	result = vega10_avfs_enable(hwmgr, true);
	PP_ASSERT_WITH_CODE(!result, "Attempt to enable AVFS feature Failed!",
					return result);
	vega10_acg_enable(hwmgr);
	vega10_save_default_power_profile(hwmgr);

	return 0;
}
2593
2594 static int vega10_enable_thermal_protection(struct pp_hwmgr *hwmgr)
2595 {
2596 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
2597
2598 if (data->smu_features[GNLD_THERMAL].supported) {
2599 if (data->smu_features[GNLD_THERMAL].enabled)
2600 pr_info("THERMAL Feature Already enabled!");
2601
2602 PP_ASSERT_WITH_CODE(
2603 !vega10_enable_smc_features(hwmgr,
2604 true,
2605 data->smu_features[GNLD_THERMAL].smu_feature_bitmap),
2606 "Enable THERMAL Feature Failed!",
2607 return -1);
2608 data->smu_features[GNLD_THERMAL].enabled = true;
2609 }
2610
2611 return 0;
2612 }
2613
2614 static int vega10_disable_thermal_protection(struct pp_hwmgr *hwmgr)
2615 {
2616 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
2617
2618 if (data->smu_features[GNLD_THERMAL].supported) {
2619 if (!data->smu_features[GNLD_THERMAL].enabled)
2620 pr_info("THERMAL Feature Already disabled!");
2621
2622 PP_ASSERT_WITH_CODE(
2623 !vega10_enable_smc_features(hwmgr,
2624 false,
2625 data->smu_features[GNLD_THERMAL].smu_feature_bitmap),
2626 "disable THERMAL Feature Failed!",
2627 return -1);
2628 data->smu_features[GNLD_THERMAL].enabled = false;
2629 }
2630
2631 return 0;
2632 }
2633
2634 static int vega10_enable_vrhot_feature(struct pp_hwmgr *hwmgr)
2635 {
2636 struct vega10_hwmgr *data =
2637 (struct vega10_hwmgr *)(hwmgr->backend);
2638
2639 if (PP_CAP(PHM_PlatformCaps_RegulatorHot)) {
2640 if (data->smu_features[GNLD_VR0HOT].supported) {
2641 PP_ASSERT_WITH_CODE(
2642 !vega10_enable_smc_features(hwmgr,
2643 true,
2644 data->smu_features[GNLD_VR0HOT].smu_feature_bitmap),
2645 "Attempt to Enable VR0 Hot feature Failed!",
2646 return -1);
2647 data->smu_features[GNLD_VR0HOT].enabled = true;
2648 } else {
2649 if (data->smu_features[GNLD_VR1HOT].supported) {
2650 PP_ASSERT_WITH_CODE(
2651 !vega10_enable_smc_features(hwmgr,
2652 true,
2653 data->smu_features[GNLD_VR1HOT].smu_feature_bitmap),
2654 "Attempt to Enable VR0 Hot feature Failed!",
2655 return -1);
2656 data->smu_features[GNLD_VR1HOT].enabled = true;
2657 }
2658 }
2659 }
2660 return 0;
2661 }
2662
2663 static int vega10_enable_ulv(struct pp_hwmgr *hwmgr)
2664 {
2665 struct vega10_hwmgr *data =
2666 (struct vega10_hwmgr *)(hwmgr->backend);
2667
2668 if (data->registry_data.ulv_support) {
2669 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2670 true, data->smu_features[GNLD_ULV].smu_feature_bitmap),
2671 "Enable ULV Feature Failed!",
2672 return -1);
2673 data->smu_features[GNLD_ULV].enabled = true;
2674 }
2675
2676 return 0;
2677 }
2678
2679 static int vega10_disable_ulv(struct pp_hwmgr *hwmgr)
2680 {
2681 struct vega10_hwmgr *data =
2682 (struct vega10_hwmgr *)(hwmgr->backend);
2683
2684 if (data->registry_data.ulv_support) {
2685 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2686 false, data->smu_features[GNLD_ULV].smu_feature_bitmap),
2687 "disable ULV Feature Failed!",
2688 return -EINVAL);
2689 data->smu_features[GNLD_ULV].enabled = false;
2690 }
2691
2692 return 0;
2693 }
2694
/*
 * Enable each supported deep-sleep clock feature (GFXCLK, SOCCLK,
 * LCLK, DCEFCLK) on the SMU and mirror the state in the driver's
 * feature table.
 *
 * Return: 0 on success, -EINVAL if any supported feature fails to
 * enable (earlier features stay enabled in that case).
 */
static int vega10_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data =
			(struct vega10_hwmgr *)(hwmgr->backend);

	if (data->smu_features[GNLD_DS_GFXCLK].supported) {
		PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
				true, data->smu_features[GNLD_DS_GFXCLK].smu_feature_bitmap),
				"Attempt to Enable DS_GFXCLK Feature Failed!",
				return -EINVAL);
		data->smu_features[GNLD_DS_GFXCLK].enabled = true;
	}

	if (data->smu_features[GNLD_DS_SOCCLK].supported) {
		PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
				true, data->smu_features[GNLD_DS_SOCCLK].smu_feature_bitmap),
				"Attempt to Enable DS_SOCCLK Feature Failed!",
				return -EINVAL);
		data->smu_features[GNLD_DS_SOCCLK].enabled = true;
	}

	if (data->smu_features[GNLD_DS_LCLK].supported) {
		PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
				true, data->smu_features[GNLD_DS_LCLK].smu_feature_bitmap),
				"Attempt to Enable DS_LCLK Feature Failed!",
				return -EINVAL);
		data->smu_features[GNLD_DS_LCLK].enabled = true;
	}

	if (data->smu_features[GNLD_DS_DCEFCLK].supported) {
		PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
				true, data->smu_features[GNLD_DS_DCEFCLK].smu_feature_bitmap),
				"Attempt to Enable DS_DCEFCLK Feature Failed!",
				return -EINVAL);
		data->smu_features[GNLD_DS_DCEFCLK].enabled = true;
	}

	return 0;
}
2734
2735 static int vega10_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
2736 {
2737 struct vega10_hwmgr *data =
2738 (struct vega10_hwmgr *)(hwmgr->backend);
2739
2740 if (data->smu_features[GNLD_DS_GFXCLK].supported) {
2741 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2742 false, data->smu_features[GNLD_DS_GFXCLK].smu_feature_bitmap),
2743 "Attempt to disable DS_GFXCLK Feature Failed!",
2744 return -EINVAL);
2745 data->smu_features[GNLD_DS_GFXCLK].enabled = false;
2746 }
2747
2748 if (data->smu_features[GNLD_DS_SOCCLK].supported) {
2749 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2750 false, data->smu_features[GNLD_DS_SOCCLK].smu_feature_bitmap),
2751 "Attempt to disable DS_ Feature Failed!",
2752 return -EINVAL);
2753 data->smu_features[GNLD_DS_SOCCLK].enabled = false;
2754 }
2755
2756 if (data->smu_features[GNLD_DS_LCLK].supported) {
2757 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2758 false, data->smu_features[GNLD_DS_LCLK].smu_feature_bitmap),
2759 "Attempt to disable DS_LCLK Feature Failed!",
2760 return -EINVAL);
2761 data->smu_features[GNLD_DS_LCLK].enabled = false;
2762 }
2763
2764 if (data->smu_features[GNLD_DS_DCEFCLK].supported) {
2765 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2766 false, data->smu_features[GNLD_DS_DCEFCLK].smu_feature_bitmap),
2767 "Attempt to disable DS_DCEFCLK Feature Failed!",
2768 return -EINVAL);
2769 data->smu_features[GNLD_DS_DCEFCLK].enabled = false;
2770 }
2771
2772 return 0;
2773 }
2774
2775 static int vega10_stop_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap)
2776 {
2777 struct vega10_hwmgr *data =
2778 (struct vega10_hwmgr *)(hwmgr->backend);
2779 uint32_t i, feature_mask = 0;
2780
2781
2782 if(data->smu_features[GNLD_LED_DISPLAY].supported == true){
2783 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2784 false, data->smu_features[GNLD_LED_DISPLAY].smu_feature_bitmap),
2785 "Attempt to disable LED DPM feature failed!", return -EINVAL);
2786 data->smu_features[GNLD_LED_DISPLAY].enabled = false;
2787 }
2788
2789 for (i = 0; i < GNLD_DPM_MAX; i++) {
2790 if (data->smu_features[i].smu_feature_bitmap & bitmap) {
2791 if (data->smu_features[i].supported) {
2792 if (data->smu_features[i].enabled) {
2793 feature_mask |= data->smu_features[i].
2794 smu_feature_bitmap;
2795 data->smu_features[i].enabled = false;
2796 }
2797 }
2798 }
2799 }
2800
2801 vega10_enable_smc_features(hwmgr, false, feature_mask);
2802
2803 return 0;
2804 }
2805
/**
 * @brief Tell the SMC to enable the supported DPM features.
 *
 * @param hwmgr - the address of the powerplay hardware manager.
 * @param bitmap - bitmap of the features to be enabled.
 * @return 0 when at least one DPM feature is successfully enabled.
 */
2813 static int vega10_start_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap)
2814 {
2815 struct vega10_hwmgr *data =
2816 (struct vega10_hwmgr *)(hwmgr->backend);
2817 uint32_t i, feature_mask = 0;
2818
2819 for (i = 0; i < GNLD_DPM_MAX; i++) {
2820 if (data->smu_features[i].smu_feature_bitmap & bitmap) {
2821 if (data->smu_features[i].supported) {
2822 if (!data->smu_features[i].enabled) {
2823 feature_mask |= data->smu_features[i].
2824 smu_feature_bitmap;
2825 data->smu_features[i].enabled = true;
2826 }
2827 }
2828 }
2829 }
2830
2831 if (vega10_enable_smc_features(hwmgr,
2832 true, feature_mask)) {
2833 for (i = 0; i < GNLD_DPM_MAX; i++) {
2834 if (data->smu_features[i].smu_feature_bitmap &
2835 feature_mask)
2836 data->smu_features[i].enabled = false;
2837 }
2838 }
2839
2840 if(data->smu_features[GNLD_LED_DISPLAY].supported == true){
2841 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2842 true, data->smu_features[GNLD_LED_DISPLAY].smu_feature_bitmap),
2843 "Attempt to Enable LED DPM feature Failed!", return -EINVAL);
2844 data->smu_features[GNLD_LED_DISPLAY].enabled = true;
2845 }
2846
2847 if (data->vbios_boot_state.bsoc_vddc_lock) {
2848 smum_send_msg_to_smc_with_parameter(hwmgr,
2849 PPSMC_MSG_SetFloorSocVoltage, 0);
2850 data->vbios_boot_state.bsoc_vddc_lock = false;
2851 }
2852
2853 if (PP_CAP(PHM_PlatformCaps_Falcon_QuickTransition)) {
2854 if (data->smu_features[GNLD_ACDC].supported) {
2855 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2856 true, data->smu_features[GNLD_ACDC].smu_feature_bitmap),
2857 "Attempt to Enable DS_GFXCLK Feature Failed!",
2858 return -1);
2859 data->smu_features[GNLD_ACDC].enabled = true;
2860 }
2861 }
2862
2863 return 0;
2864 }
2865
2866 static int vega10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
2867 {
2868 struct vega10_hwmgr *data =
2869 (struct vega10_hwmgr *)(hwmgr->backend);
2870 int tmp_result, result = 0;
2871
2872 tmp_result = smum_send_msg_to_smc_with_parameter(hwmgr,
2873 PPSMC_MSG_ConfigureTelemetry, data->config_telemetry);
2874 PP_ASSERT_WITH_CODE(!tmp_result,
2875 "Failed to configure telemetry!",
2876 return tmp_result);
2877
2878 smum_send_msg_to_smc_with_parameter(hwmgr,
2879 PPSMC_MSG_NumOfDisplays, 0);
2880
2881 tmp_result = (!vega10_is_dpm_running(hwmgr)) ? 0 : -1;
2882 PP_ASSERT_WITH_CODE(!tmp_result,
2883 "DPM is already running right , skipping re-enablement!",
2884 return 0);
2885
2886 if ((data->smu_version == 0x001c2c00) ||
2887 (data->smu_version == 0x001c2d00)) {
2888 tmp_result = smum_send_msg_to_smc_with_parameter(hwmgr,
2889 PPSMC_MSG_UpdatePkgPwrPidAlpha, 1);
2890 PP_ASSERT_WITH_CODE(!tmp_result,
2891 "Failed to set package power PID!",
2892 return tmp_result);
2893 }
2894
2895 tmp_result = vega10_construct_voltage_tables(hwmgr);
2896 PP_ASSERT_WITH_CODE(!tmp_result,
2897 "Failed to contruct voltage tables!",
2898 result = tmp_result);
2899
2900 tmp_result = vega10_init_smc_table(hwmgr);
2901 PP_ASSERT_WITH_CODE(!tmp_result,
2902 "Failed to initialize SMC table!",
2903 result = tmp_result);
2904
2905 if (PP_CAP(PHM_PlatformCaps_ThermalController)) {
2906 tmp_result = vega10_enable_thermal_protection(hwmgr);
2907 PP_ASSERT_WITH_CODE(!tmp_result,
2908 "Failed to enable thermal protection!",
2909 result = tmp_result);
2910 }
2911
2912 tmp_result = vega10_enable_vrhot_feature(hwmgr);
2913 PP_ASSERT_WITH_CODE(!tmp_result,
2914 "Failed to enable VR hot feature!",
2915 result = tmp_result);
2916
2917 tmp_result = vega10_enable_deep_sleep_master_switch(hwmgr);
2918 PP_ASSERT_WITH_CODE(!tmp_result,
2919 "Failed to enable deep sleep master switch!",
2920 result = tmp_result);
2921
2922 tmp_result = vega10_start_dpm(hwmgr, SMC_DPM_FEATURES);
2923 PP_ASSERT_WITH_CODE(!tmp_result,
2924 "Failed to start DPM!", result = tmp_result);
2925
2926 /* enable didt, do not abort if failed didt */
2927 tmp_result = vega10_enable_didt_config(hwmgr);
2928 PP_ASSERT(!tmp_result,
2929 "Failed to enable didt config!");
2930
2931 tmp_result = vega10_enable_power_containment(hwmgr);
2932 PP_ASSERT_WITH_CODE(!tmp_result,
2933 "Failed to enable power containment!",
2934 result = tmp_result);
2935
2936 tmp_result = vega10_power_control_set_level(hwmgr);
2937 PP_ASSERT_WITH_CODE(!tmp_result,
2938 "Failed to power control set level!",
2939 result = tmp_result);
2940
2941 tmp_result = vega10_enable_ulv(hwmgr);
2942 PP_ASSERT_WITH_CODE(!tmp_result,
2943 "Failed to enable ULV!",
2944 result = tmp_result);
2945
2946 return result;
2947 }
2948
2949 static int vega10_get_power_state_size(struct pp_hwmgr *hwmgr)
2950 {
2951 return sizeof(struct vega10_power_state);
2952 }
2953
/*
 * Per-entry callback run while parsing the ATOM powerplay table:
 * translates one ATOM_Vega10_State record ('state') into the driver's
 * pp_power_state / vega10_power_state representation.  The dependency
 * tables are located by byte offsets stored in the powerplay table
 * header, hence the (unsigned long) pointer arithmetic.
 * Returns 0 on success, -1 if the state holds more performance levels
 * than SMC or driver limits allow.
 */
static int vega10_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr,
		void *state, struct pp_power_state *power_state,
		void *pp_table, uint32_t classification_flag)
{
	ATOM_Vega10_GFXCLK_Dependency_Record_V2 *patom_record_V2;
	struct vega10_power_state *vega10_power_state =
			cast_phw_vega10_power_state(&(power_state->hardware));
	struct vega10_performance_level *performance_level;
	ATOM_Vega10_State *state_entry = (ATOM_Vega10_State *)state;
	ATOM_Vega10_POWERPLAYTABLE *powerplay_table =
			(ATOM_Vega10_POWERPLAYTABLE *)pp_table;
	ATOM_Vega10_SOCCLK_Dependency_Table *socclk_dep_table =
			(ATOM_Vega10_SOCCLK_Dependency_Table *)
			(((unsigned long)powerplay_table) +
			le16_to_cpu(powerplay_table->usSocclkDependencyTableOffset));
	ATOM_Vega10_GFXCLK_Dependency_Table *gfxclk_dep_table =
			(ATOM_Vega10_GFXCLK_Dependency_Table *)
			(((unsigned long)powerplay_table) +
			le16_to_cpu(powerplay_table->usGfxclkDependencyTableOffset));
	ATOM_Vega10_MCLK_Dependency_Table *mclk_dep_table =
			(ATOM_Vega10_MCLK_Dependency_Table *)
			(((unsigned long)powerplay_table) +
			le16_to_cpu(powerplay_table->usMclkDependencyTableOffset));


	/* The following fields are not initialized here:
	 * id orderedList allStatesList
	 */
	power_state->classification.ui_label =
			(le16_to_cpu(state_entry->usClassification) &
			ATOM_PPLIB_CLASSIFICATION_UI_MASK) >>
			ATOM_PPLIB_CLASSIFICATION_UI_SHIFT;
	power_state->classification.flags = classification_flag;
	/* NOTE: There is a classification2 flag in BIOS
	 * that is not being used right now
	 */
	power_state->classification.temporary_state = false;
	power_state->classification.to_be_deleted = false;

	power_state->validation.disallowOnDC =
			((le32_to_cpu(state_entry->ulCapsAndSettings) &
			ATOM_Vega10_DISALLOW_ON_DC) != 0);

	power_state->display.disableFrameModulation = false;
	power_state->display.limitRefreshrate = false;
	power_state->display.enableVariBright =
			((le32_to_cpu(state_entry->ulCapsAndSettings) &
			ATOM_Vega10_ENABLE_VARIBRIGHT) != 0);

	power_state->validation.supportedPowerLevels = 0;
	power_state->uvd_clocks.VCLK = 0;
	power_state->uvd_clocks.DCLK = 0;
	power_state->temperatures.min = 0;
	power_state->temperatures.max = 0;

	/* First (low) performance level. */
	performance_level = &(vega10_power_state->performance_levels
			[vega10_power_state->performance_level_count++]);

	/* NOTE(review): both bounds checks run AFTER the increment above,
	 * so they verify there is still room for the second level appended
	 * below — TODO confirm this is the intended semantics.
	 */
	PP_ASSERT_WITH_CODE(
			(vega10_power_state->performance_level_count <
					NUM_GFXCLK_DPM_LEVELS),
			"Performance levels exceeds SMC limit!",
			return -1);

	PP_ASSERT_WITH_CODE(
			(vega10_power_state->performance_level_count <=
					hwmgr->platform_descriptor.
					hardwareActivityPerformanceLevels),
			"Performance levels exceeds Driver limit!",
			return -1);

	/* Performance levels are arranged from low to high. */
	performance_level->soc_clock = socclk_dep_table->entries
			[state_entry->ucSocClockIndexLow].ulClk;
	performance_level->gfx_clock = gfxclk_dep_table->entries
			[state_entry->ucGfxClockIndexLow].ulClk;
	performance_level->mem_clock = mclk_dep_table->entries
			[state_entry->ucMemClockIndexLow].ulMemClk;

	/* Second (high) performance level; the GFXCLK table layout depends
	 * on its revision id, so rev 1 re-reads the entries as V2 records.
	 */
	performance_level = &(vega10_power_state->performance_levels
			[vega10_power_state->performance_level_count++]);
	performance_level->soc_clock = socclk_dep_table->entries
			[state_entry->ucSocClockIndexHigh].ulClk;
	if (gfxclk_dep_table->ucRevId == 0) {
		performance_level->gfx_clock = gfxclk_dep_table->entries
			[state_entry->ucGfxClockIndexHigh].ulClk;
	} else if (gfxclk_dep_table->ucRevId == 1) {
		patom_record_V2 = (ATOM_Vega10_GFXCLK_Dependency_Record_V2 *)gfxclk_dep_table->entries;
		performance_level->gfx_clock = patom_record_V2[state_entry->ucGfxClockIndexHigh].ulClk;
	}

	performance_level->mem_clock = mclk_dep_table->entries
			[state_entry->ucMemClockIndexHigh].ulMemClk;
	return 0;
}
3049
3050 static int vega10_get_pp_table_entry(struct pp_hwmgr *hwmgr,
3051 unsigned long entry_index, struct pp_power_state *state)
3052 {
3053 int result;
3054 struct vega10_power_state *ps;
3055
3056 state->hardware.magic = PhwVega10_Magic;
3057
3058 ps = cast_phw_vega10_power_state(&state->hardware);
3059
3060 result = vega10_get_powerplay_table_entry(hwmgr, entry_index, state,
3061 vega10_get_pp_table_entry_callback_func);
3062
3063 /*
3064 * This is the earliest time we have all the dependency table
3065 * and the VBIOS boot state
3066 */
3067 /* set DC compatible flag if this state supports DC */
3068 if (!state->validation.disallowOnDC)
3069 ps->dc_compatible = true;
3070
3071 ps->uvd_clks.vclk = state->uvd_clocks.VCLK;
3072 ps->uvd_clks.dclk = state->uvd_clocks.DCLK;
3073
3074 return 0;
3075 }
3076
/*
 * Hook for patching the boot power state; Vega10 needs no adjustment,
 * so this is intentionally a no-op that reports success.
 */
static int vega10_patch_boot_state(struct pp_hwmgr *hwmgr,
		struct pp_hw_power_state *hw_ps)
{
	return 0;
}
3082
/*
 * Clamp and adjust the requested power state before it is applied:
 * cap clocks to the AC/DC limits, honour DAL's minimum display clocks,
 * the stable-pstate cap, overdrive overrides, and decide whether MCLK
 * switching must be disabled (multiple displays, frame lock, VR, or
 * forced-high MCLK).  The two performance levels of 'request_ps' are
 * rewritten in place.  Always returns 0.
 */
static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
				struct pp_power_state *request_ps,
			const struct pp_power_state *current_ps)
{
	struct vega10_power_state *vega10_ps =
			cast_phw_vega10_power_state(&request_ps->hardware);
	uint32_t sclk;
	uint32_t mclk;
	struct PP_Clocks minimum_clocks = {0};
	bool disable_mclk_switching;
	bool disable_mclk_switching_for_frame_lock;
	bool disable_mclk_switching_for_vr;
	bool force_mclk_high;
	struct cgs_display_info info = {0};
	const struct phm_clock_and_voltage_limits *max_limits;
	uint32_t i;
	struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)(hwmgr->pptable);
	int32_t count;
	uint32_t stable_pstate_sclk_dpm_percentage;
	uint32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0;
	uint32_t latency;

	data->battery_state = (PP_StateUILabel_Battery ==
			request_ps->classification.ui_label);

	/* NOTE(review): message says "VI" but this is the Vega10 hwmgr —
	 * looks copied from the VI code (text left unchanged here).
	 */
	if (vega10_ps->performance_level_count != 2)
		pr_info("VI should always have 2 performance levels");

	max_limits = (PP_PowerSource_AC == hwmgr->power_source) ?
			&(hwmgr->dyn_state.max_clock_voltage_on_ac) :
			&(hwmgr->dyn_state.max_clock_voltage_on_dc);

	/* Cap clock DPM tables at DC MAX if it is in DC. */
	if (PP_PowerSource_DC == hwmgr->power_source) {
		for (i = 0; i < vega10_ps->performance_level_count; i++) {
			if (vega10_ps->performance_levels[i].mem_clock >
				max_limits->mclk)
				vega10_ps->performance_levels[i].mem_clock =
						max_limits->mclk;
			if (vega10_ps->performance_levels[i].gfx_clock >
				max_limits->sclk)
				vega10_ps->performance_levels[i].gfx_clock =
						max_limits->sclk;
		}
	}

	vega10_ps->vce_clks.evclk = hwmgr->vce_arbiter.evclk;
	vega10_ps->vce_clks.ecclk = hwmgr->vce_arbiter.ecclk;

	cgs_get_active_displays_info(hwmgr->device, &info);

	/* result = PHM_CheckVBlankTime(hwmgr, &vblankTooShort);*/
	minimum_clocks.engineClock = hwmgr->display_config.min_core_set_clock;
	minimum_clocks.memoryClock = hwmgr->display_config.min_mem_set_clock;

	if (PP_CAP(PHM_PlatformCaps_StablePState)) {
		/* Stable pstate pins SCLK to a percentage of the AC max and
		 * MCLK to the AC max; snap SCLK down to the nearest entry in
		 * the vdd_dep_on_sclk table.
		 */
		stable_pstate_sclk_dpm_percentage =
			data->registry_data.stable_pstate_sclk_dpm_percentage;
		PP_ASSERT_WITH_CODE(
			data->registry_data.stable_pstate_sclk_dpm_percentage >= 1 &&
			data->registry_data.stable_pstate_sclk_dpm_percentage <= 100,
			"percent sclk value must range from 1% to 100%, setting default value",
			stable_pstate_sclk_dpm_percentage = 75);

		max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac);
		stable_pstate_sclk = (max_limits->sclk *
				stable_pstate_sclk_dpm_percentage) / 100;

		for (count = table_info->vdd_dep_on_sclk->count - 1;
				count >= 0; count--) {
			if (stable_pstate_sclk >=
					table_info->vdd_dep_on_sclk->entries[count].clk) {
				stable_pstate_sclk =
						table_info->vdd_dep_on_sclk->entries[count].clk;
				break;
			}
		}

		if (count < 0)
			stable_pstate_sclk = table_info->vdd_dep_on_sclk->entries[0].clk;

		stable_pstate_mclk = max_limits->mclk;

		minimum_clocks.engineClock = stable_pstate_sclk;
		minimum_clocks.memoryClock = stable_pstate_mclk;
	}

	if (minimum_clocks.engineClock < hwmgr->gfx_arbiter.sclk)
		minimum_clocks.engineClock = hwmgr->gfx_arbiter.sclk;

	if (minimum_clocks.memoryClock < hwmgr->gfx_arbiter.mclk)
		minimum_clocks.memoryClock = hwmgr->gfx_arbiter.mclk;

	vega10_ps->sclk_threshold = hwmgr->gfx_arbiter.sclk_threshold;

	/* Apply overdrive overrides to the high performance level, clamped
	 * to the platform's overdrive limits.
	 */
	if (hwmgr->gfx_arbiter.sclk_over_drive) {
		PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.sclk_over_drive <=
				hwmgr->platform_descriptor.overdriveLimit.engineClock),
				"Overdrive sclk exceeds limit",
				hwmgr->gfx_arbiter.sclk_over_drive =
						hwmgr->platform_descriptor.overdriveLimit.engineClock);

		if (hwmgr->gfx_arbiter.sclk_over_drive >= hwmgr->gfx_arbiter.sclk)
			vega10_ps->performance_levels[1].gfx_clock =
					hwmgr->gfx_arbiter.sclk_over_drive;
	}

	if (hwmgr->gfx_arbiter.mclk_over_drive) {
		PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.mclk_over_drive <=
				hwmgr->platform_descriptor.overdriveLimit.memoryClock),
				"Overdrive mclk exceeds limit",
				hwmgr->gfx_arbiter.mclk_over_drive =
						hwmgr->platform_descriptor.overdriveLimit.memoryClock);

		if (hwmgr->gfx_arbiter.mclk_over_drive >= hwmgr->gfx_arbiter.mclk)
			vega10_ps->performance_levels[1].mem_clock =
					hwmgr->gfx_arbiter.mclk_over_drive;
	}

	disable_mclk_switching_for_frame_lock = phm_cap_enabled(
				    hwmgr->platform_descriptor.platformCaps,
				    PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
	disable_mclk_switching_for_vr = PP_CAP(PHM_PlatformCaps_DisableMclkSwitchForVR);
	force_mclk_high = PP_CAP(PHM_PlatformCaps_ForceMclkHigh);

	disable_mclk_switching = (info.display_count > 1) ||
				    disable_mclk_switching_for_frame_lock ||
				    disable_mclk_switching_for_vr ||
				    force_mclk_high;

	sclk = vega10_ps->performance_levels[0].gfx_clock;
	mclk = vega10_ps->performance_levels[0].mem_clock;

	/* Raise the low level to the display minimums, but never above the
	 * power-source maximums.
	 */
	if (sclk < minimum_clocks.engineClock)
		sclk = (minimum_clocks.engineClock > max_limits->sclk) ?
				max_limits->sclk : minimum_clocks.engineClock;

	if (mclk < minimum_clocks.memoryClock)
		mclk = (minimum_clocks.memoryClock > max_limits->mclk) ?
				max_limits->mclk : minimum_clocks.memoryClock;

	vega10_ps->performance_levels[0].gfx_clock = sclk;
	vega10_ps->performance_levels[0].mem_clock = mclk;

	if (vega10_ps->performance_levels[1].gfx_clock <
			vega10_ps->performance_levels[0].gfx_clock)
		vega10_ps->performance_levels[0].gfx_clock =
				vega10_ps->performance_levels[1].gfx_clock;

	if (disable_mclk_switching) {
		/* Set Mclk the max of level 0 and level 1 */
		if (mclk < vega10_ps->performance_levels[1].mem_clock)
			mclk = vega10_ps->performance_levels[1].mem_clock;

		/* Find the lowest MCLK frequency that is within
		 * the tolerable latency defined in DAL
		 */
		/* NOTE(review): 'latency' stays 0, so only table entries with
		 * latency <= 0 ever match — TODO confirm this is intended.
		 */
		latency = 0;
		for (i = 0; i < data->mclk_latency_table.count; i++) {
			if ((data->mclk_latency_table.entries[i].latency <= latency) &&
				(data->mclk_latency_table.entries[i].frequency >=
						vega10_ps->performance_levels[0].mem_clock) &&
				(data->mclk_latency_table.entries[i].frequency <=
						vega10_ps->performance_levels[1].mem_clock))
				mclk = data->mclk_latency_table.entries[i].frequency;
		}
		vega10_ps->performance_levels[0].mem_clock = mclk;
	} else {
		if (vega10_ps->performance_levels[1].mem_clock <
				vega10_ps->performance_levels[0].mem_clock)
			vega10_ps->performance_levels[0].mem_clock =
					vega10_ps->performance_levels[1].mem_clock;
	}

	if (PP_CAP(PHM_PlatformCaps_StablePState)) {
		for (i = 0; i < vega10_ps->performance_level_count; i++) {
			vega10_ps->performance_levels[i].gfx_clock = stable_pstate_sclk;
			vega10_ps->performance_levels[i].mem_clock = stable_pstate_mclk;
		}
	}

	return 0;
}
3268
/*
 * Inspect the new power state's top SCLK/MCLK against the current DPM
 * tables and record which tables need repopulating in
 * data->need_update_dpm_table (DPMTABLE_UPDATE_* when only deep-sleep /
 * display-count bookkeeping changed, DPMTABLE_OD_UPDATE_* when the
 * requested clock is absent from the table).  Takes the ODN and
 * non-ODN paths separately.  Always returns 0.
 */
static int vega10_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input)
{
	const struct phm_set_power_state_input *states =
			(const struct phm_set_power_state_input *)input;
	const struct vega10_power_state *vega10_ps =
			cast_const_phw_vega10_power_state(states->pnew_state);
	struct vega10_hwmgr *data =
			(struct vega10_hwmgr *)(hwmgr->backend);
	struct vega10_single_dpm_table *sclk_table =
			&(data->dpm_table.gfx_table);
	uint32_t sclk = vega10_ps->performance_levels
			[vega10_ps->performance_level_count - 1].gfx_clock;
	struct vega10_single_dpm_table *mclk_table =
			&(data->dpm_table.mem_table);
	uint32_t mclk = vega10_ps->performance_levels
			[vega10_ps->performance_level_count - 1].mem_clock;
	/* NOTE(review): min_clocks is zero-initialized and never filled in,
	 * so the engineClockInSR comparisons below effectively test against
	 * zero — TODO confirm whether DAL minimums were meant to be queried.
	 */
	struct PP_Clocks min_clocks = {0};
	uint32_t i;
	struct cgs_display_info info = {0};

	data->need_update_dpm_table = 0;

	if (PP_CAP(PHM_PlatformCaps_ODNinACSupport) ||
			PP_CAP(PHM_PlatformCaps_ODNinDCSupport)) {
		/* ODN path: only deep-sleep and display-count changes are
		 * tracked here; OD clock edits arrive via
		 * apply_overdrive_next_settings_mask.
		 */
		for (i = 0; i < sclk_table->count; i++) {
			if (sclk == sclk_table->dpm_levels[i].value)
				break;
		}

		if (!(data->apply_overdrive_next_settings_mask &
				DPMTABLE_OD_UPDATE_SCLK) && i >= sclk_table->count) {
			/* Check SCLK in DAL's minimum clocks
			 * in case DeepSleep divider update is required.
			 */
			if (data->display_timing.min_clock_in_sr !=
					min_clocks.engineClockInSR &&
				(min_clocks.engineClockInSR >=
						VEGA10_MINIMUM_ENGINE_CLOCK ||
					data->display_timing.min_clock_in_sr >=
						VEGA10_MINIMUM_ENGINE_CLOCK))
				data->need_update_dpm_table |= DPMTABLE_UPDATE_SCLK;
		}

		cgs_get_active_displays_info(hwmgr->device, &info);

		if (data->display_timing.num_existing_displays !=
				info.display_count)
			data->need_update_dpm_table |= DPMTABLE_UPDATE_MCLK;
	} else {
		/* Non-ODN path: a requested clock missing from the table
		 * means the user overdrove it (OD_UPDATE), otherwise check
		 * the deep-sleep divider and display count.
		 */
		for (i = 0; i < sclk_table->count; i++) {
			if (sclk == sclk_table->dpm_levels[i].value)
				break;
		}

		if (i >= sclk_table->count)
			data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
		else {
			/* Check SCLK in DAL's minimum clocks
			 * in case DeepSleep divider update is required.
			 */
			if (data->display_timing.min_clock_in_sr !=
					min_clocks.engineClockInSR &&
				(min_clocks.engineClockInSR >=
						VEGA10_MINIMUM_ENGINE_CLOCK ||
					data->display_timing.min_clock_in_sr >=
						VEGA10_MINIMUM_ENGINE_CLOCK))
				data->need_update_dpm_table |= DPMTABLE_UPDATE_SCLK;
		}

		for (i = 0; i < mclk_table->count; i++) {
			if (mclk == mclk_table->dpm_levels[i].value)
				break;
		}

		cgs_get_active_displays_info(hwmgr->device, &info);

		if (i >= mclk_table->count)
			data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;

		if (data->display_timing.num_existing_displays !=
				info.display_count ||
				i >= mclk_table->count)
			data->need_update_dpm_table |= DPMTABLE_UPDATE_MCLK;
	}
	return 0;
}
3355
/*
 * Rebuild and upload the SCLK/MCLK DPM tables for the new power state
 * when vega10_find_dpm_states_clocks_in_dpm_table() flagged a change.
 * ODN path: copy the user's ODN level tables over the live DPM tables.
 * Non-ODN path: overwrite the top level with the overdriven clock and,
 * for OD6+, rescale the intermediate levels proportionally against the
 * golden (default) table so the heatmap axis stays consistent.
 * Returns 0 on success or the SMC population error.
 */
static int vega10_populate_and_upload_sclk_mclk_dpm_levels(
		struct pp_hwmgr *hwmgr, const void *input)
{
	int result = 0;
	const struct phm_set_power_state_input *states =
			(const struct phm_set_power_state_input *)input;
	const struct vega10_power_state *vega10_ps =
			cast_const_phw_vega10_power_state(states->pnew_state);
	struct vega10_hwmgr *data =
			(struct vega10_hwmgr *)(hwmgr->backend);
	uint32_t sclk = vega10_ps->performance_levels
			[vega10_ps->performance_level_count - 1].gfx_clock;
	uint32_t mclk = vega10_ps->performance_levels
			[vega10_ps->performance_level_count - 1].mem_clock;
	struct vega10_dpm_table *dpm_table = &data->dpm_table;
	struct vega10_dpm_table *golden_dpm_table =
			&data->golden_dpm_table;
	uint32_t dpm_count, clock_percent;
	uint32_t i;

	if (PP_CAP(PHM_PlatformCaps_ODNinACSupport) ||
			PP_CAP(PHM_PlatformCaps_ODNinDCSupport)) {

		if (!data->need_update_dpm_table &&
			!data->apply_optimized_settings &&
			!data->apply_overdrive_next_settings_mask)
			return 0;

		/* Copy user ODN core-clock levels over the live gfx table. */
		if (data->apply_overdrive_next_settings_mask &
				DPMTABLE_OD_UPDATE_SCLK) {
			for (dpm_count = 0;
					dpm_count < dpm_table->gfx_table.count;
					dpm_count++) {
				dpm_table->gfx_table.dpm_levels[dpm_count].enabled =
						data->odn_dpm_table.odn_core_clock_dpm_levels.
						performance_level_entries[dpm_count].enabled;
				dpm_table->gfx_table.dpm_levels[dpm_count].value =
						data->odn_dpm_table.odn_core_clock_dpm_levels.
						performance_level_entries[dpm_count].clock;
			}
		}

		/* Copy user ODN memory-clock levels over the live mem table. */
		if (data->apply_overdrive_next_settings_mask &
				DPMTABLE_OD_UPDATE_MCLK) {
			for (dpm_count = 0;
					dpm_count < dpm_table->mem_table.count;
					dpm_count++) {
				dpm_table->mem_table.dpm_levels[dpm_count].enabled =
						data->odn_dpm_table.odn_memory_clock_dpm_levels.
						performance_level_entries[dpm_count].enabled;
				dpm_table->mem_table.dpm_levels[dpm_count].value =
						data->odn_dpm_table.odn_memory_clock_dpm_levels.
						performance_level_entries[dpm_count].clock;
			}
		}

		if ((data->need_update_dpm_table & DPMTABLE_UPDATE_SCLK) ||
			data->apply_optimized_settings ||
			(data->apply_overdrive_next_settings_mask &
					DPMTABLE_OD_UPDATE_SCLK)) {
			result = vega10_populate_all_graphic_levels(hwmgr);
			PP_ASSERT_WITH_CODE(!result,
					"Failed to populate SCLK during \
					PopulateNewDPMClocksStates Function!",
					return result);
		}

		if ((data->need_update_dpm_table & DPMTABLE_UPDATE_MCLK) ||
			(data->apply_overdrive_next_settings_mask &
					DPMTABLE_OD_UPDATE_MCLK)){
			result = vega10_populate_all_memory_levels(hwmgr);
			PP_ASSERT_WITH_CODE(!result,
					"Failed to populate MCLK during \
					PopulateNewDPMClocksStates Function!",
					return result);
		}
	} else {
		if (!data->need_update_dpm_table &&
				!data->apply_optimized_settings)
			return 0;

		if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_SCLK &&
				data->smu_features[GNLD_DPM_GFXCLK].supported) {
			dpm_table->
			gfx_table.dpm_levels[dpm_table->gfx_table.count - 1].
			value = sclk;
			if (PP_CAP(PHM_PlatformCaps_OD6PlusinACSupport) ||
					PP_CAP(PHM_PlatformCaps_OD6PlusinDCSupport)) {
				/* Need to do calculation based on the golden DPM table
				 * as the Heatmap GPU Clock axis is also based on
				 * the default values
				 */
				PP_ASSERT_WITH_CODE(
						golden_dpm_table->gfx_table.dpm_levels
						[golden_dpm_table->gfx_table.count - 1].value,
						"Divide by 0!",
						return -1);

				/* Rescale levels 2..count-2 by the percentage
				 * the top level was over/underclocked.
				 */
				dpm_count = dpm_table->gfx_table.count < 2 ?
						0 : dpm_table->gfx_table.count - 2;
				for (i = dpm_count; i > 1; i--) {
					if (sclk > golden_dpm_table->gfx_table.dpm_levels
						[golden_dpm_table->gfx_table.count - 1].value) {
						clock_percent =
							((sclk - golden_dpm_table->gfx_table.dpm_levels
							[golden_dpm_table->gfx_table.count - 1].value) *
							100) /
							golden_dpm_table->gfx_table.dpm_levels
							[golden_dpm_table->gfx_table.count - 1].value;

						dpm_table->gfx_table.dpm_levels[i].value =
							golden_dpm_table->gfx_table.dpm_levels[i].value +
							(golden_dpm_table->gfx_table.dpm_levels[i].value *
							clock_percent) / 100;
					} else if (golden_dpm_table->
							gfx_table.dpm_levels[dpm_table->gfx_table.count-1].value >
							sclk) {
						clock_percent =
							((golden_dpm_table->gfx_table.dpm_levels
							[golden_dpm_table->gfx_table.count - 1].value -
							sclk) * 100) /
							golden_dpm_table->gfx_table.dpm_levels
							[golden_dpm_table->gfx_table.count-1].value;

						dpm_table->gfx_table.dpm_levels[i].value =
							golden_dpm_table->gfx_table.dpm_levels[i].value -
							(golden_dpm_table->gfx_table.dpm_levels[i].value *
							clock_percent) / 100;
					} else
						dpm_table->gfx_table.dpm_levels[i].value =
							golden_dpm_table->gfx_table.dpm_levels[i].value;
				}
			}
		}

		if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_MCLK &&
				data->smu_features[GNLD_DPM_UCLK].supported) {
			dpm_table->
			mem_table.dpm_levels[dpm_table->mem_table.count - 1].
			value = mclk;

			if (PP_CAP(PHM_PlatformCaps_OD6PlusinACSupport) ||
					PP_CAP(PHM_PlatformCaps_OD6PlusinDCSupport)) {

				PP_ASSERT_WITH_CODE(
					golden_dpm_table->mem_table.dpm_levels
					[golden_dpm_table->mem_table.count - 1].value,
					"Divide by 0!",
					return -1);

				/* Same proportional rescale for memory levels. */
				dpm_count = dpm_table->mem_table.count < 2 ?
						0 : dpm_table->mem_table.count - 2;
				for (i = dpm_count; i > 1; i--) {
					if (mclk > golden_dpm_table->mem_table.dpm_levels
						[golden_dpm_table->mem_table.count-1].value) {
						clock_percent = ((mclk -
							golden_dpm_table->mem_table.dpm_levels
							[golden_dpm_table->mem_table.count-1].value) *
							100) /
							golden_dpm_table->mem_table.dpm_levels
							[golden_dpm_table->mem_table.count-1].value;

						dpm_table->mem_table.dpm_levels[i].value =
							golden_dpm_table->mem_table.dpm_levels[i].value +
							(golden_dpm_table->mem_table.dpm_levels[i].value *
							clock_percent) / 100;
					} else if (golden_dpm_table->mem_table.dpm_levels
							[dpm_table->mem_table.count-1].value > mclk) {
						clock_percent = ((golden_dpm_table->mem_table.dpm_levels
							[golden_dpm_table->mem_table.count-1].value - mclk) *
							100) /
							golden_dpm_table->mem_table.dpm_levels
							[golden_dpm_table->mem_table.count-1].value;

						dpm_table->mem_table.dpm_levels[i].value =
							golden_dpm_table->mem_table.dpm_levels[i].value -
							(golden_dpm_table->mem_table.dpm_levels[i].value *
							clock_percent) / 100;
					} else
						dpm_table->mem_table.dpm_levels[i].value =
							golden_dpm_table->mem_table.dpm_levels[i].value;
				}
			}
		}

		if ((data->need_update_dpm_table &
			(DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) ||
			data->apply_optimized_settings) {
			result = vega10_populate_all_graphic_levels(hwmgr);
			PP_ASSERT_WITH_CODE(!result,
					"Failed to populate SCLK during \
					PopulateNewDPMClocksStates Function!",
					return result);
		}

		if (data->need_update_dpm_table &
			(DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) {
			result = vega10_populate_all_memory_levels(hwmgr);
			PP_ASSERT_WITH_CODE(!result,
					"Failed to populate MCLK during \
					PopulateNewDPMClocksStates Function!",
					return result);
		}
	}
	return result;
}
3562
3563 static int vega10_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
3564 struct vega10_single_dpm_table *dpm_table,
3565 uint32_t low_limit, uint32_t high_limit)
3566 {
3567 uint32_t i;
3568
3569 for (i = 0; i < dpm_table->count; i++) {
3570 if ((dpm_table->dpm_levels[i].value < low_limit) ||
3571 (dpm_table->dpm_levels[i].value > high_limit))
3572 dpm_table->dpm_levels[i].enabled = false;
3573 else
3574 dpm_table->dpm_levels[i].enabled = true;
3575 }
3576 return 0;
3577 }
3578
3579 static int vega10_trim_single_dpm_states_with_mask(struct pp_hwmgr *hwmgr,
3580 struct vega10_single_dpm_table *dpm_table,
3581 uint32_t low_limit, uint32_t high_limit,
3582 uint32_t disable_dpm_mask)
3583 {
3584 uint32_t i;
3585
3586 for (i = 0; i < dpm_table->count; i++) {
3587 if ((dpm_table->dpm_levels[i].value < low_limit) ||
3588 (dpm_table->dpm_levels[i].value > high_limit))
3589 dpm_table->dpm_levels[i].enabled = false;
3590 else if (!((1 << i) & disable_dpm_mask))
3591 dpm_table->dpm_levels[i].enabled = false;
3592 else
3593 dpm_table->dpm_levels[i].enabled = true;
3594 }
3595 return 0;
3596 }
3597
3598 static int vega10_trim_dpm_states(struct pp_hwmgr *hwmgr,
3599 const struct vega10_power_state *vega10_ps)
3600 {
3601 struct vega10_hwmgr *data =
3602 (struct vega10_hwmgr *)(hwmgr->backend);
3603 uint32_t high_limit_count;
3604
3605 PP_ASSERT_WITH_CODE((vega10_ps->performance_level_count >= 1),
3606 "power state did not have any performance level",
3607 return -1);
3608
3609 high_limit_count = (vega10_ps->performance_level_count == 1) ? 0 : 1;
3610
3611 vega10_trim_single_dpm_states(hwmgr,
3612 &(data->dpm_table.soc_table),
3613 vega10_ps->performance_levels[0].soc_clock,
3614 vega10_ps->performance_levels[high_limit_count].soc_clock);
3615
3616 vega10_trim_single_dpm_states_with_mask(hwmgr,
3617 &(data->dpm_table.gfx_table),
3618 vega10_ps->performance_levels[0].gfx_clock,
3619 vega10_ps->performance_levels[high_limit_count].gfx_clock,
3620 data->disable_dpm_mask);
3621
3622 vega10_trim_single_dpm_states(hwmgr,
3623 &(data->dpm_table.mem_table),
3624 vega10_ps->performance_levels[0].mem_clock,
3625 vega10_ps->performance_levels[high_limit_count].mem_clock);
3626
3627 return 0;
3628 }
3629
3630 static uint32_t vega10_find_lowest_dpm_level(
3631 struct vega10_single_dpm_table *table)
3632 {
3633 uint32_t i;
3634
3635 for (i = 0; i < table->count; i++) {
3636 if (table->dpm_levels[i].enabled)
3637 break;
3638 }
3639
3640 return i;
3641 }
3642
3643 static uint32_t vega10_find_highest_dpm_level(
3644 struct vega10_single_dpm_table *table)
3645 {
3646 uint32_t i = 0;
3647
3648 if (table->count <= MAX_REGULAR_DPM_NUMBER) {
3649 for (i = table->count; i > 0; i--) {
3650 if (table->dpm_levels[i - 1].enabled)
3651 return i - 1;
3652 }
3653 } else {
3654 pr_info("DPM Table Has Too Many Entries!");
3655 return MAX_REGULAR_DPM_NUMBER - 1;
3656 }
3657
3658 return i;
3659 }
3660
/*
 * Intentional no-op on Vega10: there is no DAL minimum-voltage request
 * to forward.  Kept so the upload paths share a common call sequence.
 */
static void vega10_apply_dal_minimum_voltage_request(
		struct pp_hwmgr *hwmgr)
{
}
3666
/*
 * Return the SOCCLK DPM index to use when UCLK runs at its top level:
 * the voltage index (vddInd) of the last MCLK dependency entry, plus 1.
 *
 * NOTE(review): assumes table_info->vdd_dep_on_mclk is non-NULL and
 * has at least NUM_UCLK_DPM_LEVELS entries — confirm against the
 * pptable parsing code before relying on this in new callers.
 */
static int vega10_get_soc_index_for_max_uclk(struct pp_hwmgr *hwmgr)
{
	struct phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_table_on_mclk;
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)(hwmgr->pptable);

	vdd_dep_table_on_mclk = table_info->vdd_dep_on_mclk;

	return vdd_dep_table_on_mclk->entries[NUM_UCLK_DPM_LEVELS - 1].vddInd + 1;
}
3677
/*
 * Push the boot (minimum) DPM levels to the SMC as soft-min limits for
 * GFXCLK and UCLK.  A message is sent only when the cached
 * soft_min_level differs from the desired boot level, and the cache is
 * updated after a successful send.
 *
 * Return: 0 on success, -EINVAL if an SMC message fails.
 */
static int vega10_upload_dpm_bootup_level(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data =
			(struct vega10_hwmgr *)(hwmgr->backend);
	uint32_t socclk_idx;

	vega10_apply_dal_minimum_voltage_request(hwmgr);

	if (!data->registry_data.sclk_dpm_key_disabled) {
		/* Only talk to the SMC when the soft min actually changes. */
		if (data->smc_state_table.gfx_boot_level !=
				data->dpm_table.gfx_table.dpm_state.soft_min_level) {
			PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
				hwmgr,
				PPSMC_MSG_SetSoftMinGfxclkByIndex,
				data->smc_state_table.gfx_boot_level),
				"Failed to set soft min sclk index!",
				return -EINVAL);
			data->dpm_table.gfx_table.dpm_state.soft_min_level =
					data->smc_state_table.gfx_boot_level;
		}
	}

	if (!data->registry_data.mclk_dpm_key_disabled) {
		if (data->smc_state_table.mem_boot_level !=
				data->dpm_table.mem_table.dpm_state.soft_min_level) {
			if (data->smc_state_table.mem_boot_level == NUM_UCLK_DPM_LEVELS - 1) {
				/* At the top UCLK level, set a SOCCLK soft min
				 * (index derived from the MCLK dependency table)
				 * instead of the UCLK soft min. */
				socclk_idx = vega10_get_soc_index_for_max_uclk(hwmgr);
				PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
						hwmgr,
						PPSMC_MSG_SetSoftMinSocclkByIndex,
						socclk_idx),
						"Failed to set soft min uclk index!",
						return -EINVAL);
			} else {
				PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
						hwmgr,
						PPSMC_MSG_SetSoftMinUclkByIndex,
						data->smc_state_table.mem_boot_level),
						"Failed to set soft min uclk index!",
						return -EINVAL);
			}
			data->dpm_table.mem_table.dpm_state.soft_min_level =
					data->smc_state_table.mem_boot_level;
		}
	}

	return 0;
}
3726
/*
 * Push the maximum DPM levels to the SMC as soft-max limits for GFXCLK
 * and UCLK.  Mirrors vega10_upload_dpm_bootup_level(): a message is
 * sent only when the cached soft_max_level differs, and the cache is
 * updated after a successful send.
 *
 * Return: 0 on success, -EINVAL if an SMC message fails.
 */
static int vega10_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data =
			(struct vega10_hwmgr *)(hwmgr->backend);

	vega10_apply_dal_minimum_voltage_request(hwmgr);

	if (!data->registry_data.sclk_dpm_key_disabled) {
		/* Skip the SMC round-trip when nothing changed. */
		if (data->smc_state_table.gfx_max_level !=
				data->dpm_table.gfx_table.dpm_state.soft_max_level) {
			PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
				hwmgr,
				PPSMC_MSG_SetSoftMaxGfxclkByIndex,
				data->smc_state_table.gfx_max_level),
				"Failed to set soft max sclk index!",
				return -EINVAL);
			data->dpm_table.gfx_table.dpm_state.soft_max_level =
					data->smc_state_table.gfx_max_level;
		}
	}

	if (!data->registry_data.mclk_dpm_key_disabled) {
		if (data->smc_state_table.mem_max_level !=
				data->dpm_table.mem_table.dpm_state.soft_max_level) {
			PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
					hwmgr,
					PPSMC_MSG_SetSoftMaxUclkByIndex,
					data->smc_state_table.mem_max_level),
					"Failed to set soft max mclk index!",
					return -EINVAL);
			data->dpm_table.mem_table.dpm_state.soft_max_level =
					data->smc_state_table.mem_max_level;
		}
	}

	return 0;
}
3764
3765 static int vega10_generate_dpm_level_enable_mask(
3766 struct pp_hwmgr *hwmgr, const void *input)
3767 {
3768 struct vega10_hwmgr *data =
3769 (struct vega10_hwmgr *)(hwmgr->backend);
3770 const struct phm_set_power_state_input *states =
3771 (const struct phm_set_power_state_input *)input;
3772 const struct vega10_power_state *vega10_ps =
3773 cast_const_phw_vega10_power_state(states->pnew_state);
3774 int i;
3775
3776 PP_ASSERT_WITH_CODE(!vega10_trim_dpm_states(hwmgr, vega10_ps),
3777 "Attempt to Trim DPM States Failed!",
3778 return -1);
3779
3780 data->smc_state_table.gfx_boot_level =
3781 vega10_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
3782 data->smc_state_table.gfx_max_level =
3783 vega10_find_highest_dpm_level(&(data->dpm_table.gfx_table));
3784 data->smc_state_table.mem_boot_level =
3785 vega10_find_lowest_dpm_level(&(data->dpm_table.mem_table));
3786 data->smc_state_table.mem_max_level =
3787 vega10_find_highest_dpm_level(&(data->dpm_table.mem_table));
3788
3789 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
3790 "Attempt to upload DPM Bootup Levels Failed!",
3791 return -1);
3792 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
3793 "Attempt to upload DPM Max Levels Failed!",
3794 return -1);
3795 for(i = data->smc_state_table.gfx_boot_level; i < data->smc_state_table.gfx_max_level; i++)
3796 data->dpm_table.gfx_table.dpm_levels[i].enabled = true;
3797
3798
3799 for(i = data->smc_state_table.mem_boot_level; i < data->smc_state_table.mem_max_level; i++)
3800 data->dpm_table.mem_table.dpm_levels[i].enabled = true;
3801
3802 return 0;
3803 }
3804
3805 int vega10_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
3806 {
3807 struct vega10_hwmgr *data =
3808 (struct vega10_hwmgr *)(hwmgr->backend);
3809
3810 if (data->smu_features[GNLD_DPM_VCE].supported) {
3811 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
3812 enable,
3813 data->smu_features[GNLD_DPM_VCE].smu_feature_bitmap),
3814 "Attempt to Enable/Disable DPM VCE Failed!",
3815 return -1);
3816 data->smu_features[GNLD_DPM_VCE].enabled = enable;
3817 }
3818
3819 return 0;
3820 }
3821
3822 static int vega10_update_sclk_threshold(struct pp_hwmgr *hwmgr)
3823 {
3824 struct vega10_hwmgr *data =
3825 (struct vega10_hwmgr *)(hwmgr->backend);
3826 int result = 0;
3827 uint32_t low_sclk_interrupt_threshold = 0;
3828
3829 if (PP_CAP(PHM_PlatformCaps_SclkThrottleLowNotification) &&
3830 (hwmgr->gfx_arbiter.sclk_threshold !=
3831 data->low_sclk_interrupt_threshold)) {
3832 data->low_sclk_interrupt_threshold =
3833 hwmgr->gfx_arbiter.sclk_threshold;
3834 low_sclk_interrupt_threshold =
3835 data->low_sclk_interrupt_threshold;
3836
3837 data->smc_state_table.pp_table.LowGfxclkInterruptThreshold =
3838 cpu_to_le32(low_sclk_interrupt_threshold);
3839
3840 /* This message will also enable SmcToHost Interrupt */
3841 result = smum_send_msg_to_smc_with_parameter(hwmgr,
3842 PPSMC_MSG_SetLowGfxclkInterruptThreshold,
3843 (uint32_t)low_sclk_interrupt_threshold);
3844 }
3845
3846 return result;
3847 }
3848
/*
 * Apply a new power state: refresh the DPM tables from the requested
 * state, regenerate the level-enable mask, update the SCLK interrupt
 * threshold, and upload the resulting PPTable to the SMC.
 *
 * Note: intermediate failures are logged and stashed in 'result', but
 * 'result' is then overwritten by the PPTable upload — so the return
 * value reflects only the final upload.  Earlier errors are
 * effectively best-effort.
 */
static int vega10_set_power_state_tasks(struct pp_hwmgr *hwmgr,
		const void *input)
{
	int tmp_result, result = 0;
	struct vega10_hwmgr *data =
			(struct vega10_hwmgr *)(hwmgr->backend);
	PPTable_t *pp_table = &(data->smc_state_table.pp_table);

	tmp_result = vega10_find_dpm_states_clocks_in_dpm_table(hwmgr, input);
	PP_ASSERT_WITH_CODE(!tmp_result,
			"Failed to find DPM states clocks in DPM table!",
			result = tmp_result);

	tmp_result = vega10_populate_and_upload_sclk_mclk_dpm_levels(hwmgr, input);
	PP_ASSERT_WITH_CODE(!tmp_result,
			"Failed to populate and upload SCLK MCLK DPM levels!",
			result = tmp_result);

	tmp_result = vega10_generate_dpm_level_enable_mask(hwmgr, input);
	PP_ASSERT_WITH_CODE(!tmp_result,
			"Failed to generate DPM level enabled mask!",
			result = tmp_result);

	tmp_result = vega10_update_sclk_threshold(hwmgr);
	PP_ASSERT_WITH_CODE(!tmp_result,
			"Failed to update SCLK threshold!",
			result = tmp_result);

	result = vega10_copy_table_to_smc(hwmgr,
			(uint8_t *)pp_table, PPTABLE);
	PP_ASSERT_WITH_CODE(!result,
			"Failed to upload PPtable!", return result);

	/* New settings are live; clear the pending-change flags. */
	data->apply_optimized_settings = false;
	data->apply_overdrive_next_settings_mask = 0;

	return 0;
}
3887
3888 static uint32_t vega10_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
3889 {
3890 struct pp_power_state *ps;
3891 struct vega10_power_state *vega10_ps;
3892
3893 if (hwmgr == NULL)
3894 return -EINVAL;
3895
3896 ps = hwmgr->request_ps;
3897
3898 if (ps == NULL)
3899 return -EINVAL;
3900
3901 vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
3902
3903 if (low)
3904 return vega10_ps->performance_levels[0].gfx_clock;
3905 else
3906 return vega10_ps->performance_levels
3907 [vega10_ps->performance_level_count - 1].gfx_clock;
3908 }
3909
3910 static uint32_t vega10_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
3911 {
3912 struct pp_power_state *ps;
3913 struct vega10_power_state *vega10_ps;
3914
3915 if (hwmgr == NULL)
3916 return -EINVAL;
3917
3918 ps = hwmgr->request_ps;
3919
3920 if (ps == NULL)
3921 return -EINVAL;
3922
3923 vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
3924
3925 if (low)
3926 return vega10_ps->performance_levels[0].mem_clock;
3927 else
3928 return vega10_ps->performance_levels
3929 [vega10_ps->performance_level_count-1].mem_clock;
3930 }
3931
/*
 * Query the SMC for the current package power and store it in
 * query->average_gpu_power as a 24.8 fixed-point value (the SMC
 * reports an integer, hence the << 8).
 *
 * NOTE(review): the return value of vega10_read_arg_from_smc() is
 * ignored; on a failed read 'value' could be stale — confirm whether
 * that helper can fail independently of the preceding message.
 *
 * Return: 0 on success, -EINVAL if the SMC query fails.
 */
static int vega10_get_gpu_power(struct pp_hwmgr *hwmgr,
		struct pp_gpu_power *query)
{
	uint32_t value;

	PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
			PPSMC_MSG_GetCurrPkgPwr),
			"Failed to get current package power!",
			return -EINVAL);

	vega10_read_arg_from_smc(hwmgr, &value);
	/* power value is an integer */
	query->average_gpu_power = value << 8;

	return 0;
}
3948
/*
 * Read one sensor for the AMDGPU_PP_SENSOR_* query interface.
 *
 * @idx:   sensor id (AMDGPU_PP_SENSOR_*)
 * @value: output buffer; written as a uint32_t for every sensor except
 *         GPU_POWER, which fills a struct pp_gpu_power
 * @size:  in: capacity of @value in bytes; out: bytes written
 *
 * Return: 0 on success; -EINVAL for unknown sensors, an undersized
 * buffer, or a failed SMC query.
 */
static int vega10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
		void *value, int *size)
{
	uint32_t sclk_idx, mclk_idx, activity_percent = 0;
	struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
	struct vega10_dpm_table *dpm_table = &data->dpm_table;
	int ret = 0;

	switch (idx) {
	case AMDGPU_PP_SENSOR_GFX_SCLK:
		/* Ask the SMC for the active level index, then translate
		 * it to a clock value through the DPM table. */
		ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentGfxclkIndex);
		if (!ret) {
			vega10_read_arg_from_smc(hwmgr, &sclk_idx);
			*((uint32_t *)value) = dpm_table->gfx_table.dpm_levels[sclk_idx].value;
			*size = 4;
		}
		break;
	case AMDGPU_PP_SENSOR_GFX_MCLK:
		ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex);
		if (!ret) {
			vega10_read_arg_from_smc(hwmgr, &mclk_idx);
			*((uint32_t *)value) = dpm_table->mem_table.dpm_levels[mclk_idx].value;
			*size = 4;
		}
		break;
	case AMDGPU_PP_SENSOR_GPU_LOAD:
		ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetAverageGfxActivity, 0);
		if (!ret) {
			vega10_read_arg_from_smc(hwmgr, &activity_percent);
			/* Clamp: the reported activity may exceed 100. */
			*((uint32_t *)value) = activity_percent > 100 ? 100 : activity_percent;
			*size = 4;
		}
		break;
	case AMDGPU_PP_SENSOR_GPU_TEMP:
		*((uint32_t *)value) = vega10_thermal_get_temperature(hwmgr);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_UVD_POWER:
		/* 1 = powered on, 0 = power-gated */
		*((uint32_t *)value) = data->uvd_power_gated ? 0 : 1;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VCE_POWER:
		*((uint32_t *)value) = data->vce_power_gated ? 0 : 1;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GPU_POWER:
		if (*size < sizeof(struct pp_gpu_power))
			ret = -EINVAL;
		else {
			*size = sizeof(struct pp_gpu_power);
			ret = vega10_get_gpu_power(hwmgr, (struct pp_gpu_power *)value);
		}
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}
4008
4009 static int vega10_notify_smc_display_change(struct pp_hwmgr *hwmgr,
4010 bool has_disp)
4011 {
4012 return smum_send_msg_to_smc_with_parameter(hwmgr,
4013 PPSMC_MSG_SetUclkFastSwitch,
4014 has_disp ? 0 : 1);
4015 }
4016
4017 int vega10_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
4018 struct pp_display_clock_request *clock_req)
4019 {
4020 int result = 0;
4021 enum amd_pp_clock_type clk_type = clock_req->clock_type;
4022 uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;
4023 DSPCLK_e clk_select = 0;
4024 uint32_t clk_request = 0;
4025
4026 switch (clk_type) {
4027 case amd_pp_dcef_clock:
4028 clk_select = DSPCLK_DCEFCLK;
4029 break;
4030 case amd_pp_disp_clock:
4031 clk_select = DSPCLK_DISPCLK;
4032 break;
4033 case amd_pp_pixel_clock:
4034 clk_select = DSPCLK_PIXCLK;
4035 break;
4036 case amd_pp_phy_clock:
4037 clk_select = DSPCLK_PHYCLK;
4038 break;
4039 default:
4040 pr_info("[DisplayClockVoltageRequest]Invalid Clock Type!");
4041 result = -1;
4042 break;
4043 }
4044
4045 if (!result) {
4046 clk_request = (clk_freq << 16) | clk_select;
4047 result = smum_send_msg_to_smc_with_parameter(hwmgr,
4048 PPSMC_MSG_RequestDisplayClockByFreq,
4049 clk_request);
4050 }
4051
4052 return result;
4053 }
4054
4055 static uint8_t vega10_get_uclk_index(struct pp_hwmgr *hwmgr,
4056 struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table,
4057 uint32_t frequency)
4058 {
4059 uint8_t count;
4060 uint8_t i;
4061
4062 if (mclk_table == NULL || mclk_table->count == 0)
4063 return 0;
4064
4065 count = (uint8_t)(mclk_table->count);
4066
4067 for(i = 0; i < count; i++) {
4068 if(mclk_table->entries[i].clk >= frequency)
4069 return i;
4070 }
4071
4072 return i-1;
4073 }
4074
/*
 * After a power-state adjustment, tell the SMC about the current
 * display configuration: toggle UCLK fast switching based on the
 * active display count, request a DCEF clock matching the display
 * minimum, set the deep-sleep DCEF floor, and raise the UCLK soft min
 * to cover the displays' memory-clock requirement.
 *
 * Always returns 0; individual SMC failures are only logged.
 */
static int vega10_notify_smc_display_config_after_ps_adjustment(
		struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data =
			(struct vega10_hwmgr *)(hwmgr->backend);
	struct vega10_single_dpm_table *dpm_table =
			&data->dpm_table.dcef_table;
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)hwmgr->pptable;
	struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table = table_info->vdd_dep_on_mclk;
	uint32_t idx;
	uint32_t num_active_disps = 0;
	struct cgs_display_info info = {0};
	struct PP_Clocks min_clocks = {0};
	uint32_t i;
	struct pp_display_clock_request clock_req;

	info.mode_info = NULL;

	cgs_get_active_displays_info(hwmgr->device, &info);

	num_active_disps = info.display_count;

	/* Fast UCLK switch parameter: 0 when has_disp, 1 otherwise. */
	if (num_active_disps > 1)
		vega10_notify_smc_display_change(hwmgr, false);
	else
		vega10_notify_smc_display_change(hwmgr, true);

	min_clocks.dcefClock = hwmgr->display_config.min_dcef_set_clk;
	min_clocks.dcefClockInSR = hwmgr->display_config.min_dcef_deep_sleep_set_clk;
	min_clocks.memoryClock = hwmgr->display_config.min_mem_set_clock;

	/* Find the DCEF DPM level that exactly matches the display min. */
	for (i = 0; i < dpm_table->count; i++) {
		if (dpm_table->dpm_levels[i].value == min_clocks.dcefClock)
			break;
	}

	if (i < dpm_table->count) {
		clock_req.clock_type = amd_pp_dcef_clock;
		/* NOTE(review): dpm_levels[].value is passed as kHz here but
		 * printed as value/100 MHz elsewhere — confirm the unit. */
		clock_req.clock_freq_in_khz = dpm_table->dpm_levels[i].value;
		if (!vega10_display_clock_voltage_request(hwmgr, &clock_req)) {
			PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
					hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk,
					min_clocks.dcefClockInSR /100),
					"Attempt to set divider for DCEFCLK Failed!",);
		} else {
			pr_info("Attempt to set Hard Min for DCEFCLK Failed!");
		}
	} else {
		pr_debug("Cannot find requested DCEFCLK!");
	}

	if (min_clocks.memoryClock != 0) {
		idx = vega10_get_uclk_index(hwmgr, mclk_table, min_clocks.memoryClock);
		smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMinUclkByIndex, idx);
		data->dpm_table.mem_table.dpm_state.soft_min_level= idx;
	}

	return 0;
}
4135
4136 static int vega10_force_dpm_highest(struct pp_hwmgr *hwmgr)
4137 {
4138 struct vega10_hwmgr *data =
4139 (struct vega10_hwmgr *)(hwmgr->backend);
4140
4141 data->smc_state_table.gfx_boot_level =
4142 data->smc_state_table.gfx_max_level =
4143 vega10_find_highest_dpm_level(&(data->dpm_table.gfx_table));
4144 data->smc_state_table.mem_boot_level =
4145 data->smc_state_table.mem_max_level =
4146 vega10_find_highest_dpm_level(&(data->dpm_table.mem_table));
4147
4148 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
4149 "Failed to upload boot level to highest!",
4150 return -1);
4151
4152 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
4153 "Failed to upload dpm max level to highest!",
4154 return -1);
4155
4156 return 0;
4157 }
4158
4159 static int vega10_force_dpm_lowest(struct pp_hwmgr *hwmgr)
4160 {
4161 struct vega10_hwmgr *data =
4162 (struct vega10_hwmgr *)(hwmgr->backend);
4163
4164 data->smc_state_table.gfx_boot_level =
4165 data->smc_state_table.gfx_max_level =
4166 vega10_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
4167 data->smc_state_table.mem_boot_level =
4168 data->smc_state_table.mem_max_level =
4169 vega10_find_lowest_dpm_level(&(data->dpm_table.mem_table));
4170
4171 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
4172 "Failed to upload boot level to highest!",
4173 return -1);
4174
4175 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
4176 "Failed to upload dpm max level to highest!",
4177 return -1);
4178
4179 return 0;
4180
4181 }
4182
4183 static int vega10_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
4184 {
4185 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4186
4187 data->smc_state_table.gfx_boot_level =
4188 vega10_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
4189 data->smc_state_table.gfx_max_level =
4190 vega10_find_highest_dpm_level(&(data->dpm_table.gfx_table));
4191 data->smc_state_table.mem_boot_level =
4192 vega10_find_lowest_dpm_level(&(data->dpm_table.mem_table));
4193 data->smc_state_table.mem_max_level =
4194 vega10_find_highest_dpm_level(&(data->dpm_table.mem_table));
4195
4196 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
4197 "Failed to upload DPM Bootup Levels!",
4198 return -1);
4199
4200 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
4201 "Failed to upload DPM Max Levels!",
4202 return -1);
4203 return 0;
4204 }
4205
4206 static int vega10_get_profiling_clk_mask(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level,
4207 uint32_t *sclk_mask, uint32_t *mclk_mask, uint32_t *soc_mask)
4208 {
4209 struct phm_ppt_v2_information *table_info =
4210 (struct phm_ppt_v2_information *)(hwmgr->pptable);
4211
4212 if (table_info->vdd_dep_on_sclk->count > VEGA10_UMD_PSTATE_GFXCLK_LEVEL &&
4213 table_info->vdd_dep_on_socclk->count > VEGA10_UMD_PSTATE_SOCCLK_LEVEL &&
4214 table_info->vdd_dep_on_mclk->count > VEGA10_UMD_PSTATE_MCLK_LEVEL) {
4215 *sclk_mask = VEGA10_UMD_PSTATE_GFXCLK_LEVEL;
4216 *soc_mask = VEGA10_UMD_PSTATE_SOCCLK_LEVEL;
4217 *mclk_mask = VEGA10_UMD_PSTATE_MCLK_LEVEL;
4218 }
4219
4220 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
4221 *sclk_mask = 0;
4222 } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
4223 *mclk_mask = 0;
4224 } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
4225 *sclk_mask = table_info->vdd_dep_on_sclk->count - 1;
4226 *soc_mask = table_info->vdd_dep_on_socclk->count - 1;
4227 *mclk_mask = table_info->vdd_dep_on_mclk->count - 1;
4228 }
4229 return 0;
4230 }
4231
4232 static void vega10_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
4233 {
4234 switch (mode) {
4235 case AMD_FAN_CTRL_NONE:
4236 vega10_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
4237 break;
4238 case AMD_FAN_CTRL_MANUAL:
4239 if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
4240 vega10_fan_ctrl_stop_smc_fan_control(hwmgr);
4241 break;
4242 case AMD_FAN_CTRL_AUTO:
4243 if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
4244 vega10_fan_ctrl_start_smc_fan_control(hwmgr);
4245 break;
4246 default:
4247 break;
4248 }
4249 }
4250
/*
 * Handle a forced-DPM-level request: HIGH/LOW pin the clocks, AUTO
 * releases them, and the profiling modes translate to explicit
 * per-clock level masks.  On success, fan control is switched to NONE
 * when entering PROFILE_PEAK and back to AUTO when leaving it.
 *
 * Return: 0 on success or for modes with no action, error otherwise.
 */
static int vega10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
				enum amd_dpm_forced_level level)
{
	int ret = 0;
	uint32_t sclk_mask = 0;
	uint32_t mclk_mask = 0;
	uint32_t soc_mask = 0;

	switch (level) {
	case AMD_DPM_FORCED_LEVEL_HIGH:
		ret = vega10_force_dpm_highest(hwmgr);
		break;
	case AMD_DPM_FORCED_LEVEL_LOW:
		ret = vega10_force_dpm_lowest(hwmgr);
		break;
	case AMD_DPM_FORCED_LEVEL_AUTO:
		ret = vega10_unforce_dpm_levels(hwmgr);
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
		/* Translate the profile to level masks, then force them.
		 * NOTE(review): the soc_mask result is computed but not
		 * applied here — confirm whether SOCCLK forcing is
		 * intentionally omitted. */
		ret = vega10_get_profiling_clk_mask(hwmgr, level, &sclk_mask, &mclk_mask, &soc_mask);
		if (ret)
			return ret;
		vega10_force_clock_level(hwmgr, PP_SCLK, 1<<sclk_mask);
		vega10_force_clock_level(hwmgr, PP_MCLK, 1<<mclk_mask);
		break;
	case AMD_DPM_FORCED_LEVEL_MANUAL:
	case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
	default:
		break;
	}

	if (!ret) {
		/* Adjust fan policy only on PEAK transitions. */
		if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
			vega10_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_NONE);
		else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
			vega10_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_AUTO);
	}
	return ret;
}
4293
4294 static uint32_t vega10_get_fan_control_mode(struct pp_hwmgr *hwmgr)
4295 {
4296 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4297
4298 if (data->smu_features[GNLD_FAN_CONTROL].enabled == false)
4299 return AMD_FAN_CTRL_MANUAL;
4300 else
4301 return AMD_FAN_CTRL_AUTO;
4302 }
4303
4304 static int vega10_get_dal_power_level(struct pp_hwmgr *hwmgr,
4305 struct amd_pp_simple_clock_info *info)
4306 {
4307 struct phm_ppt_v2_information *table_info =
4308 (struct phm_ppt_v2_information *)hwmgr->pptable;
4309 struct phm_clock_and_voltage_limits *max_limits =
4310 &table_info->max_clock_voltage_on_ac;
4311
4312 info->engine_max_clock = max_limits->sclk;
4313 info->memory_max_clock = max_limits->mclk;
4314
4315 return 0;
4316 }
4317
4318 static void vega10_get_sclks(struct pp_hwmgr *hwmgr,
4319 struct pp_clock_levels_with_latency *clocks)
4320 {
4321 struct phm_ppt_v2_information *table_info =
4322 (struct phm_ppt_v2_information *)hwmgr->pptable;
4323 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
4324 table_info->vdd_dep_on_sclk;
4325 uint32_t i;
4326
4327 for (i = 0; i < dep_table->count; i++) {
4328 if (dep_table->entries[i].clk) {
4329 clocks->data[clocks->num_levels].clocks_in_khz =
4330 dep_table->entries[i].clk;
4331 clocks->num_levels++;
4332 }
4333 }
4334
4335 }
4336
4337 static uint32_t vega10_get_mem_latency(struct pp_hwmgr *hwmgr,
4338 uint32_t clock)
4339 {
4340 if (clock >= MEM_FREQ_LOW_LATENCY &&
4341 clock < MEM_FREQ_HIGH_LATENCY)
4342 return MEM_LATENCY_HIGH;
4343 else if (clock >= MEM_FREQ_HIGH_LATENCY)
4344 return MEM_LATENCY_LOW;
4345 else
4346 return MEM_LATENCY_ERR;
4347 }
4348
/*
 * Fill 'clocks' with the non-zero MCLK entries of the vdd-on-mclk
 * dependency table, and mirror each frequency/latency pair into the
 * driver's mclk_latency_table (used elsewhere for latency lookups).
 * Both counters are reset here before accumulating.
 */
static void vega10_get_memclocks(struct pp_hwmgr *hwmgr,
		struct pp_clock_levels_with_latency *clocks)
{
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)hwmgr->pptable;
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
			table_info->vdd_dep_on_mclk;
	struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
	uint32_t i;

	clocks->num_levels = 0;
	data->mclk_latency_table.count = 0;

	for (i = 0; i < dep_table->count; i++) {
		/* Skip unpopulated (zero) entries; the chained assignments
		 * write the same value into both output tables. */
		if (dep_table->entries[i].clk) {
			clocks->data[clocks->num_levels].clocks_in_khz =
			data->mclk_latency_table.entries
			[data->mclk_latency_table.count].frequency =
					dep_table->entries[i].clk;
			clocks->data[clocks->num_levels].latency_in_us =
			data->mclk_latency_table.entries
			[data->mclk_latency_table.count].latency =
					vega10_get_mem_latency(hwmgr,
						dep_table->entries[i].clk);
			clocks->num_levels++;
			data->mclk_latency_table.count++;
		}
	}
}
4378
4379 static void vega10_get_dcefclocks(struct pp_hwmgr *hwmgr,
4380 struct pp_clock_levels_with_latency *clocks)
4381 {
4382 struct phm_ppt_v2_information *table_info =
4383 (struct phm_ppt_v2_information *)hwmgr->pptable;
4384 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
4385 table_info->vdd_dep_on_dcefclk;
4386 uint32_t i;
4387
4388 for (i = 0; i < dep_table->count; i++) {
4389 clocks->data[i].clocks_in_khz = dep_table->entries[i].clk;
4390 clocks->data[i].latency_in_us = 0;
4391 clocks->num_levels++;
4392 }
4393 }
4394
4395 static void vega10_get_socclocks(struct pp_hwmgr *hwmgr,
4396 struct pp_clock_levels_with_latency *clocks)
4397 {
4398 struct phm_ppt_v2_information *table_info =
4399 (struct phm_ppt_v2_information *)hwmgr->pptable;
4400 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
4401 table_info->vdd_dep_on_socclk;
4402 uint32_t i;
4403
4404 for (i = 0; i < dep_table->count; i++) {
4405 clocks->data[i].clocks_in_khz = dep_table->entries[i].clk;
4406 clocks->data[i].latency_in_us = 0;
4407 clocks->num_levels++;
4408 }
4409 }
4410
4411 static int vega10_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
4412 enum amd_pp_clock_type type,
4413 struct pp_clock_levels_with_latency *clocks)
4414 {
4415 switch (type) {
4416 case amd_pp_sys_clock:
4417 vega10_get_sclks(hwmgr, clocks);
4418 break;
4419 case amd_pp_mem_clock:
4420 vega10_get_memclocks(hwmgr, clocks);
4421 break;
4422 case amd_pp_dcef_clock:
4423 vega10_get_dcefclocks(hwmgr, clocks);
4424 break;
4425 case amd_pp_soc_clock:
4426 vega10_get_socclocks(hwmgr, clocks);
4427 break;
4428 default:
4429 return -1;
4430 }
4431
4432 return 0;
4433 }
4434
4435 static int vega10_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
4436 enum amd_pp_clock_type type,
4437 struct pp_clock_levels_with_voltage *clocks)
4438 {
4439 struct phm_ppt_v2_information *table_info =
4440 (struct phm_ppt_v2_information *)hwmgr->pptable;
4441 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
4442 uint32_t i;
4443
4444 switch (type) {
4445 case amd_pp_mem_clock:
4446 dep_table = table_info->vdd_dep_on_mclk;
4447 break;
4448 case amd_pp_dcef_clock:
4449 dep_table = table_info->vdd_dep_on_dcefclk;
4450 break;
4451 case amd_pp_disp_clock:
4452 dep_table = table_info->vdd_dep_on_dispclk;
4453 break;
4454 case amd_pp_pixel_clock:
4455 dep_table = table_info->vdd_dep_on_pixclk;
4456 break;
4457 case amd_pp_phy_clock:
4458 dep_table = table_info->vdd_dep_on_phyclk;
4459 break;
4460 default:
4461 return -1;
4462 }
4463
4464 for (i = 0; i < dep_table->count; i++) {
4465 clocks->data[i].clocks_in_khz = dep_table->entries[i].clk;
4466 clocks->data[i].voltage_in_mv = (uint32_t)(table_info->vddc_lookup_table->
4467 entries[dep_table->entries[i].vddInd].us_vdd);
4468 clocks->num_levels++;
4469 }
4470
4471 if (i < dep_table->count)
4472 return -1;
4473
4474 return 0;
4475 }
4476
/*
 * Convert DC's watermark clock ranges into the SMC Watermarks_t table:
 * DMIF sets fill the WM_DCEFCLK rows, MCIF sets the WM_SOCCLK rows.
 * Clock limits arrive in kHz and are stored as little-endian 16-bit
 * values in units of 100 kHz (hence the /100).  On completion the
 * WaterMarksExist flag is recorded; the table itself is uploaded to
 * the SMC elsewhere.
 *
 * Always returns 0; a no-op when watermarks are disabled via registry.
 */
static int vega10_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
		struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
{
	struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
	Watermarks_t *table = &(data->smc_state_table.water_marks_table);
	int result = 0;
	uint32_t i;

	if (!data->registry_data.disable_water_mark) {
		/* DMIF (display) watermark sets -> DCEFCLK rows. */
		for (i = 0; i < wm_with_clock_ranges->num_wm_sets_dmif; i++) {
			table->WatermarkRow[WM_DCEFCLK][i].MinClock =
				cpu_to_le16((uint16_t)
				(wm_with_clock_ranges->wm_sets_dmif[i].wm_min_dcefclk_in_khz) /
				100);
			table->WatermarkRow[WM_DCEFCLK][i].MaxClock =
				cpu_to_le16((uint16_t)
				(wm_with_clock_ranges->wm_sets_dmif[i].wm_max_dcefclk_in_khz) /
				100);
			table->WatermarkRow[WM_DCEFCLK][i].MinUclk =
				cpu_to_le16((uint16_t)
				(wm_with_clock_ranges->wm_sets_dmif[i].wm_min_memclk_in_khz) /
				100);
			table->WatermarkRow[WM_DCEFCLK][i].MaxUclk =
				cpu_to_le16((uint16_t)
				(wm_with_clock_ranges->wm_sets_dmif[i].wm_max_memclk_in_khz) /
				100);
			table->WatermarkRow[WM_DCEFCLK][i].WmSetting = (uint8_t)
					wm_with_clock_ranges->wm_sets_dmif[i].wm_set_id;
		}

		/* MCIF (memory) watermark sets -> SOCCLK rows. */
		for (i = 0; i < wm_with_clock_ranges->num_wm_sets_mcif; i++) {
			table->WatermarkRow[WM_SOCCLK][i].MinClock =
				cpu_to_le16((uint16_t)
				(wm_with_clock_ranges->wm_sets_mcif[i].wm_min_socclk_in_khz) /
				100);
			table->WatermarkRow[WM_SOCCLK][i].MaxClock =
				cpu_to_le16((uint16_t)
				(wm_with_clock_ranges->wm_sets_mcif[i].wm_max_socclk_in_khz) /
				100);
			table->WatermarkRow[WM_SOCCLK][i].MinUclk =
				cpu_to_le16((uint16_t)
				(wm_with_clock_ranges->wm_sets_mcif[i].wm_min_memclk_in_khz) /
				100);
			table->WatermarkRow[WM_SOCCLK][i].MaxUclk =
				cpu_to_le16((uint16_t)
				(wm_with_clock_ranges->wm_sets_mcif[i].wm_max_memclk_in_khz) /
				100);
			table->WatermarkRow[WM_SOCCLK][i].WmSetting = (uint8_t)
					wm_with_clock_ranges->wm_sets_mcif[i].wm_set_id;
		}
		data->water_marks_bitmap = WaterMarksExist;
	}

	return result;
}
4532
/*
 * Force a clock domain (sysfs pp_dpm_sclk/mclk interface) to the DPM
 * levels selected by 'mask': the boot level becomes the lowest set
 * bit, the max level the highest set bit, and both are uploaded to
 * the SMC.  Only valid while in MANUAL dpm level.
 *
 * NOTE(review): a zero mask leaves the first scan at i == 32 (and the
 * second at -1), which would index past the DPM table — confirm that
 * callers always pass a non-zero mask, or add a guard.
 *
 * Return: 0 on success, -EINVAL when a forced level is active or an
 * SMC upload fails.  PP_PCIE is accepted but ignored.
 */
static int vega10_force_clock_level(struct pp_hwmgr *hwmgr,
		enum pp_clock_type type, uint32_t mask)
{
	struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
	int i;

	/* Explicit per-clock forcing conflicts with AUTO/LOW/HIGH modes. */
	if (hwmgr->request_dpm_level & (AMD_DPM_FORCED_LEVEL_AUTO |
				AMD_DPM_FORCED_LEVEL_LOW |
				AMD_DPM_FORCED_LEVEL_HIGH))
		return -EINVAL;

	switch (type) {
	case PP_SCLK:
		/* Lowest set bit -> boot (soft min) level. */
		for (i = 0; i < 32; i++) {
			if (mask & (1 << i))
				break;
		}
		data->smc_state_table.gfx_boot_level = i;

		/* Highest set bit -> max (soft max) level. */
		for (i = 31; i >= 0; i--) {
			if (mask & (1 << i))
				break;
		}
		data->smc_state_table.gfx_max_level = i;

		PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
			"Failed to upload boot level to lowest!",
			return -EINVAL);

		PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
			"Failed to upload dpm max level to highest!",
			return -EINVAL);
		break;

	case PP_MCLK:
		for (i = 0; i < 32; i++) {
			if (mask & (1 << i))
				break;
		}
		data->smc_state_table.mem_boot_level = i;

		for (i = 31; i >= 0; i--) {
			if (mask & (1 << i))
				break;
		}
		data->smc_state_table.mem_max_level = i;

		PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
			"Failed to upload boot level to lowest!",
			return -EINVAL);

		PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
			"Failed to upload dpm max level to highest!",
			return -EINVAL);

		break;

	case PP_PCIE:
	default:
		break;
	}

	return 0;
}
4597
4598 static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
4599 enum pp_clock_type type, char *buf)
4600 {
4601 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4602 struct vega10_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table);
4603 struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table);
4604 struct vega10_pcie_table *pcie_table = &(data->dpm_table.pcie_table);
4605 int i, now, size = 0;
4606
4607 switch (type) {
4608 case PP_SCLK:
4609 if (data->registry_data.sclk_dpm_key_disabled)
4610 break;
4611
4612 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
4613 PPSMC_MSG_GetCurrentGfxclkIndex),
4614 "Attempt to get current sclk index Failed!",
4615 return -1);
4616 PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(hwmgr,
4617 &now),
4618 "Attempt to read sclk index Failed!",
4619 return -1);
4620
4621 for (i = 0; i < sclk_table->count; i++)
4622 size += sprintf(buf + size, "%d: %uMhz %s\n",
4623 i, sclk_table->dpm_levels[i].value / 100,
4624 (i == now) ? "*" : "");
4625 break;
4626 case PP_MCLK:
4627 if (data->registry_data.mclk_dpm_key_disabled)
4628 break;
4629
4630 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
4631 PPSMC_MSG_GetCurrentUclkIndex),
4632 "Attempt to get current mclk index Failed!",
4633 return -1);
4634 PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(hwmgr,
4635 &now),
4636 "Attempt to read mclk index Failed!",
4637 return -1);
4638
4639 for (i = 0; i < mclk_table->count; i++)
4640 size += sprintf(buf + size, "%d: %uMhz %s\n",
4641 i, mclk_table->dpm_levels[i].value / 100,
4642 (i == now) ? "*" : "");
4643 break;
4644 case PP_PCIE:
4645 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
4646 PPSMC_MSG_GetCurrentLinkIndex),
4647 "Attempt to get current mclk index Failed!",
4648 return -1);
4649 PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(hwmgr,
4650 &now),
4651 "Attempt to read mclk index Failed!",
4652 return -1);
4653
4654 for (i = 0; i < pcie_table->count; i++)
4655 size += sprintf(buf + size, "%d: %s %s\n", i,
4656 (pcie_table->pcie_gen[i] == 0) ? "2.5GB, x1" :
4657 (pcie_table->pcie_gen[i] == 1) ? "5.0GB, x16" :
4658 (pcie_table->pcie_gen[i] == 2) ? "8.0GB, x16" : "",
4659 (i == now) ? "*" : "");
4660 break;
4661 default:
4662 break;
4663 }
4664 return size;
4665 }
4666
4667 static int vega10_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
4668 {
4669 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4670 int result = 0;
4671 uint32_t num_turned_on_displays = 1;
4672 Watermarks_t *wm_table = &(data->smc_state_table.water_marks_table);
4673 struct cgs_display_info info = {0};
4674
4675 if ((data->water_marks_bitmap & WaterMarksExist) &&
4676 !(data->water_marks_bitmap & WaterMarksLoaded)) {
4677 result = vega10_copy_table_to_smc(hwmgr,
4678 (uint8_t *)wm_table, WMTABLE);
4679 PP_ASSERT_WITH_CODE(result, "Failed to update WMTABLE!", return EINVAL);
4680 data->water_marks_bitmap |= WaterMarksLoaded;
4681 }
4682
4683 if (data->water_marks_bitmap & WaterMarksLoaded) {
4684 cgs_get_active_displays_info(hwmgr->device, &info);
4685 num_turned_on_displays = info.display_count;
4686 smum_send_msg_to_smc_with_parameter(hwmgr,
4687 PPSMC_MSG_NumOfDisplays, num_turned_on_displays);
4688 }
4689
4690 return result;
4691 }
4692
4693 int vega10_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
4694 {
4695 struct vega10_hwmgr *data =
4696 (struct vega10_hwmgr *)(hwmgr->backend);
4697
4698 if (data->smu_features[GNLD_DPM_UVD].supported) {
4699 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
4700 enable,
4701 data->smu_features[GNLD_DPM_UVD].smu_feature_bitmap),
4702 "Attempt to Enable/Disable DPM UVD Failed!",
4703 return -1);
4704 data->smu_features[GNLD_DPM_UVD].enabled = enable;
4705 }
4706 return 0;
4707 }
4708
4709 static void vega10_power_gate_vce(struct pp_hwmgr *hwmgr, bool bgate)
4710 {
4711 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4712
4713 data->vce_power_gated = bgate;
4714 vega10_enable_disable_vce_dpm(hwmgr, !bgate);
4715 }
4716
4717 static void vega10_power_gate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
4718 {
4719 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4720
4721 data->uvd_power_gated = bgate;
4722 vega10_enable_disable_uvd_dpm(hwmgr, !bgate);
4723 }
4724
4725 static inline bool vega10_are_power_levels_equal(
4726 const struct vega10_performance_level *pl1,
4727 const struct vega10_performance_level *pl2)
4728 {
4729 return ((pl1->soc_clock == pl2->soc_clock) &&
4730 (pl1->gfx_clock == pl2->gfx_clock) &&
4731 (pl1->mem_clock == pl2->mem_clock));
4732 }
4733
4734 static int vega10_check_states_equal(struct pp_hwmgr *hwmgr,
4735 const struct pp_hw_power_state *pstate1,
4736 const struct pp_hw_power_state *pstate2, bool *equal)
4737 {
4738 const struct vega10_power_state *psa;
4739 const struct vega10_power_state *psb;
4740 int i;
4741
4742 if (pstate1 == NULL || pstate2 == NULL || equal == NULL)
4743 return -EINVAL;
4744
4745 psa = cast_const_phw_vega10_power_state(pstate1);
4746 psb = cast_const_phw_vega10_power_state(pstate2);
4747 /* If the two states don't even have the same number of performance levels they cannot be the same state. */
4748 if (psa->performance_level_count != psb->performance_level_count) {
4749 *equal = false;
4750 return 0;
4751 }
4752
4753 for (i = 0; i < psa->performance_level_count; i++) {
4754 if (!vega10_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) {
4755 /* If we have found even one performance level pair that is different the states are different. */
4756 *equal = false;
4757 return 0;
4758 }
4759 }
4760
4761 /* If all performance levels are the same try to use the UVD clocks to break the tie.*/
4762 *equal = ((psa->uvd_clks.vclk == psb->uvd_clks.vclk) && (psa->uvd_clks.dclk == psb->uvd_clks.dclk));
4763 *equal &= ((psa->vce_clks.evclk == psb->vce_clks.evclk) && (psa->vce_clks.ecclk == psb->vce_clks.ecclk));
4764 *equal &= (psa->sclk_threshold == psb->sclk_threshold);
4765
4766 return 0;
4767 }
4768
4769 static bool
4770 vega10_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
4771 {
4772 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4773 bool is_update_required = false;
4774 struct cgs_display_info info = {0, 0, NULL};
4775
4776 cgs_get_active_displays_info(hwmgr->device, &info);
4777
4778 if (data->display_timing.num_existing_displays != info.display_count)
4779 is_update_required = true;
4780
4781 if (PP_CAP(PHM_PlatformCaps_SclkDeepSleep)) {
4782 if (data->display_timing.min_clock_in_sr != hwmgr->display_config.min_core_set_clock_in_sr)
4783 is_update_required = true;
4784 }
4785
4786 return is_update_required;
4787 }
4788
/* Tear down all DPM-related features.  Each step logs a warning on
 * failure but the sequence continues, so every later feature is still
 * disabled; because each failing step overwrites 'result', the LAST
 * error code encountered is what gets returned (0 when all succeed).
 * The ordering of the steps is deliberate — do not reorder.
 */
static int vega10_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
	int tmp_result, result = 0;

	/* Bail out early (successfully) when DPM is not running at all. */
	tmp_result = (vega10_is_dpm_running(hwmgr)) ? 0 : -1;
	PP_ASSERT_WITH_CODE(tmp_result == 0,
			"DPM is not running right now, no need to disable DPM!",
			return 0);

	if (PP_CAP(PHM_PlatformCaps_ThermalController))
		vega10_disable_thermal_protection(hwmgr);

	tmp_result = vega10_disable_power_containment(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable power containment!", result = tmp_result);

	tmp_result = vega10_disable_didt_config(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable didt config!", result = tmp_result);

	tmp_result = vega10_avfs_enable(hwmgr, false);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable AVFS!", result = tmp_result);

	tmp_result = vega10_stop_dpm(hwmgr, SMC_DPM_FEATURES);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to stop DPM!", result = tmp_result);

	tmp_result = vega10_disable_deep_sleep_master_switch(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable deep sleep!", result = tmp_result);

	tmp_result = vega10_disable_ulv(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable ulv!", result = tmp_result);

	tmp_result = vega10_acg_disable(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable acg!", result = tmp_result);
	return result;
}
4830
4831 static int vega10_power_off_asic(struct pp_hwmgr *hwmgr)
4832 {
4833 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4834 int result;
4835
4836 result = vega10_disable_dpm_tasks(hwmgr);
4837 PP_ASSERT_WITH_CODE((0 == result),
4838 "[disable_dpm_tasks] Failed to disable DPM!",
4839 );
4840 data->water_marks_bitmap &= ~(WaterMarksLoaded);
4841
4842 return result;
4843 }
4844
4845 static void vega10_find_min_clock_index(struct pp_hwmgr *hwmgr,
4846 uint32_t *sclk_idx, uint32_t *mclk_idx,
4847 uint32_t min_sclk, uint32_t min_mclk)
4848 {
4849 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4850 struct vega10_dpm_table *dpm_table = &(data->dpm_table);
4851 uint32_t i;
4852
4853 for (i = 0; i < dpm_table->gfx_table.count; i++) {
4854 if (dpm_table->gfx_table.dpm_levels[i].enabled &&
4855 dpm_table->gfx_table.dpm_levels[i].value >= min_sclk) {
4856 *sclk_idx = i;
4857 break;
4858 }
4859 }
4860
4861 for (i = 0; i < dpm_table->mem_table.count; i++) {
4862 if (dpm_table->mem_table.dpm_levels[i].enabled &&
4863 dpm_table->mem_table.dpm_levels[i].value >= min_mclk) {
4864 *mclk_idx = i;
4865 break;
4866 }
4867 }
4868 }
4869
4870 static int vega10_set_power_profile_state(struct pp_hwmgr *hwmgr,
4871 struct amd_pp_profile *request)
4872 {
4873 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4874 uint32_t sclk_idx = ~0, mclk_idx = ~0;
4875
4876 if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_AUTO)
4877 return -EINVAL;
4878
4879 vega10_find_min_clock_index(hwmgr, &sclk_idx, &mclk_idx,
4880 request->min_sclk, request->min_mclk);
4881
4882 if (sclk_idx != ~0) {
4883 if (!data->registry_data.sclk_dpm_key_disabled)
4884 PP_ASSERT_WITH_CODE(
4885 !smum_send_msg_to_smc_with_parameter(
4886 hwmgr,
4887 PPSMC_MSG_SetSoftMinGfxclkByIndex,
4888 sclk_idx),
4889 "Failed to set soft min sclk index!",
4890 return -EINVAL);
4891 }
4892
4893 if (mclk_idx != ~0) {
4894 if (!data->registry_data.mclk_dpm_key_disabled)
4895 PP_ASSERT_WITH_CODE(
4896 !smum_send_msg_to_smc_with_parameter(
4897 hwmgr,
4898 PPSMC_MSG_SetSoftMinUclkByIndex,
4899 mclk_idx),
4900 "Failed to set soft min mclk index!",
4901 return -EINVAL);
4902 }
4903
4904 return 0;
4905 }
4906
4907 static int vega10_get_sclk_od(struct pp_hwmgr *hwmgr)
4908 {
4909 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4910 struct vega10_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table);
4911 struct vega10_single_dpm_table *golden_sclk_table =
4912 &(data->golden_dpm_table.gfx_table);
4913 int value;
4914
4915 value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
4916 golden_sclk_table->dpm_levels
4917 [golden_sclk_table->count - 1].value) *
4918 100 /
4919 golden_sclk_table->dpm_levels
4920 [golden_sclk_table->count - 1].value;
4921
4922 return value;
4923 }
4924
4925 static int vega10_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
4926 {
4927 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4928 struct vega10_single_dpm_table *golden_sclk_table =
4929 &(data->golden_dpm_table.gfx_table);
4930 struct pp_power_state *ps;
4931 struct vega10_power_state *vega10_ps;
4932
4933 ps = hwmgr->request_ps;
4934
4935 if (ps == NULL)
4936 return -EINVAL;
4937
4938 vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
4939
4940 vega10_ps->performance_levels
4941 [vega10_ps->performance_level_count - 1].gfx_clock =
4942 golden_sclk_table->dpm_levels
4943 [golden_sclk_table->count - 1].value *
4944 value / 100 +
4945 golden_sclk_table->dpm_levels
4946 [golden_sclk_table->count - 1].value;
4947
4948 if (vega10_ps->performance_levels
4949 [vega10_ps->performance_level_count - 1].gfx_clock >
4950 hwmgr->platform_descriptor.overdriveLimit.engineClock)
4951 vega10_ps->performance_levels
4952 [vega10_ps->performance_level_count - 1].gfx_clock =
4953 hwmgr->platform_descriptor.overdriveLimit.engineClock;
4954
4955 return 0;
4956 }
4957
4958 static int vega10_get_mclk_od(struct pp_hwmgr *hwmgr)
4959 {
4960 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4961 struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table);
4962 struct vega10_single_dpm_table *golden_mclk_table =
4963 &(data->golden_dpm_table.mem_table);
4964 int value;
4965
4966 value = (mclk_table->dpm_levels
4967 [mclk_table->count - 1].value -
4968 golden_mclk_table->dpm_levels
4969 [golden_mclk_table->count - 1].value) *
4970 100 /
4971 golden_mclk_table->dpm_levels
4972 [golden_mclk_table->count - 1].value;
4973
4974 return value;
4975 }
4976
4977 static int vega10_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
4978 {
4979 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4980 struct vega10_single_dpm_table *golden_mclk_table =
4981 &(data->golden_dpm_table.mem_table);
4982 struct pp_power_state *ps;
4983 struct vega10_power_state *vega10_ps;
4984
4985 ps = hwmgr->request_ps;
4986
4987 if (ps == NULL)
4988 return -EINVAL;
4989
4990 vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
4991
4992 vega10_ps->performance_levels
4993 [vega10_ps->performance_level_count - 1].mem_clock =
4994 golden_mclk_table->dpm_levels
4995 [golden_mclk_table->count - 1].value *
4996 value / 100 +
4997 golden_mclk_table->dpm_levels
4998 [golden_mclk_table->count - 1].value;
4999
5000 if (vega10_ps->performance_levels
5001 [vega10_ps->performance_level_count - 1].mem_clock >
5002 hwmgr->platform_descriptor.overdriveLimit.memoryClock)
5003 vega10_ps->performance_levels
5004 [vega10_ps->performance_level_count - 1].mem_clock =
5005 hwmgr->platform_descriptor.overdriveLimit.memoryClock;
5006
5007 return 0;
5008 }
5009
5010 static int vega10_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
5011 uint32_t virtual_addr_low,
5012 uint32_t virtual_addr_hi,
5013 uint32_t mc_addr_low,
5014 uint32_t mc_addr_hi,
5015 uint32_t size)
5016 {
5017 smum_send_msg_to_smc_with_parameter(hwmgr,
5018 PPSMC_MSG_SetSystemVirtualDramAddrHigh,
5019 virtual_addr_hi);
5020 smum_send_msg_to_smc_with_parameter(hwmgr,
5021 PPSMC_MSG_SetSystemVirtualDramAddrLow,
5022 virtual_addr_low);
5023 smum_send_msg_to_smc_with_parameter(hwmgr,
5024 PPSMC_MSG_DramLogSetDramAddrHigh,
5025 mc_addr_hi);
5026
5027 smum_send_msg_to_smc_with_parameter(hwmgr,
5028 PPSMC_MSG_DramLogSetDramAddrLow,
5029 mc_addr_low);
5030
5031 smum_send_msg_to_smc_with_parameter(hwmgr,
5032 PPSMC_MSG_DramLogSetDramSize,
5033 size);
5034 return 0;
5035 }
5036
/* Register thermal interrupt sources with the interrupt handler via CGS.
 * @hwmgr: hardware manager handle
 * @info:  array of three cgs_irq_src_funcs entries — [0] high-temperature
 *         threshold, [1] low-temperature threshold, [2] CTF on GPIO_19
 *         (per the error strings; layout assumed from the callers —
 *         TODO confirm against the registration site)
 *
 * The high/low threshold interrupts are hooked only for controller types
 * with an internal thermal sensor; the CTF interrupt is always registered.
 * Returns 0 on success or -EINVAL when any registration fails.
 */
static int vega10_register_thermal_interrupt(struct pp_hwmgr *hwmgr,
		const void *info)
{
	struct cgs_irq_src_funcs *irq_src =
			(struct cgs_irq_src_funcs *)info;

	if (hwmgr->thermal_controller.ucType ==
			ATOM_VEGA10_PP_THERMALCONTROLLER_VEGA10 ||
			hwmgr->thermal_controller.ucType ==
			ATOM_VEGA10_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) {
		/* THM client, source id 0: high-temperature threshold. */
		PP_ASSERT_WITH_CODE(!cgs_add_irq_source(hwmgr->device,
				0xf, /* AMDGPU_IH_CLIENTID_THM */
				0, 0, irq_src[0].set, irq_src[0].handler, hwmgr),
				"Failed to register high thermal interrupt!",
				return -EINVAL);
		/* THM client, source id 1: low-temperature threshold. */
		PP_ASSERT_WITH_CODE(!cgs_add_irq_source(hwmgr->device,
				0xf, /* AMDGPU_IH_CLIENTID_THM */
				1, 0, irq_src[1].set, irq_src[1].handler, hwmgr),
				"Failed to register low thermal interrupt!",
				return -EINVAL);
	}

	/* Register CTF(GPIO_19) interrupt */
	PP_ASSERT_WITH_CODE(!cgs_add_irq_source(hwmgr->device,
			0x16, /* AMDGPU_IH_CLIENTID_ROM_SMUIO, */
			83, 0, irq_src[2].set, irq_src[2].handler, hwmgr),
			"Failed to register CTF thermal interrupt!",
			return -EINVAL);

	return 0;
}
5068
/* Vega10 implementation of the generic powerplay hardware-manager
 * interface.  Each field maps a hwmgr operation onto its Vega10 handler;
 * fields not listed here stay NULL (designated initializer) and are
 * treated as unimplemented by core powerplay code.
 */
static const struct pp_hwmgr_func vega10_hwmgr_funcs = {
	/* Lifecycle / state management. */
	.backend_init = vega10_hwmgr_backend_init,
	.backend_fini = vega10_hwmgr_backend_fini,
	.asic_setup = vega10_setup_asic_task,
	.dynamic_state_management_enable = vega10_enable_dpm_tasks,
	.dynamic_state_management_disable = vega10_disable_dpm_tasks,
	/* Power-state table parsing and adjustment. */
	.get_num_of_pp_table_entries =
			vega10_get_number_of_powerplay_table_entries,
	.get_power_state_size = vega10_get_power_state_size,
	.get_pp_table_entry = vega10_get_pp_table_entry,
	.patch_boot_state = vega10_patch_boot_state,
	.apply_state_adjust_rules = vega10_apply_state_adjust_rules,
	.power_state_set = vega10_set_power_state_tasks,
	.get_sclk = vega10_dpm_get_sclk,
	.get_mclk = vega10_dpm_get_mclk,
	.notify_smc_display_config_after_ps_adjustment =
			vega10_notify_smc_display_config_after_ps_adjustment,
	.force_dpm_level = vega10_dpm_force_dpm_level,
	/* Thermal and fan control. */
	.get_temperature = vega10_thermal_get_temperature,
	.stop_thermal_controller = vega10_thermal_stop_thermal_controller,
	.get_fan_speed_info = vega10_fan_ctrl_get_fan_speed_info,
	.get_fan_speed_percent = vega10_fan_ctrl_get_fan_speed_percent,
	.set_fan_speed_percent = vega10_fan_ctrl_set_fan_speed_percent,
	.reset_fan_speed_to_default =
			vega10_fan_ctrl_reset_fan_speed_to_default,
	.get_fan_speed_rpm = vega10_fan_ctrl_get_fan_speed_rpm,
	.set_fan_speed_rpm = vega10_fan_ctrl_set_fan_speed_rpm,
	.uninitialize_thermal_controller =
			vega10_thermal_ctrl_uninitialize_thermal_controller,
	.set_fan_control_mode = vega10_set_fan_control_mode,
	.get_fan_control_mode = vega10_get_fan_control_mode,
	/* Sensors, clocks and display interaction. */
	.read_sensor = vega10_read_sensor,
	.get_dal_power_level = vega10_get_dal_power_level,
	.get_clock_by_type_with_latency = vega10_get_clock_by_type_with_latency,
	.get_clock_by_type_with_voltage = vega10_get_clock_by_type_with_voltage,
	.set_watermarks_for_clocks_ranges = vega10_set_watermarks_for_clocks_ranges,
	.display_clock_voltage_request = vega10_display_clock_voltage_request,
	.force_clock_level = vega10_force_clock_level,
	.print_clock_levels = vega10_print_clock_levels,
	.display_config_changed = vega10_display_configuration_changed_task,
	/* Power gating and state comparison. */
	.powergate_uvd = vega10_power_gate_uvd,
	.powergate_vce = vega10_power_gate_vce,
	.check_states_equal = vega10_check_states_equal,
	.check_smc_update_required_for_display_configuration =
			vega10_check_smc_update_required_for_display_configuration,
	.power_off_asic = vega10_power_off_asic,
	.disable_smc_firmware_ctf = vega10_thermal_disable_alert,
	/* Overdrive and miscellaneous. */
	.set_power_profile_state = vega10_set_power_profile_state,
	.get_sclk_od = vega10_get_sclk_od,
	.set_sclk_od = vega10_set_sclk_od,
	.get_mclk_od = vega10_get_mclk_od,
	.set_mclk_od = vega10_set_mclk_od,
	.avfs_control = vega10_avfs_enable,
	.notify_cac_buffer_info = vega10_notify_cac_buffer_info,
	.register_internal_thermal_interrupt = vega10_register_thermal_interrupt,
	.start_thermal_controller = vega10_start_thermal_controller,
};
5126
5127 int vega10_hwmgr_init(struct pp_hwmgr *hwmgr)
5128 {
5129 hwmgr->hwmgr_func = &vega10_hwmgr_funcs;
5130 hwmgr->pptable_func = &vega10_pptable_funcs;
5131
5132 return 0;
5133 }