/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "pp_debug.h"
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include "atom-types.h"
#include "atombios.h"
#include "processpptables.h"
#include "cgs_common.h"
#include "smumgr.h"
#include "hwmgr.h"
#include "hardwaremanager.h"
#include "rv_ppsmc.h"
#include "smu10_hwmgr.h"
#include "power_state.h"
#include "soc15_common.h"

#define SMU10_MAX_DEEPSLEEP_DIVIDER_ID		5
#define SMU10_MINIMUM_ENGINE_CLOCK		800	/* 8MHz, the low boundary of engine clock allowed on this chip */
#define SCLK_MIN_DIV_INTV_SHIFT			12
#define SMU10_DISPCLK_BYPASS_THRESHOLD		10000	/* 100MHz */
#define SMC_RAM_END				0x40000

#define mmPWR_MISC_CNTL_STATUS					0x0183
#define mmPWR_MISC_CNTL_STATUS_BASE_IDX				0
#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN__SHIFT	0x0
#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT		0x1
#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK		0x00000001L
#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK		0x00000006L

static const unsigned long SMU10_Magic = (unsigned long) PHM_Rv_Magic;

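/*
 * Translate a display clock request from the display core into the
 * corresponding SMC hard-min message. clock_freq_in_khz is converted to
 * MHz before being handed to the SMU; for the DCEF and F clocks the last
 * requested value is cached so identical requests are not re-sent.
 */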
static int smu10_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
			struct pp_display_clock_request *clock_req)
{
	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
	enum amd_pp_clock_type clk_type = clock_req->clock_type;
	uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;
	PPSMC_Msg msg;

	switch (clk_type) {
	case amd_pp_dcf_clock:
		if (clk_freq == smu10_data->dcf_actual_hard_min_freq)
			return 0;
		msg = PPSMC_MSG_SetHardMinDcefclkByFreq;
		smu10_data->dcf_actual_hard_min_freq = clk_freq;
		break;
	case amd_pp_soc_clock:
		msg = PPSMC_MSG_SetHardMinSocclkByFreq;
		break;
	case amd_pp_f_clock:
		if (clk_freq == smu10_data->f_actual_hard_min_freq)
			return 0;
		smu10_data->f_actual_hard_min_freq = clk_freq;
		msg = PPSMC_MSG_SetHardMinFclkByFreq;
		break;
	default:
		pr_info("[DisplayClockVoltageRequest] Invalid clock type!\n");
		return -EINVAL;
	}
	smum_send_msg_to_smc_with_parameter(hwmgr, msg, clk_freq);

	return 0;
}

static struct smu10_power_state *cast_smu10_ps(struct pp_hw_power_state *hw_ps)
{
	if (SMU10_Magic != hw_ps->magic)
		return NULL;

	return (struct smu10_power_state *)hw_ps;
}

static const struct smu10_power_state *cast_const_smu10_ps(
				const struct pp_hw_power_state *hw_ps)
{
	if (SMU10_Magic != hw_ps->magic)
		return NULL;

	return (const struct smu10_power_state *)hw_ps;
}

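/*
 * Seed the smu10 backend with its default DPM state. Driver-controlled
 * GFXOFF is enabled only when PP_GFXOFF_MASK is set in the feature mask.
 */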
static int smu10_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
{
	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);

	smu10_data->dce_slow_sclk_threshold = 30000;
	smu10_data->thermal_auto_throttling_treshold = 0;
	smu10_data->is_nb_dpm_enabled = 1;
	smu10_data->dpm_flags = 1;
	smu10_data->need_min_deep_sleep_dcefclk = true;
	smu10_data->num_active_display = 0;
	smu10_data->deep_sleep_dcefclk = 0;

	if (hwmgr->feature_mask & PP_GFXOFF_MASK)
		smu10_data->gfx_off_controled_by_driver = true;
	else
		smu10_data->gfx_off_controled_by_driver = false;

	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_SclkDeepSleep);

	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_SclkThrottleLowNotification);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_PowerPlaySupport);
	return 0;
}

static int smu10_construct_max_power_limits_table(struct pp_hwmgr *hwmgr,
			struct phm_clock_and_voltage_limits *table)
{
	return 0;
}

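/*
 * Build the vddc-on-DAL-power-level dependency table. The eight DAL power
 * levels are mapped 1:1 onto voltage indices 0..7; the allocation adds
 * seven records to the one already embedded in the table struct.
 */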
static int smu10_init_dynamic_state_adjustment_rule_settings(
							struct pp_hwmgr *hwmgr)
{
	uint32_t table_size =
		sizeof(struct phm_clock_voltage_dependency_table) +
		(7 * sizeof(struct phm_clock_voltage_dependency_record));

	struct phm_clock_voltage_dependency_table *table_clk_vlt =
					kzalloc(table_size, GFP_KERNEL);

	if (!table_clk_vlt) {
		pr_err("Cannot allocate memory!\n");
		return -ENOMEM;
	}

	table_clk_vlt->count = 8;
	table_clk_vlt->entries[0].clk = PP_DAL_POWERLEVEL_0;
	table_clk_vlt->entries[0].v = 0;
	table_clk_vlt->entries[1].clk = PP_DAL_POWERLEVEL_1;
	table_clk_vlt->entries[1].v = 1;
	table_clk_vlt->entries[2].clk = PP_DAL_POWERLEVEL_2;
	table_clk_vlt->entries[2].v = 2;
	table_clk_vlt->entries[3].clk = PP_DAL_POWERLEVEL_3;
	table_clk_vlt->entries[3].v = 3;
	table_clk_vlt->entries[4].clk = PP_DAL_POWERLEVEL_4;
	table_clk_vlt->entries[4].v = 4;
	table_clk_vlt->entries[5].clk = PP_DAL_POWERLEVEL_5;
	table_clk_vlt->entries[5].v = 5;
	table_clk_vlt->entries[6].clk = PP_DAL_POWERLEVEL_6;
	table_clk_vlt->entries[6].v = 6;
	table_clk_vlt->entries[7].clk = PP_DAL_POWERLEVEL_7;
	table_clk_vlt->entries[7].v = 7;
	hwmgr->dyn_state.vddc_dep_on_dal_pwrl = table_clk_vlt;

	return 0;
}

static int smu10_get_system_info_data(struct pp_hwmgr *hwmgr)
{
	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)hwmgr->backend;

	smu10_data->sys_info.htc_hyst_lmt = 5;
	smu10_data->sys_info.htc_tmp_lmt = 203;

	if (smu10_data->thermal_auto_throttling_treshold == 0)
		smu10_data->thermal_auto_throttling_treshold = 203;

	smu10_construct_max_power_limits_table(hwmgr,
				    &hwmgr->dyn_state.max_clock_voltage_on_ac);

	smu10_init_dynamic_state_adjustment_rule_settings(hwmgr);

	return 0;
}

static int smu10_construct_boot_state(struct pp_hwmgr *hwmgr)
{
	return 0;
}

static int smu10_set_clock_limit(struct pp_hwmgr *hwmgr, const void *input)
{
	struct PP_Clocks clocks = {0};
	struct pp_display_clock_request clock_req;

	clocks.dcefClock = hwmgr->display_config->min_dcef_set_clk;
	clock_req.clock_type = amd_pp_dcf_clock;
	clock_req.clock_freq_in_khz = clocks.dcefClock * 10;

	PP_ASSERT_WITH_CODE(!smu10_display_clock_voltage_request(hwmgr, &clock_req),
				"Attempt to set DCF Clock Failed!", return -EINVAL);

	return 0;
}

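/* Convert a clock in 10kHz units to MHz, rounding up. */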
static inline uint32_t convert_10k_to_mhz(uint32_t clock)
{
	return (clock + 99) / 100;
}

static int smu10_set_min_deep_sleep_dcefclk(struct pp_hwmgr *hwmgr, uint32_t clock)
{
	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);

	if (smu10_data->need_min_deep_sleep_dcefclk &&
	    smu10_data->deep_sleep_dcefclk != convert_10k_to_mhz(clock)) {
		smu10_data->deep_sleep_dcefclk = convert_10k_to_mhz(clock);
		smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_SetMinDeepSleepDcefclk,
					smu10_data->deep_sleep_dcefclk);
	}
	return 0;
}

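/*
 * The hard-min setters below cache the last value sent to the SMU and only
 * issue a new SMC message when the requested minimum actually changes.
 */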
static int smu10_set_hard_min_dcefclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t clock)
{
	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);

	if (smu10_data->dcf_actual_hard_min_freq &&
	    smu10_data->dcf_actual_hard_min_freq != convert_10k_to_mhz(clock)) {
		smu10_data->dcf_actual_hard_min_freq = convert_10k_to_mhz(clock);
		smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_SetHardMinDcefclkByFreq,
					smu10_data->dcf_actual_hard_min_freq);
	}
	return 0;
}

static int smu10_set_hard_min_fclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t clock)
{
	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);

	if (smu10_data->f_actual_hard_min_freq &&
	    smu10_data->f_actual_hard_min_freq != convert_10k_to_mhz(clock)) {
		smu10_data->f_actual_hard_min_freq = convert_10k_to_mhz(clock);
		smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_SetHardMinFclkByFreq,
					smu10_data->f_actual_hard_min_freq);
	}
	return 0;
}

static int smu10_set_active_display_count(struct pp_hwmgr *hwmgr, uint32_t count)
{
	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);

	if (smu10_data->num_active_display != count) {
		smu10_data->num_active_display = count;
		smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_SetDisplayCount,
				smu10_data->num_active_display);
	}

	return 0;
}

static int smu10_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
{
	return smu10_set_clock_limit(hwmgr, input);
}

static int smu10_init_power_gate_state(struct pp_hwmgr *hwmgr)
{
	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
	struct amdgpu_device *adev = hwmgr->adev;

	smu10_data->vcn_power_gated = true;
	smu10_data->isp_tileA_power_gated = true;
	smu10_data->isp_tileB_power_gated = true;

	if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)
		return smum_send_msg_to_smc_with_parameter(hwmgr,
							   PPSMC_MSG_SetGfxCGPG,
							   true);
	else
		return 0;
}

static int smu10_setup_asic_task(struct pp_hwmgr *hwmgr)
{
	return smu10_init_power_gate_state(hwmgr);
}

static int smu10_reset_cc6_data(struct pp_hwmgr *hwmgr)
{
	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);

	smu10_data->separation_time = 0;
	smu10_data->cc6_disable = false;
	smu10_data->pstate_disable = false;
	smu10_data->cc6_setting_changed = false;

	return 0;
}

static int smu10_power_off_asic(struct pp_hwmgr *hwmgr)
{
	return smu10_reset_cc6_data(hwmgr);
}

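/*
 * GFX is reported as "on" only when the GFXOFF status field of
 * PWR_MISC_CNTL_STATUS reads 2.
 */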
static bool smu10_is_gfx_on(struct pp_hwmgr *hwmgr)
{
	uint32_t reg;
	struct amdgpu_device *adev = hwmgr->adev;

	reg = RREG32_SOC15(PWR, 0, mmPWR_MISC_CNTL_STATUS);
	if ((reg & PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK) ==
	    (0x2 << PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT))
		return true;

	return false;
}

static int smu10_disable_gfx_off(struct pp_hwmgr *hwmgr)
{
	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);

	if (smu10_data->gfx_off_controled_by_driver) {
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableGfxOff);

		/* confirm gfx is back to "on" state */
		while (!smu10_is_gfx_on(hwmgr))
			msleep(1);
	}

	return 0;
}

static int smu10_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
	return 0;
}

static int smu10_enable_gfx_off(struct pp_hwmgr *hwmgr)
{
	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);

	if (smu10_data->gfx_off_controled_by_driver)
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableGfxOff);

	return 0;
}

static int smu10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
	return 0;
}

static int smu10_gfx_off_control(struct pp_hwmgr *hwmgr, bool enable)
{
	if (enable)
		return smu10_enable_gfx_off(hwmgr);
	else
		return smu10_disable_gfx_off(hwmgr);
}

static int smu10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
				struct pp_power_state *prequest_ps,
			const struct pp_power_state *pcurrent_ps)
{
	return 0;
}

/*
 * Temporary hardcoded clock/voltage breakdown tables, used as a fallback
 * when the SMU-provided clock table comes back empty. Each entry is a
 * { Freq, Vol } pair.
 */
static const DpmClock_t VddDcfClk[] = {
	{ 300, 2600},
	{ 600, 3200},
	{ 600, 3600},
};

static const DpmClock_t VddSocClk[] = {
	{ 478, 2600},
	{ 722, 3200},
	{ 722, 3600},
};

static const DpmClock_t VddFClk[] = {
	{ 400, 2600},
	{1200, 3200},
	{1200, 3600},
};

static const DpmClock_t VddDispClk[] = {
	{ 435, 2600},
	{ 661, 3200},
	{1086, 3600},
};

static const DpmClock_t VddDppClk[] = {
	{ 435, 2600},
	{ 661, 3200},
	{ 661, 3600},
};

static const DpmClock_t VddPhyClk[] = {
	{ 540, 2600},
	{ 810, 3200},
	{ 810, 3600},
};

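/*
 * Copy num_entry { Freq, Vol } pairs from an SMU-format clock table into a
 * freshly allocated driver-side dependency table. Freq is scaled from MHz
 * to 10kHz units on the way in.
 */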
static int smu10_get_clock_voltage_dependency_table(struct pp_hwmgr *hwmgr,
			struct smu10_voltage_dependency_table **pptable,
			uint32_t num_entry, const DpmClock_t *pclk_dependency_table)
{
	uint32_t table_size, i;
	struct smu10_voltage_dependency_table *ptable;

	/* note: this over-allocates, reserving a whole table per entry
	 * where only one record per entry is needed */
	table_size = sizeof(uint32_t) + sizeof(struct smu10_voltage_dependency_table) * num_entry;
	ptable = kzalloc(table_size, GFP_KERNEL);

	if (!ptable)
		return -ENOMEM;

	ptable->count = num_entry;

	for (i = 0; i < ptable->count; i++) {
		ptable->entries[i].clk = pclk_dependency_table->Freq * 100;
		ptable->entries[i].vol = pclk_dependency_table->Vol;
		pclk_dependency_table++;
	}

	*pptable = ptable;

	return 0;
}

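/*
 * Pull the DpmClocks_t table from the SMU and expand it into per-domain
 * dependency tables. If the SMU table is empty (DcefClocks[0].Freq == 0),
 * fall back to the hardcoded tables above. Also caches the min/max gfxclk
 * limits reported by the SMU, in 10kHz units.
 */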
static int smu10_populate_clock_table(struct pp_hwmgr *hwmgr)
{
	uint32_t result;

	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
	DpmClocks_t *table = &(smu10_data->clock_table);
	struct smu10_clock_voltage_information *pinfo = &(smu10_data->clock_vol_info);

	result = smum_smc_table_manager(hwmgr, (uint8_t *)table, SMU10_CLOCKTABLE, true);

	PP_ASSERT_WITH_CODE((result == 0),
			"Attempt to copy clock table from smc failed",
			return result);

	if (table->DcefClocks[0].Freq != 0) {
		smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_dcefclk,
						NUM_DCEFCLK_DPM_LEVELS,
						&smu10_data->clock_table.DcefClocks[0]);
		smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_socclk,
						NUM_SOCCLK_DPM_LEVELS,
						&smu10_data->clock_table.SocClocks[0]);
		smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_fclk,
						NUM_FCLK_DPM_LEVELS,
						&smu10_data->clock_table.FClocks[0]);
		smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_mclk,
						NUM_MEMCLK_DPM_LEVELS,
						&smu10_data->clock_table.MemClocks[0]);
	} else {
		smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_dcefclk,
						ARRAY_SIZE(VddDcfClk),
						&VddDcfClk[0]);
		smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_socclk,
						ARRAY_SIZE(VddSocClk),
						&VddSocClk[0]);
		smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_fclk,
						ARRAY_SIZE(VddFClk),
						&VddFClk[0]);
	}
	smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_dispclk,
					ARRAY_SIZE(VddDispClk),
					&VddDispClk[0]);
	smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_dppclk,
					ARRAY_SIZE(VddDppClk), &VddDppClk[0]);
	smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_phyclk,
					ARRAY_SIZE(VddPhyClk), &VddPhyClk[0]);

	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency);
	result = smum_get_argument(hwmgr);
	smu10_data->gfx_min_freq_limit = result / 10 * 1000;

	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency);
	result = smum_get_argument(hwmgr);
	smu10_data->gfx_max_freq_limit = result / 10 * 1000;

	return 0;
}

static int smu10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
{
	int result = 0;
	struct smu10_hwmgr *data;

	data = kzalloc(sizeof(struct smu10_hwmgr), GFP_KERNEL);
	if (data == NULL)
		return -ENOMEM;

	hwmgr->backend = data;

	result = smu10_initialize_dpm_defaults(hwmgr);
	if (result != 0) {
		pr_err("smu10_initialize_dpm_defaults failed\n");
		return result;
	}

	smu10_populate_clock_table(hwmgr);

	result = smu10_get_system_info_data(hwmgr);
	if (result != 0) {
		pr_err("smu10_get_system_info_data failed\n");
		return result;
	}

	smu10_construct_boot_state(hwmgr);

	hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
						SMU10_MAX_HARDWARE_POWERLEVELS;

	hwmgr->platform_descriptor.hardwarePerformanceLevels =
						SMU10_MAX_HARDWARE_POWERLEVELS;

	hwmgr->platform_descriptor.vbiosInterruptId = 0;

	hwmgr->platform_descriptor.clockStep.engineClock = 500;

	hwmgr->platform_descriptor.clockStep.memoryClock = 500;

	hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;

	hwmgr->pstate_sclk = SMU10_UMD_PSTATE_GFXCLK * 100;
	hwmgr->pstate_mclk = SMU10_UMD_PSTATE_FCLK * 100;

	return result;
}

static int smu10_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
{
	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
	struct smu10_clock_voltage_information *pinfo = &(smu10_data->clock_vol_info);

	kfree(pinfo->vdd_dep_on_dcefclk);
	pinfo->vdd_dep_on_dcefclk = NULL;
	kfree(pinfo->vdd_dep_on_socclk);
	pinfo->vdd_dep_on_socclk = NULL;
	kfree(pinfo->vdd_dep_on_fclk);
	pinfo->vdd_dep_on_fclk = NULL;
	kfree(pinfo->vdd_dep_on_dispclk);
	pinfo->vdd_dep_on_dispclk = NULL;
	kfree(pinfo->vdd_dep_on_dppclk);
	pinfo->vdd_dep_on_dppclk = NULL;
	kfree(pinfo->vdd_dep_on_phyclk);
	pinfo->vdd_dep_on_phyclk = NULL;

	kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
	hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL;

	kfree(hwmgr->backend);
	hwmgr->backend = NULL;

	return 0;
}

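/*
 * Apply a forced DPM level by sending pairs of hard-min/soft-max messages
 * for gfxclk, fclk, socclk and VCN. Clock arguments are in MHz; the cached
 * limits (10kHz units) are divided by 100 before being sent.
 */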
static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
				enum amd_dpm_forced_level level)
{
	struct smu10_hwmgr *data = hwmgr->backend;
	struct amdgpu_device *adev = hwmgr->adev;
	uint32_t min_sclk = hwmgr->display_config->min_core_set_clock;
	uint32_t min_mclk = hwmgr->display_config->min_mem_set_clock/100;

	if (hwmgr->smu_version < 0x1E3700) {
		pr_info("smu firmware version too old, cannot set dpm level\n");
		return 0;
	}

	/* Disable UMDPSTATE support on rv2 temporarily */
	if ((adev->asic_type == CHIP_RAVEN) &&
	    (adev->rev_id >= 8))
		return 0;

	if (min_sclk < data->gfx_min_freq_limit)
		min_sclk = data->gfx_min_freq_limit;

	min_sclk /= 100; /* convert 10KHz units to MHz */
	if (min_mclk < data->clock_table.FClocks[0].Freq)
		min_mclk = data->clock_table.FClocks[0].Freq;

	switch (level) {
	case AMD_DPM_FORCED_LEVEL_HIGH:
	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinGfxClk,
						data->gfx_max_freq_limit/100);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinFclkByFreq,
						SMU10_UMD_PSTATE_PEAK_FCLK);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinSocclkByFreq,
						SMU10_UMD_PSTATE_PEAK_SOCCLK);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinVcn,
						SMU10_UMD_PSTATE_VCE);

		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxGfxClk,
						data->gfx_max_freq_limit/100);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxFclkByFreq,
						SMU10_UMD_PSTATE_PEAK_FCLK);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxSocclkByFreq,
						SMU10_UMD_PSTATE_PEAK_SOCCLK);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxVcn,
						SMU10_UMD_PSTATE_VCE);
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinGfxClk,
						min_sclk);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxGfxClk,
						min_sclk);
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinFclkByFreq,
						min_mclk);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxFclkByFreq,
						min_mclk);
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinGfxClk,
						SMU10_UMD_PSTATE_GFXCLK);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinFclkByFreq,
						SMU10_UMD_PSTATE_FCLK);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinSocclkByFreq,
						SMU10_UMD_PSTATE_SOCCLK);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinVcn,
						SMU10_UMD_PSTATE_VCE);

		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxGfxClk,
						SMU10_UMD_PSTATE_GFXCLK);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxFclkByFreq,
						SMU10_UMD_PSTATE_FCLK);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxSocclkByFreq,
						SMU10_UMD_PSTATE_SOCCLK);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxVcn,
						SMU10_UMD_PSTATE_VCE);
		break;
	case AMD_DPM_FORCED_LEVEL_AUTO:
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinGfxClk,
						min_sclk);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinFclkByFreq,
						hwmgr->display_config->num_display > 3 ?
						SMU10_UMD_PSTATE_PEAK_FCLK :
						min_mclk);

		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinSocclkByFreq,
						SMU10_UMD_PSTATE_MIN_SOCCLK);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinVcn,
						SMU10_UMD_PSTATE_MIN_VCE);

		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxGfxClk,
						data->gfx_max_freq_limit/100);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxFclkByFreq,
						SMU10_UMD_PSTATE_PEAK_FCLK);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxSocclkByFreq,
						SMU10_UMD_PSTATE_PEAK_SOCCLK);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxVcn,
						SMU10_UMD_PSTATE_VCE);
		break;
	case AMD_DPM_FORCED_LEVEL_LOW:
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinGfxClk,
						data->gfx_min_freq_limit/100);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxGfxClk,
						data->gfx_min_freq_limit/100);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinFclkByFreq,
						min_mclk);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxFclkByFreq,
						min_mclk);
		break;
	case AMD_DPM_FORCED_LEVEL_MANUAL:
	case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
	default:
		break;
	}
	return 0;
}

static uint32_t smu10_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
{
	struct smu10_hwmgr *data;

	if (hwmgr == NULL)
		return -EINVAL;

	data = (struct smu10_hwmgr *)(hwmgr->backend);

	if (low)
		return data->clock_vol_info.vdd_dep_on_fclk->entries[0].clk;
	else
		return data->clock_vol_info.vdd_dep_on_fclk->entries[
			data->clock_vol_info.vdd_dep_on_fclk->count - 1].clk;
}

static uint32_t smu10_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
{
	struct smu10_hwmgr *data;

	if (hwmgr == NULL)
		return -EINVAL;

	data = (struct smu10_hwmgr *)(hwmgr->backend);

	if (low)
		return data->gfx_min_freq_limit;
	else
		return data->gfx_max_freq_limit;
}

static int smu10_dpm_patch_boot_state(struct pp_hwmgr *hwmgr,
					struct pp_hw_power_state *hw_ps)
{
	return 0;
}

static int smu10_dpm_get_pp_table_entry_callback(
					struct pp_hwmgr *hwmgr,
					struct pp_hw_power_state *hw_ps,
					unsigned int index,
					const void *clock_info)
{
	struct smu10_power_state *smu10_ps = cast_smu10_ps(hw_ps);

	smu10_ps->levels[index].engine_clock = 0;

	smu10_ps->levels[index].vddc_index = 0;
	smu10_ps->level = index + 1;

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
		smu10_ps->levels[index].ds_divider_index = 5;
		smu10_ps->levels[index].ss_divider_index = 5;
	}

	return 0;
}

static int smu10_dpm_get_num_of_pp_table_entries(struct pp_hwmgr *hwmgr)
{
	int result;
	unsigned long ret = 0;

	result = pp_tables_get_num_of_entries(hwmgr, &ret);

	return result ? 0 : ret;
}

static int smu10_dpm_get_pp_table_entry(struct pp_hwmgr *hwmgr,
		    unsigned long entry, struct pp_power_state *ps)
{
	int result;
	struct smu10_power_state *smu10_ps;

	ps->hardware.magic = SMU10_Magic;

	smu10_ps = cast_smu10_ps(&(ps->hardware));

	result = pp_tables_get_entry(hwmgr, entry, ps,
			smu10_dpm_get_pp_table_entry_callback);

	smu10_ps->uvd_clocks.vclk = ps->uvd_clocks.VCLK;
	smu10_ps->uvd_clocks.dclk = ps->uvd_clocks.DCLK;

	return result;
}

static int smu10_get_power_state_size(struct pp_hwmgr *hwmgr)
{
	return sizeof(struct smu10_power_state);
}

static int smu10_set_cpu_power_state(struct pp_hwmgr *hwmgr)
{
	return 0;
}

static int smu10_store_cc6_data(struct pp_hwmgr *hwmgr, uint32_t separation_time,
			bool cc6_disable, bool pstate_disable, bool pstate_switch_disable)
{
	struct smu10_hwmgr *data = (struct smu10_hwmgr *)(hwmgr->backend);

	if (separation_time != data->separation_time ||
	    cc6_disable != data->cc6_disable ||
	    pstate_disable != data->pstate_disable) {
		data->separation_time = separation_time;
		data->cc6_disable = cc6_disable;
		data->pstate_disable = pstate_disable;
		data->cc6_setting_changed = true;
	}
	return 0;
}

static int smu10_get_dal_power_level(struct pp_hwmgr *hwmgr,
		struct amd_pp_simple_clock_info *info)
{
	return -EINVAL;
}

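/*
 * Force a range of clock levels from a level bitmask: the lowest set bit
 * becomes the hard minimum and the highest set bit the soft maximum.
 */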
static int smu10_force_clock_level(struct pp_hwmgr *hwmgr,
			enum pp_clock_type type, uint32_t mask)
{
	struct smu10_hwmgr *data = hwmgr->backend;
	struct smu10_voltage_dependency_table *mclk_table =
					data->clock_vol_info.vdd_dep_on_fclk;
	uint32_t low, high;

	low = mask ? (ffs(mask) - 1) : 0;
	high = mask ? (fls(mask) - 1) : 0;

	switch (type) {
	case PP_SCLK:
		if (low > 2 || high > 2) {
			pr_info("Currently sclk only supports 3 levels on RV\n");
			return -EINVAL;
		}

		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinGfxClk,
						low == 2 ? data->gfx_max_freq_limit/100 :
						low == 1 ? SMU10_UMD_PSTATE_GFXCLK :
						data->gfx_min_freq_limit/100);

		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxGfxClk,
						high == 0 ? data->gfx_min_freq_limit/100 :
						high == 1 ? SMU10_UMD_PSTATE_GFXCLK :
						data->gfx_max_freq_limit/100);
		break;

	case PP_MCLK:
		if (low > mclk_table->count - 1 || high > mclk_table->count - 1)
			return -EINVAL;

		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinFclkByFreq,
						mclk_table->entries[low].clk/100);

		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxFclkByFreq,
						mclk_table->entries[high].clk/100);
		break;

	case PP_PCIE:
	default:
		break;
	}
	return 0;
}

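/*
 * Emit the sysfs clock-level listing for pp_dpm_sclk/pp_dpm_mclk; the
 * entry matching the current frequency is marked with '*'.
 */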
static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
		enum pp_clock_type type, char *buf)
{
	struct smu10_hwmgr *data = (struct smu10_hwmgr *)(hwmgr->backend);
	struct smu10_voltage_dependency_table *mclk_table =
			data->clock_vol_info.vdd_dep_on_fclk;
	uint32_t i, now, size = 0;

	switch (type) {
	case PP_SCLK:
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency);
		now = smum_get_argument(hwmgr);

		/* driver only knows the min/max gfx_clk; use level 1 for all other gfx clks */
		if (now == data->gfx_max_freq_limit/100)
			i = 2;
		else if (now == data->gfx_min_freq_limit/100)
			i = 0;
		else
			i = 1;

		size += sprintf(buf + size, "0: %uMhz %s\n",
					data->gfx_min_freq_limit/100,
					i == 0 ? "*" : "");
		size += sprintf(buf + size, "1: %uMhz %s\n",
					i == 1 ? now : SMU10_UMD_PSTATE_GFXCLK,
					i == 1 ? "*" : "");
		size += sprintf(buf + size, "2: %uMhz %s\n",
					data->gfx_max_freq_limit/100,
					i == 2 ? "*" : "");
		break;
	case PP_MCLK:
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency);
		now = smum_get_argument(hwmgr);

		for (i = 0; i < mclk_table->count; i++)
			size += sprintf(buf + size, "%d: %uMhz %s\n",
					i,
					mclk_table->entries[i].clk / 100,
					((mclk_table->entries[i].clk / 100)
					 == now) ? "*" : "");
		break;
	default:
		break;
	}

	return size;
}

static int smu10_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state,
				PHM_PerformanceLevelDesignation designation, uint32_t index,
				PHM_PerformanceLevel *level)
{
	struct smu10_hwmgr *data;

	if (level == NULL || hwmgr == NULL || state == NULL)
		return -EINVAL;

	data = (struct smu10_hwmgr *)(hwmgr->backend);

	if (index == 0) {
		level->memory_clock = data->clock_vol_info.vdd_dep_on_fclk->entries[0].clk;
		level->coreClock = data->gfx_min_freq_limit;
	} else {
		level->memory_clock = data->clock_vol_info.vdd_dep_on_fclk->entries[
			data->clock_vol_info.vdd_dep_on_fclk->count - 1].clk;
		level->coreClock = data->gfx_max_freq_limit;
	}

	level->nonLocalMemoryFreq = 0;
	level->nonLocalMemoryWidth = 0;

	return 0;
}

static int smu10_get_current_shallow_sleep_clocks(struct pp_hwmgr *hwmgr,
	const struct pp_hw_power_state *state, struct pp_clock_info *clock_info)
{
	const struct smu10_power_state *ps = cast_const_smu10_ps(state);

	clock_info->min_eng_clk = ps->levels[0].engine_clock / (1 << (ps->levels[0].ss_divider_index));
	clock_info->max_eng_clk = ps->levels[ps->level - 1].engine_clock / (1 << (ps->levels[ps->level - 1].ss_divider_index));

	return 0;
}

#define MEM_FREQ_LOW_LATENCY		25000
#define MEM_FREQ_HIGH_LATENCY		80000
#define MEM_LATENCY_HIGH		245
#define MEM_LATENCY_LOW			35
#define MEM_LATENCY_ERR			0xFFFF

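/*
 * Map a memory clock (10kHz units) onto a latency figure: clocks in the
 * [25000, 80000) band report the high latency, faster clocks the low one,
 * and anything slower is flagged as an error.
 */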
static uint32_t smu10_get_mem_latency(struct pp_hwmgr *hwmgr,
		uint32_t clock)
{
	if (clock >= MEM_FREQ_LOW_LATENCY &&
	    clock < MEM_FREQ_HIGH_LATENCY)
		return MEM_LATENCY_HIGH;
	else if (clock >= MEM_FREQ_HIGH_LATENCY)
		return MEM_LATENCY_LOW;
	else
		return MEM_LATENCY_ERR;
}

static int smu10_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
		enum amd_pp_clock_type type,
		struct pp_clock_levels_with_latency *clocks)
{
	uint32_t i;
	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
	struct smu10_clock_voltage_information *pinfo = &(smu10_data->clock_vol_info);
	struct smu10_voltage_dependency_table *pclk_vol_table = NULL;
	bool latency_required = false;

	if (pinfo == NULL)
		return -EINVAL;

	switch (type) {
	case amd_pp_mem_clock:
		pclk_vol_table = pinfo->vdd_dep_on_mclk;
		latency_required = true;
		break;
	case amd_pp_f_clock:
		pclk_vol_table = pinfo->vdd_dep_on_fclk;
		latency_required = true;
		break;
	case amd_pp_dcf_clock:
		pclk_vol_table = pinfo->vdd_dep_on_dcefclk;
		break;
	case amd_pp_disp_clock:
		pclk_vol_table = pinfo->vdd_dep_on_dispclk;
		break;
	case amd_pp_phy_clock:
		pclk_vol_table = pinfo->vdd_dep_on_phyclk;
		break;
	case amd_pp_dpp_clock:
		pclk_vol_table = pinfo->vdd_dep_on_dppclk;
		break;
	default:
		return -EINVAL;
	}

	if (pclk_vol_table == NULL || pclk_vol_table->count == 0)
		return -EINVAL;

	clocks->num_levels = 0;
	for (i = 0; i < pclk_vol_table->count; i++) {
		clocks->data[i].clocks_in_khz = pclk_vol_table->entries[i].clk * 10;
		clocks->data[i].latency_in_us = latency_required ?
						smu10_get_mem_latency(hwmgr,
						pclk_vol_table->entries[i].clk) :
						0;
		clocks->num_levels++;
	}

	return 0;
}

static int smu10_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
		enum amd_pp_clock_type type,
		struct pp_clock_levels_with_voltage *clocks)
{
	uint32_t i;
	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
	struct smu10_clock_voltage_information *pinfo = &(smu10_data->clock_vol_info);
	struct smu10_voltage_dependency_table *pclk_vol_table = NULL;

	if (pinfo == NULL)
		return -EINVAL;

	switch (type) {
	case amd_pp_mem_clock:
		pclk_vol_table = pinfo->vdd_dep_on_mclk;
		break;
	case amd_pp_f_clock:
		pclk_vol_table = pinfo->vdd_dep_on_fclk;
		break;
	case amd_pp_dcf_clock:
		pclk_vol_table = pinfo->vdd_dep_on_dcefclk;
		break;
	case amd_pp_soc_clock:
		pclk_vol_table = pinfo->vdd_dep_on_socclk;
		break;
	case amd_pp_disp_clock:
		pclk_vol_table = pinfo->vdd_dep_on_dispclk;
		break;
	case amd_pp_phy_clock:
		pclk_vol_table = pinfo->vdd_dep_on_phyclk;
		break;
	default:
		return -EINVAL;
	}

	if (pclk_vol_table == NULL || pclk_vol_table->count == 0)
		return -EINVAL;

	clocks->num_levels = 0;
	for (i = 0; i < pclk_vol_table->count; i++) {
		clocks->data[i].clocks_in_khz = pclk_vol_table->entries[i].clk * 10;
		clocks->data[i].voltage_in_mv = pclk_vol_table->entries[i].vol;
		clocks->num_levels++;
	}

	return 0;
}

static int smu10_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks)
{
	clocks->engine_max_clock = 80000; /* driver can't query the engine clock, temporarily hard-coded to 800MHz */
	return 0;
}

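/*
 * Read the current temperature from THM_TCON_CUR_TMP. The raw field is in
 * 1/8 degC steps; when the range-select bit is set a -49 degC offset
 * applies. The result is scaled by PP_TEMPERATURE_UNITS_PER_CENTIGRADES.
 */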
static int smu10_thermal_get_temperature(struct pp_hwmgr *hwmgr)
{
	struct amdgpu_device *adev = hwmgr->adev;
	uint32_t reg_value = RREG32_SOC15(THM, 0, mmTHM_TCON_CUR_TMP);
	int cur_temp =
		(reg_value & THM_TCON_CUR_TMP__CUR_TEMP_MASK) >> THM_TCON_CUR_TMP__CUR_TEMP__SHIFT;

	if (cur_temp & THM_TCON_CUR_TMP__CUR_TEMP_RANGE_SEL_MASK)
		cur_temp = ((cur_temp / 8) - 49) * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
	else
		cur_temp = (cur_temp / 8) * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;

	return cur_temp;
}

static int smu10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
			  void *value, int *size)
{
	uint32_t sclk, mclk;
	int ret = 0;

	switch (idx) {
	case AMDGPU_PP_SENSOR_GFX_SCLK:
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency);
		sclk = smum_get_argument(hwmgr);
		/* in units of 10kHz */
		*((uint32_t *)value) = sclk * 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GFX_MCLK:
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency);
		mclk = smum_get_argument(hwmgr);
		/* in units of 10kHz */
		*((uint32_t *)value) = mclk * 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GPU_TEMP:
		*((uint32_t *)value) = smu10_thermal_get_temperature(hwmgr);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int smu10_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
		void *clock_ranges)
{
	struct smu10_hwmgr *data = hwmgr->backend;
	struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges = clock_ranges;
	Watermarks_t *table = &(data->water_marks_table);
	int result = 0;

	smu_set_watermarks_for_clocks_ranges(table, wm_with_clock_ranges);
	smum_smc_table_manager(hwmgr, (uint8_t *)table, (uint16_t)SMU10_WMTABLE, false);
	data->water_marks_exist = true;
	return result;
}

static int smu10_smus_notify_pwe(struct pp_hwmgr *hwmgr)
{
	return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SetRccPfcPmeRestoreRegister);
}

static int smu10_powergate_mmhub(struct pp_hwmgr *hwmgr)
{
	return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerGateMmHub);
}

static int smu10_powergate_sdma(struct pp_hwmgr *hwmgr, bool gate)
{
	if (gate)
		return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerDownSdma);
	else
		return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerUpSdma);
}

static void smu10_powergate_vcn(struct pp_hwmgr *hwmgr, bool bgate)
{
	if (bgate) {
		amdgpu_device_ip_set_powergating_state(hwmgr->adev,
						AMD_IP_BLOCK_TYPE_VCN,
						AMD_PG_STATE_GATE);
		smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_PowerDownVcn, 0);
	} else {
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_PowerUpVcn, 0);
		amdgpu_device_ip_set_powergating_state(hwmgr->adev,
						AMD_IP_BLOCK_TYPE_VCN,
						AMD_PG_STATE_UNGATE);
	}
}

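/*
 * hwmgr function table wiring the smu10 (Raven) callbacks into the generic
 * powerplay layer. Unsupported operations are left NULL.
 */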
static const struct pp_hwmgr_func smu10_hwmgr_funcs = {
	.backend_init = smu10_hwmgr_backend_init,
	.backend_fini = smu10_hwmgr_backend_fini,
	.asic_setup = smu10_setup_asic_task,
	.apply_state_adjust_rules = smu10_apply_state_adjust_rules,
	.force_dpm_level = smu10_dpm_force_dpm_level,
	.get_power_state_size = smu10_get_power_state_size,
	.powerdown_uvd = NULL,
	.powergate_uvd = smu10_powergate_vcn,
	.powergate_vce = NULL,
	.get_mclk = smu10_dpm_get_mclk,
	.get_sclk = smu10_dpm_get_sclk,
	.patch_boot_state = smu10_dpm_patch_boot_state,
	.get_pp_table_entry = smu10_dpm_get_pp_table_entry,
	.get_num_of_pp_table_entries = smu10_dpm_get_num_of_pp_table_entries,
	.set_cpu_power_state = smu10_set_cpu_power_state,
	.store_cc6_data = smu10_store_cc6_data,
	.force_clock_level = smu10_force_clock_level,
	.print_clock_levels = smu10_print_clock_levels,
	.get_dal_power_level = smu10_get_dal_power_level,
	.get_performance_level = smu10_get_performance_level,
	.get_current_shallow_sleep_clocks = smu10_get_current_shallow_sleep_clocks,
	.get_clock_by_type_with_latency = smu10_get_clock_by_type_with_latency,
	.get_clock_by_type_with_voltage = smu10_get_clock_by_type_with_voltage,
	.set_watermarks_for_clocks_ranges = smu10_set_watermarks_for_clocks_ranges,
	.get_max_high_clocks = smu10_get_max_high_clocks,
	.read_sensor = smu10_read_sensor,
	.set_active_display_count = smu10_set_active_display_count,
	.set_min_deep_sleep_dcefclk = smu10_set_min_deep_sleep_dcefclk,
	.dynamic_state_management_enable = smu10_enable_dpm_tasks,
	.power_off_asic = smu10_power_off_asic,
	.power_state_set = smu10_set_power_state_tasks,
	.dynamic_state_management_disable = smu10_disable_dpm_tasks,
	.powergate_mmhub = smu10_powergate_mmhub,
	.smus_notify_pwe = smu10_smus_notify_pwe,
	.display_clock_voltage_request = smu10_display_clock_voltage_request,
	.powergate_gfx = smu10_gfx_off_control,
	.powergate_sdma = smu10_powergate_sdma,
	.set_hard_min_dcefclk_by_freq = smu10_set_hard_min_dcefclk_by_freq,
	.set_hard_min_fclk_by_freq = smu10_set_hard_min_fclk_by_freq,
};

int smu10_init_function_pointers(struct pp_hwmgr *hwmgr)
{
	hwmgr->hwmgr_func = &smu10_hwmgr_funcs;
	hwmgr->pptable_func = &pptable_funcs;
	return 0;
}