]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blame - drivers/gpu/drm/amd/amdgpu/cz_dpm.c
drm/amdgpu: implement VCE two instances support
[mirror_ubuntu-jammy-kernel.git] / drivers / gpu / drm / amd / amdgpu / cz_dpm.c
CommitLineData
aaa36a97
AD
1/*
2 * Copyright 2014 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include <linux/firmware.h>
25#include <linux/seq_file.h>
26#include "drmP.h"
27#include "amdgpu.h"
28#include "amdgpu_pm.h"
29#include "amdgpu_atombios.h"
30#include "vid.h"
31#include "vi_dpm.h"
32#include "amdgpu_dpm.h"
33#include "cz_dpm.h"
34#include "cz_ppsmc.h"
35#include "atom.h"
36
37#include "smu/smu_8_0_d.h"
38#include "smu/smu_8_0_sh_mask.h"
39#include "gca/gfx_8_0_d.h"
40#include "gca/gfx_8_0_sh_mask.h"
41#include "gmc/gmc_8_1_d.h"
42#include "bif/bif_5_1_d.h"
43#include "gfx_v8_0.h"
44
45static struct cz_ps *cz_get_ps(struct amdgpu_ps *rps)
46{
47 struct cz_ps *ps = rps->ps_priv;
48
49 return ps;
50}
51
52static struct cz_power_info *cz_get_pi(struct amdgpu_device *adev)
53{
54 struct cz_power_info *pi = adev->pm.dpm.priv;
55
56 return pi;
57}
58
59static uint16_t cz_convert_8bit_index_to_voltage(struct amdgpu_device *adev,
60 uint16_t voltage)
61{
62 uint16_t tmp = 6200 - voltage * 25;
63
64 return tmp;
65}
66
67static void cz_construct_max_power_limits_table(struct amdgpu_device *adev,
68 struct amdgpu_clock_and_voltage_limits *table)
69{
70 struct cz_power_info *pi = cz_get_pi(adev);
71 struct amdgpu_clock_voltage_dependency_table *dep_table =
72 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
73
74 if (dep_table->count > 0) {
75 table->sclk = dep_table->entries[dep_table->count - 1].clk;
76 table->vddc = cz_convert_8bit_index_to_voltage(adev,
77 dep_table->entries[dep_table->count - 1].v);
78 }
79
80 table->mclk = pi->sys_info.nbp_memory_clock[0];
81
82}
83
/* Overlay of the ATOM IntegratedSystemInfo table revisions; which member
 * is valid depends on the crev returned by the table header parser
 * (only the v1.9 layout is consumed in this file). */
union igp_info {
	struct _ATOM_INTEGRATED_SYSTEM_INFO info;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_9 info_9;
};
90
91static int cz_parse_sys_info_table(struct amdgpu_device *adev)
92{
93 struct cz_power_info *pi = cz_get_pi(adev);
94 struct amdgpu_mode_info *mode_info = &adev->mode_info;
95 int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
96 union igp_info *igp_info;
97 u8 frev, crev;
98 u16 data_offset;
99 int i = 0;
100
101 if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
102 &frev, &crev, &data_offset)) {
103 igp_info = (union igp_info *)(mode_info->atom_context->bios +
104 data_offset);
105
106 if (crev != 9) {
107 DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
108 return -EINVAL;
109 }
110 pi->sys_info.bootup_sclk =
111 le32_to_cpu(igp_info->info_9.ulBootUpEngineClock);
112 pi->sys_info.bootup_uma_clk =
113 le32_to_cpu(igp_info->info_9.ulBootUpUMAClock);
114 pi->sys_info.dentist_vco_freq =
115 le32_to_cpu(igp_info->info_9.ulDentistVCOFreq);
116 pi->sys_info.bootup_nb_voltage_index =
117 le16_to_cpu(igp_info->info_9.usBootUpNBVoltage);
118
119 if (igp_info->info_9.ucHtcTmpLmt == 0)
120 pi->sys_info.htc_tmp_lmt = 203;
121 else
122 pi->sys_info.htc_tmp_lmt = igp_info->info_9.ucHtcTmpLmt;
123
124 if (igp_info->info_9.ucHtcHystLmt == 0)
125 pi->sys_info.htc_hyst_lmt = 5;
126 else
127 pi->sys_info.htc_hyst_lmt = igp_info->info_9.ucHtcHystLmt;
128
129 if (pi->sys_info.htc_tmp_lmt <= pi->sys_info.htc_hyst_lmt) {
130 DRM_ERROR("The htcTmpLmt should be larger than htcHystLmt.\n");
131 return -EINVAL;
132 }
133
134 if (le32_to_cpu(igp_info->info_9.ulSystemConfig) & (1 << 3) &&
135 pi->enable_nb_ps_policy)
136 pi->sys_info.nb_dpm_enable = true;
137 else
138 pi->sys_info.nb_dpm_enable = false;
139
140 for (i = 0; i < CZ_NUM_NBPSTATES; i++) {
141 if (i < CZ_NUM_NBPMEMORY_CLOCK)
142 pi->sys_info.nbp_memory_clock[i] =
143 le32_to_cpu(igp_info->info_9.ulNbpStateMemclkFreq[i]);
144 pi->sys_info.nbp_n_clock[i] =
145 le32_to_cpu(igp_info->info_9.ulNbpStateNClkFreq[i]);
146 }
147
148 for (i = 0; i < CZ_MAX_DISPLAY_CLOCK_LEVEL; i++)
149 pi->sys_info.display_clock[i] =
150 le32_to_cpu(igp_info->info_9.sDispClkVoltageMapping[i].ulMaximumSupportedCLK);
151
152 for (i = 0; i < CZ_NUM_NBPSTATES; i++)
153 pi->sys_info.nbp_voltage_index[i] =
154 le32_to_cpu(igp_info->info_9.usNBPStateVoltage[i]);
155
156 if (le32_to_cpu(igp_info->info_9.ulGPUCapInfo) &
157 SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS)
158 pi->caps_enable_dfs_bypass = true;
159
160 pi->sys_info.uma_channel_number =
161 igp_info->info_9.ucUMAChannelNumber;
162
163 cz_construct_max_power_limits_table(adev,
164 &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
165 }
166
167 return 0;
168}
169
170static void cz_patch_voltage_values(struct amdgpu_device *adev)
171{
172 int i;
173 struct amdgpu_uvd_clock_voltage_dependency_table *uvd_table =
174 &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
175 struct amdgpu_vce_clock_voltage_dependency_table *vce_table =
176 &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
177 struct amdgpu_clock_voltage_dependency_table *acp_table =
178 &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
179
180 if (uvd_table->count) {
181 for (i = 0; i < uvd_table->count; i++)
182 uvd_table->entries[i].v =
183 cz_convert_8bit_index_to_voltage(adev,
184 uvd_table->entries[i].v);
185 }
186
187 if (vce_table->count) {
188 for (i = 0; i < vce_table->count; i++)
189 vce_table->entries[i].v =
190 cz_convert_8bit_index_to_voltage(adev,
191 vce_table->entries[i].v);
192 }
193
194 if (acp_table->count) {
195 for (i = 0; i < acp_table->count; i++)
196 acp_table->entries[i].v =
197 cz_convert_8bit_index_to_voltage(adev,
198 acp_table->entries[i].v);
199 }
200
201}
202
203static void cz_construct_boot_state(struct amdgpu_device *adev)
204{
205 struct cz_power_info *pi = cz_get_pi(adev);
206
207 pi->boot_pl.sclk = pi->sys_info.bootup_sclk;
208 pi->boot_pl.vddc_index = pi->sys_info.bootup_nb_voltage_index;
209 pi->boot_pl.ds_divider_index = 0;
210 pi->boot_pl.ss_divider_index = 0;
211 pi->boot_pl.allow_gnb_slow = 1;
212 pi->boot_pl.force_nbp_state = 0;
213 pi->boot_pl.display_wm = 0;
214 pi->boot_pl.vce_wm = 0;
215
216}
217
218static void cz_patch_boot_state(struct amdgpu_device *adev,
219 struct cz_ps *ps)
220{
221 struct cz_power_info *pi = cz_get_pi(adev);
222
223 ps->num_levels = 1;
224 ps->levels[0] = pi->boot_pl;
225}
226
/* Overlay of the per-ASIC PPLib clock info layouts; CZ uses .carrizo. */
union pplib_clock_info {
	struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
	struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
	struct _ATOM_PPLIB_CZ_CLOCK_INFO carrizo;
};
232
233static void cz_parse_pplib_clock_info(struct amdgpu_device *adev,
234 struct amdgpu_ps *rps, int index,
235 union pplib_clock_info *clock_info)
236{
237 struct cz_power_info *pi = cz_get_pi(adev);
238 struct cz_ps *ps = cz_get_ps(rps);
239 struct cz_pl *pl = &ps->levels[index];
240 struct amdgpu_clock_voltage_dependency_table *table =
241 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
242
243 pl->sclk = table->entries[clock_info->carrizo.index].clk;
244 pl->vddc_index = table->entries[clock_info->carrizo.index].v;
245
246 ps->num_levels = index + 1;
247
248 if (pi->caps_sclk_ds) {
249 pl->ds_divider_index = 5;
250 pl->ss_divider_index = 5;
251 }
252
253}
254
255static void cz_parse_pplib_non_clock_info(struct amdgpu_device *adev,
256 struct amdgpu_ps *rps,
257 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
258 u8 table_rev)
259{
260 struct cz_ps *ps = cz_get_ps(rps);
261
262 rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
263 rps->class = le16_to_cpu(non_clock_info->usClassification);
264 rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
265
266 if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
267 rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
268 rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
269 } else {
270 rps->vclk = 0;
271 rps->dclk = 0;
272 }
273
274 if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
275 adev->pm.dpm.boot_ps = rps;
276 cz_patch_boot_state(adev, ps);
277 }
278 if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
279 adev->pm.dpm.uvd_ps = rps;
280
281}
282
/* Overlay of the PPLib power play table revisions found in the VBIOS. */
union power_info {
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
	struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
	struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
};

/* Overlay of the PPLib state entry layouts; this file uses .v2 only. */
union pplib_power_state {
	struct _ATOM_PPLIB_STATE v1;
	struct _ATOM_PPLIB_STATE_V2 v2;
};
295
296static int cz_parse_power_table(struct amdgpu_device *adev)
297{
298 struct amdgpu_mode_info *mode_info = &adev->mode_info;
299 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
300 union pplib_power_state *power_state;
301 int i, j, k, non_clock_array_index, clock_array_index;
302 union pplib_clock_info *clock_info;
303 struct _StateArray *state_array;
304 struct _ClockInfoArray *clock_info_array;
305 struct _NonClockInfoArray *non_clock_info_array;
306 union power_info *power_info;
307 int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
308 u16 data_offset;
309 u8 frev, crev;
310 u8 *power_state_offset;
311 struct cz_ps *ps;
312
313 if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
314 &frev, &crev, &data_offset))
315 return -EINVAL;
316 power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
317
318 state_array = (struct _StateArray *)
319 (mode_info->atom_context->bios + data_offset +
320 le16_to_cpu(power_info->pplib.usStateArrayOffset));
321 clock_info_array = (struct _ClockInfoArray *)
322 (mode_info->atom_context->bios + data_offset +
323 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
324 non_clock_info_array = (struct _NonClockInfoArray *)
325 (mode_info->atom_context->bios + data_offset +
326 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
327
328 adev->pm.dpm.ps = kzalloc(sizeof(struct amdgpu_ps) *
329 state_array->ucNumEntries, GFP_KERNEL);
330
331 if (!adev->pm.dpm.ps)
332 return -ENOMEM;
333
334 power_state_offset = (u8 *)state_array->states;
335 adev->pm.dpm.platform_caps =
336 le32_to_cpu(power_info->pplib.ulPlatformCaps);
337 adev->pm.dpm.backbias_response_time =
338 le16_to_cpu(power_info->pplib.usBackbiasTime);
339 adev->pm.dpm.voltage_response_time =
340 le16_to_cpu(power_info->pplib.usVoltageTime);
341
342 for (i = 0; i < state_array->ucNumEntries; i++) {
343 power_state = (union pplib_power_state *)power_state_offset;
344 non_clock_array_index = power_state->v2.nonClockInfoIndex;
345 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
346 &non_clock_info_array->nonClockInfo[non_clock_array_index];
347
348 ps = kzalloc(sizeof(struct cz_ps), GFP_KERNEL);
349 if (ps == NULL) {
350 kfree(adev->pm.dpm.ps);
351 return -ENOMEM;
352 }
353
354 adev->pm.dpm.ps[i].ps_priv = ps;
355 k = 0;
356 for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
357 clock_array_index = power_state->v2.clockInfoIndex[j];
358 if (clock_array_index >= clock_info_array->ucNumEntries)
359 continue;
360 if (k >= CZ_MAX_HARDWARE_POWERLEVELS)
361 break;
362 clock_info = (union pplib_clock_info *)
363 &clock_info_array->clockInfo[clock_array_index *
364 clock_info_array->ucEntrySize];
365 cz_parse_pplib_clock_info(adev, &adev->pm.dpm.ps[i],
366 k, clock_info);
367 k++;
368 }
369 cz_parse_pplib_non_clock_info(adev, &adev->pm.dpm.ps[i],
370 non_clock_info,
371 non_clock_info_array->ucEntrySize);
372 power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
373 }
374 adev->pm.dpm.num_ps = state_array->ucNumEntries;
375
376 return 0;
377}
378
379static int cz_process_firmware_header(struct amdgpu_device *adev)
380{
381 struct cz_power_info *pi = cz_get_pi(adev);
382 u32 tmp;
383 int ret;
384
385 ret = cz_read_smc_sram_dword(adev, SMU8_FIRMWARE_HEADER_LOCATION +
386 offsetof(struct SMU8_Firmware_Header,
387 DpmTable),
388 &tmp, pi->sram_end);
389
390 if (ret == 0)
391 pi->dpm_table_start = tmp;
392
393 return ret;
394}
395
/*
 * One-time DPM software init: allocate the cz_power_info block, set the
 * driver-side defaults, then parse the VBIOS system-info and power
 * tables and locate the SMU DPM table.
 *
 * Returns 0 on success or a negative error code.  Error paths return
 * without freeing pi; the caller (cz_dpm_sw_init) runs cz_dpm_fini(),
 * which releases adev->pm.dpm.priv and any parsed states.
 */
static int cz_dpm_init(struct amdgpu_device *adev)
{
	struct cz_power_info *pi;
	int ret, i;

	pi = kzalloc(sizeof(struct cz_power_info), GFP_KERNEL);
	if (NULL == pi)
		return -ENOMEM;

	adev->pm.dpm.priv = pi;

	ret = amdgpu_get_platform_caps(adev);
	if (ret)
		return ret;

	ret = amdgpu_parse_extended_power_table(adev);
	if (ret)
		return ret;

	pi->sram_end = SMC_RAM_END;

	/* set up DPM defaults */
	for (i = 0; i < CZ_MAX_HARDWARE_POWERLEVELS; i++)
		pi->active_target[i] = CZ_AT_DFLT;

	pi->mgcg_cgtt_local0 = 0x0;
	pi->mgcg_cgtt_local1 = 0x0;
	pi->clock_slow_down_step = 25000;
	pi->skip_clock_slow_down = 1;
	pi->enable_nb_ps_policy = 1;
	pi->caps_power_containment = true;
	pi->caps_cac = true;
	pi->didt_enabled = false;
	/* Di/Dt ramping caps follow the (currently disabled) didt switch */
	if (pi->didt_enabled) {
		pi->caps_sq_ramping = true;
		pi->caps_db_ramping = true;
		pi->caps_td_ramping = true;
		pi->caps_tcp_ramping = true;
	}
	pi->caps_sclk_ds = true;
	pi->voting_clients = 0x00c00033;
	pi->auto_thermal_throttling_enabled = true;
	pi->bapm_enabled = false;
	pi->disable_nb_ps3_in_battery = false;
	pi->voltage_drop_threshold = 0;
	pi->caps_sclk_throttle_low_notification = false;
	pi->gfx_pg_threshold = 500;
	pi->caps_fps = true;
	/* uvd: DPM always on, power gating only when the ASIC supports it */
	pi->caps_uvd_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_UVD) ? true : false;
	pi->caps_uvd_dpm = true;
	/* vce */
	pi->caps_vce_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_VCE) ? true : false;
	pi->caps_vce_dpm = true;
	/* acp */
	pi->caps_acp_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_ACP) ? true : false;
	pi->caps_acp_dpm = true;

	pi->caps_stable_power_state = false;
	pi->nb_dpm_enabled_by_driver = true;
	pi->nb_dpm_enabled = false;
	pi->caps_voltage_island = false;
	/* flags which indicate need to upload pptable */
	pi->need_pptable_upload = true;

	ret = cz_parse_sys_info_table(adev);
	if (ret)
		return ret;

	/* voltage indices must be converted before states reference them */
	cz_patch_voltage_values(adev);
	cz_construct_boot_state(adev);

	ret = cz_parse_power_table(adev);
	if (ret)
		return ret;

	ret = cz_process_firmware_header(adev);
	if (ret)
		return ret;

	pi->dpm_enabled = true;

	return 0;
}
480
481static void cz_dpm_fini(struct amdgpu_device *adev)
482{
483 int i;
484
485 for (i = 0; i < adev->pm.dpm.num_ps; i++)
486 kfree(adev->pm.dpm.ps[i].ps_priv);
487
488 kfree(adev->pm.dpm.ps);
489 kfree(adev->pm.dpm.priv);
490 amdgpu_free_extended_power_table(adev);
491}
492
/*
 * Print the currently selected sclk DPM level (index, clock, voltage)
 * to a debugfs seq_file.  The level index and voltage level are read
 * from the SMC's profile-index and voltage-status registers.
 */
static void
cz_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
					       struct seq_file *m)
{
	struct amdgpu_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
	/* current sclk level index as reported by the SMC */
	u32 current_index =
		(RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
		TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >>
		TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT;
	u32 sclk, tmp;
	u16 vddc;

	if (current_index >= NUM_SCLK_LEVELS) {
		seq_printf(m, "invalid dpm profile %d\n", current_index);
	} else {
		sclk = table->entries[current_index].clk;
		/* current voltage as an 8-bit SMU index, then converted */
		tmp = (RREG32_SMC(ixSMU_VOLTAGE_STATUS) &
			SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL_MASK) >>
			SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL__SHIFT;
		vddc = cz_convert_8bit_index_to_voltage(adev, (u16)tmp);
		seq_printf(m, "power level %d sclk: %u vddc: %u\n",
			   current_index, sclk, vddc);
	}
}
518
519static void cz_dpm_print_power_state(struct amdgpu_device *adev,
520 struct amdgpu_ps *rps)
521{
522 int i;
523 struct cz_ps *ps = cz_get_ps(rps);
524
525 amdgpu_dpm_print_class_info(rps->class, rps->class2);
526 amdgpu_dpm_print_cap_info(rps->caps);
527
528 DRM_INFO("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
529 for (i = 0; i < ps->num_levels; i++) {
530 struct cz_pl *pl = &ps->levels[i];
531
532 DRM_INFO("\t\tpower level %d sclk: %u vddc: %u\n",
533 i, pl->sclk,
534 cz_convert_8bit_index_to_voltage(adev, pl->vddc_index));
535 }
536
537 amdgpu_dpm_print_ps_status(adev, rps);
538}
539
static void cz_dpm_set_funcs(struct amdgpu_device *adev);

/* Install the CZ dpm function table before any other init step runs. */
static int cz_dpm_early_init(struct amdgpu_device *adev)
{
	cz_dpm_set_funcs(adev);
	return 0;
}
548
/*
 * DPM software init hook: seed the generic pm state with the bootup
 * clocks, then (unless dpm is disabled via the amdgpu_dpm module
 * parameter) run cz_dpm_init and register the pm sysfs entries, all
 * under the pm mutex.  On any failure after cz_dpm_init starts,
 * cz_dpm_fini unwinds its allocations.
 */
static int cz_dpm_sw_init(struct amdgpu_device *adev)
{
	int ret = 0;
	/* fix me to add thermal support TODO */

	/* default to balanced state */
	adev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
	adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
	adev->pm.dpm.forced_level = AMDGPU_DPM_FORCED_LEVEL_AUTO;
	adev->pm.default_sclk = adev->clock.default_sclk;
	adev->pm.default_mclk = adev->clock.default_mclk;
	adev->pm.current_sclk = adev->clock.default_sclk;
	adev->pm.current_mclk = adev->clock.default_mclk;
	adev->pm.int_thermal_type = THERMAL_TYPE_NONE;

	/* amdgpu_dpm == 0 means dpm disabled by module parameter */
	if (amdgpu_dpm == 0)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = cz_dpm_init(adev);
	if (ret)
		goto dpm_init_failed;

	adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
	if (amdgpu_dpm == 1)
		amdgpu_pm_print_power_states(adev);

	ret = amdgpu_pm_sysfs_init(adev);
	if (ret)
		goto dpm_init_failed;

	mutex_unlock(&adev->pm.mutex);
	DRM_INFO("amdgpu: dpm initialized\n");

	return 0;

dpm_init_failed:
	cz_dpm_fini(adev);
	mutex_unlock(&adev->pm.mutex);
	DRM_ERROR("amdgpu: dpm initialization failed\n");

	return ret;
}
592
593static int cz_dpm_sw_fini(struct amdgpu_device *adev)
594{
595 mutex_lock(&adev->pm.mutex);
596 amdgpu_pm_sysfs_fini(adev);
597 cz_dpm_fini(adev);
598 mutex_unlock(&adev->pm.mutex);
599
600 return 0;
601}
602
603static void cz_reset_ap_mask(struct amdgpu_device *adev)
604{
605 struct cz_power_info *pi = cz_get_pi(adev);
606
607 pi->active_process_mask = 0;
608
609}
610
/* Thin wrapper around the SMU power play table download helper. */
static int cz_dpm_download_pptable_from_smu(struct amdgpu_device *adev,
					    void **table)
{
	return cz_smu_download_pptable(adev, table);
}
620
621static int cz_dpm_upload_pptable_to_smu(struct amdgpu_device *adev)
622{
623 struct cz_power_info *pi = cz_get_pi(adev);
624 struct SMU8_Fusion_ClkTable *clock_table;
625 struct atom_clock_dividers dividers;
626 void *table = NULL;
627 uint8_t i = 0;
628 int ret = 0;
629
630 struct amdgpu_clock_voltage_dependency_table *vddc_table =
631 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
632 struct amdgpu_clock_voltage_dependency_table *vddgfx_table =
633 &adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk;
634 struct amdgpu_uvd_clock_voltage_dependency_table *uvd_table =
635 &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
636 struct amdgpu_vce_clock_voltage_dependency_table *vce_table =
637 &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
638 struct amdgpu_clock_voltage_dependency_table *acp_table =
639 &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
640
641 if (!pi->need_pptable_upload)
642 return 0;
643
644 ret = cz_dpm_download_pptable_from_smu(adev, &table);
645 if (ret) {
646 DRM_ERROR("amdgpu: Failed to get power play table from SMU!\n");
647 return -EINVAL;
648 }
649
650 clock_table = (struct SMU8_Fusion_ClkTable *)table;
651 /* patch clock table */
652 if (vddc_table->count > CZ_MAX_HARDWARE_POWERLEVELS ||
653 vddgfx_table->count > CZ_MAX_HARDWARE_POWERLEVELS ||
654 uvd_table->count > CZ_MAX_HARDWARE_POWERLEVELS ||
655 vce_table->count > CZ_MAX_HARDWARE_POWERLEVELS ||
656 acp_table->count > CZ_MAX_HARDWARE_POWERLEVELS) {
657 DRM_ERROR("amdgpu: Invalid Clock Voltage Dependency Table!\n");
658 return -EINVAL;
659 }
660
661 for (i = 0; i < CZ_MAX_HARDWARE_POWERLEVELS; i++) {
662
663 /* vddc sclk */
664 clock_table->SclkBreakdownTable.ClkLevel[i].GnbVid =
665 (i < vddc_table->count) ? (uint8_t)vddc_table->entries[i].v : 0;
666 clock_table->SclkBreakdownTable.ClkLevel[i].Frequency =
667 (i < vddc_table->count) ? vddc_table->entries[i].clk : 0;
668 ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
669 clock_table->SclkBreakdownTable.ClkLevel[i].Frequency,
670 false, &dividers);
671 if (ret)
672 return ret;
673 clock_table->SclkBreakdownTable.ClkLevel[i].DfsDid =
674 (uint8_t)dividers.post_divider;
675
676 /* vddgfx sclk */
677 clock_table->SclkBreakdownTable.ClkLevel[i].GfxVid =
678 (i < vddgfx_table->count) ? (uint8_t)vddgfx_table->entries[i].v : 0;
679
680 /* acp breakdown */
681 clock_table->AclkBreakdownTable.ClkLevel[i].GfxVid =
682 (i < acp_table->count) ? (uint8_t)acp_table->entries[i].v : 0;
683 clock_table->AclkBreakdownTable.ClkLevel[i].Frequency =
684 (i < acp_table->count) ? acp_table->entries[i].clk : 0;
685 ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
686 clock_table->SclkBreakdownTable.ClkLevel[i].Frequency,
687 false, &dividers);
688 if (ret)
689 return ret;
690 clock_table->AclkBreakdownTable.ClkLevel[i].DfsDid =
691 (uint8_t)dividers.post_divider;
692
693 /* uvd breakdown */
694 clock_table->VclkBreakdownTable.ClkLevel[i].GfxVid =
695 (i < uvd_table->count) ? (uint8_t)uvd_table->entries[i].v : 0;
696 clock_table->VclkBreakdownTable.ClkLevel[i].Frequency =
697 (i < uvd_table->count) ? uvd_table->entries[i].vclk : 0;
698 ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
699 clock_table->VclkBreakdownTable.ClkLevel[i].Frequency,
700 false, &dividers);
701 if (ret)
702 return ret;
703 clock_table->VclkBreakdownTable.ClkLevel[i].DfsDid =
704 (uint8_t)dividers.post_divider;
705
706 clock_table->DclkBreakdownTable.ClkLevel[i].GfxVid =
707 (i < uvd_table->count) ? (uint8_t)uvd_table->entries[i].v : 0;
708 clock_table->DclkBreakdownTable.ClkLevel[i].Frequency =
709 (i < uvd_table->count) ? uvd_table->entries[i].dclk : 0;
710 ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
711 clock_table->DclkBreakdownTable.ClkLevel[i].Frequency,
712 false, &dividers);
713 if (ret)
714 return ret;
715 clock_table->DclkBreakdownTable.ClkLevel[i].DfsDid =
716 (uint8_t)dividers.post_divider;
717
718 /* vce breakdown */
719 clock_table->EclkBreakdownTable.ClkLevel[i].GfxVid =
720 (i < vce_table->count) ? (uint8_t)vce_table->entries[i].v : 0;
721 clock_table->EclkBreakdownTable.ClkLevel[i].Frequency =
722 (i < vce_table->count) ? vce_table->entries[i].ecclk : 0;
723 ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
724 clock_table->EclkBreakdownTable.ClkLevel[i].Frequency,
725 false, &dividers);
726 if (ret)
727 return ret;
728 clock_table->EclkBreakdownTable.ClkLevel[i].DfsDid =
729 (uint8_t)dividers.post_divider;
730 }
731
732 /* its time to upload to SMU */
733 ret = cz_smu_upload_pptable(adev);
734 if (ret) {
735 DRM_ERROR("amdgpu: Failed to put power play table to SMU!\n");
736 return ret;
737 }
738
739 return 0;
740}
741
742static void cz_init_sclk_limit(struct amdgpu_device *adev)
743{
744 struct cz_power_info *pi = cz_get_pi(adev);
745 struct amdgpu_clock_voltage_dependency_table *table =
746 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
747 uint32_t clock = 0, level;
748
749 if (!table || !table->count) {
750 DRM_ERROR("Invalid Voltage Dependency table.\n");
751 return;
752 }
753
754 pi->sclk_dpm.soft_min_clk = 0;
755 pi->sclk_dpm.hard_min_clk = 0;
756 cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxSclkLevel);
757 level = cz_get_argument(adev);
758 if (level < table->count)
759 clock = table->entries[level].clk;
760 else {
761 DRM_ERROR("Invalid SLCK Voltage Dependency table entry.\n");
762 clock = table->entries[table->count - 1].clk;
763 }
764
765 pi->sclk_dpm.soft_max_clk = clock;
766 pi->sclk_dpm.hard_max_clk = clock;
767
768}
769
770static void cz_init_uvd_limit(struct amdgpu_device *adev)
771{
772 struct cz_power_info *pi = cz_get_pi(adev);
773 struct amdgpu_uvd_clock_voltage_dependency_table *table =
774 &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
775 uint32_t clock = 0, level;
776
777 if (!table || !table->count) {
778 DRM_ERROR("Invalid Voltage Dependency table.\n");
779 return;
780 }
781
782 pi->uvd_dpm.soft_min_clk = 0;
783 pi->uvd_dpm.hard_min_clk = 0;
784 cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxUvdLevel);
785 level = cz_get_argument(adev);
786 if (level < table->count)
787 clock = table->entries[level].vclk;
788 else {
789 DRM_ERROR("Invalid UVD Voltage Dependency table entry.\n");
790 clock = table->entries[table->count - 1].vclk;
791 }
792
793 pi->uvd_dpm.soft_max_clk = clock;
794 pi->uvd_dpm.hard_max_clk = clock;
795
796}
797
798static void cz_init_vce_limit(struct amdgpu_device *adev)
799{
800 struct cz_power_info *pi = cz_get_pi(adev);
801 struct amdgpu_vce_clock_voltage_dependency_table *table =
802 &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
803 uint32_t clock = 0, level;
804
805 if (!table || !table->count) {
806 DRM_ERROR("Invalid Voltage Dependency table.\n");
807 return;
808 }
809
810 pi->vce_dpm.soft_min_clk = 0;
811 pi->vce_dpm.hard_min_clk = 0;
812 cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxEclkLevel);
813 level = cz_get_argument(adev);
814 if (level < table->count)
815 clock = table->entries[level].evclk;
816 else {
817 /* future BIOS would fix this error */
818 DRM_ERROR("Invalid VCE Voltage Dependency table entry.\n");
819 clock = table->entries[table->count - 1].evclk;
820 }
821
822 pi->vce_dpm.soft_max_clk = clock;
823 pi->vce_dpm.hard_max_clk = clock;
824
825}
826
827static void cz_init_acp_limit(struct amdgpu_device *adev)
828{
829 struct cz_power_info *pi = cz_get_pi(adev);
830 struct amdgpu_clock_voltage_dependency_table *table =
831 &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
832 uint32_t clock = 0, level;
833
834 if (!table || !table->count) {
835 DRM_ERROR("Invalid Voltage Dependency table.\n");
836 return;
837 }
838
839 pi->acp_dpm.soft_min_clk = 0;
840 pi->acp_dpm.hard_min_clk = 0;
841 cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxAclkLevel);
842 level = cz_get_argument(adev);
843 if (level < table->count)
844 clock = table->entries[level].clk;
845 else {
846 DRM_ERROR("Invalid ACP Voltage Dependency table entry.\n");
847 clock = table->entries[table->count - 1].clk;
848 }
849
850 pi->acp_dpm.soft_max_clk = clock;
851 pi->acp_dpm.hard_max_clk = clock;
852
853}
854
855static void cz_init_pg_state(struct amdgpu_device *adev)
856{
857 struct cz_power_info *pi = cz_get_pi(adev);
858
859 pi->uvd_power_gated = false;
860 pi->vce_power_gated = false;
861 pi->acp_power_gated = false;
862
863}
864
865static void cz_init_sclk_threshold(struct amdgpu_device *adev)
866{
867 struct cz_power_info *pi = cz_get_pi(adev);
868
869 pi->low_sclk_interrupt_threshold = 0;
870
871}
872
/*
 * Per-resume ASIC setup sequence.  Order matters: the power play table
 * must be uploaded to the SMU before the sclk/uvd/vce/acp limit
 * queries, which ask the SMU for its maximum supported levels.
 */
static void cz_dpm_setup_asic(struct amdgpu_device *adev)
{
	cz_reset_ap_mask(adev);
	cz_dpm_upload_pptable_to_smu(adev);
	cz_init_sclk_limit(adev);
	cz_init_uvd_limit(adev);
	cz_init_vce_limit(adev);
	cz_init_acp_limit(adev);
	cz_init_pg_state(adev);
	cz_init_sclk_threshold(adev);
}
885
886static bool cz_check_smu_feature(struct amdgpu_device *adev,
887 uint32_t feature)
888{
889 uint32_t smu_feature = 0;
890 int ret;
891
892 ret = cz_send_msg_to_smc_with_parameter(adev,
893 PPSMC_MSG_GetFeatureStatus, 0);
894 if (ret) {
895 DRM_ERROR("Failed to get SMU features from SMC.\n");
896 return false;
897 } else {
898 smu_feature = cz_get_argument(adev);
899 if (feature & smu_feature)
900 return true;
901 }
902
903 return false;
904}
905
906static bool cz_check_for_dpm_enabled(struct amdgpu_device *adev)
907{
908 if (cz_check_smu_feature(adev,
909 SMU_EnabledFeatureScoreboard_SclkDpmOn))
910 return true;
911
912 return false;
913}
914
/* Grant the default client set voting rights for sclk frequency
 * transitions.
 * NOTE(review): cz_dpm_init caches 0x00c00033 in pi->voting_clients,
 * but this writes the PPCZ_VOTINGRIGHTSCLIENTS_DFLT0 constant instead
 * -- confirm the two are meant to stay in sync. */
static void cz_program_voting_clients(struct amdgpu_device *adev)
{
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, PPCZ_VOTINGRIGHTSCLIENTS_DFLT0);
}
919
/* Revoke all clients' voting rights for sclk frequency transitions. */
static void cz_clear_voting_clients(struct amdgpu_device *adev)
{
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0);
}
924
925static int cz_start_dpm(struct amdgpu_device *adev)
926{
927 int ret = 0;
928
929 if (amdgpu_dpm) {
930 ret = cz_send_msg_to_smc_with_parameter(adev,
931 PPSMC_MSG_EnableAllSmuFeatures, SCLK_DPM_MASK);
932 if (ret) {
933 DRM_ERROR("SMU feature: SCLK_DPM enable failed\n");
934 return -EINVAL;
935 }
936 }
937
938 return 0;
939}
940
941static int cz_stop_dpm(struct amdgpu_device *adev)
942{
943 int ret = 0;
944
945 if (amdgpu_dpm && adev->pm.dpm_enabled) {
946 ret = cz_send_msg_to_smc_with_parameter(adev,
947 PPSMC_MSG_DisableAllSmuFeatures, SCLK_DPM_MASK);
948 if (ret) {
949 DRM_ERROR("SMU feature: SCLK_DPM disable failed\n");
950 return -EINVAL;
951 }
952 }
953
954 return 0;
955}
956
957static uint32_t cz_get_sclk_level(struct amdgpu_device *adev,
958 uint32_t clock, uint16_t msg)
959{
960 int i = 0;
961 struct amdgpu_clock_voltage_dependency_table *table =
962 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
963
964 switch (msg) {
965 case PPSMC_MSG_SetSclkSoftMin:
966 case PPSMC_MSG_SetSclkHardMin:
967 for (i = 0; i < table->count; i++)
968 if (clock <= table->entries[i].clk)
969 break;
970 if (i == table->count)
971 i = table->count - 1;
972 break;
973 case PPSMC_MSG_SetSclkSoftMax:
974 case PPSMC_MSG_SetSclkHardMax:
975 for (i = table->count - 1; i >= 0; i--)
976 if (clock >= table->entries[i].clk)
977 break;
978 if (i < 0)
979 i = 0;
980 break;
981 default:
982 break;
983 }
984
985 return i;
986}
987
988static int cz_program_bootup_state(struct amdgpu_device *adev)
989{
990 struct cz_power_info *pi = cz_get_pi(adev);
991 uint32_t soft_min_clk = 0;
992 uint32_t soft_max_clk = 0;
993 int ret = 0;
994
995 pi->sclk_dpm.soft_min_clk = pi->sys_info.bootup_sclk;
996 pi->sclk_dpm.soft_max_clk = pi->sys_info.bootup_sclk;
997
998 soft_min_clk = cz_get_sclk_level(adev,
999 pi->sclk_dpm.soft_min_clk,
1000 PPSMC_MSG_SetSclkSoftMin);
1001 soft_max_clk = cz_get_sclk_level(adev,
1002 pi->sclk_dpm.soft_max_clk,
1003 PPSMC_MSG_SetSclkSoftMax);
1004
1005 ret = cz_send_msg_to_smc_with_parameter(adev,
1006 PPSMC_MSG_SetSclkSoftMin, soft_min_clk);
1007 if (ret)
1008 return -EINVAL;
1009
1010 ret = cz_send_msg_to_smc_with_parameter(adev,
1011 PPSMC_MSG_SetSclkSoftMax, soft_max_clk);
1012 if (ret)
1013 return -EINVAL;
1014
1015 return 0;
1016}
1017
/* TODO: disabling clock/power gating around Di/Dt programming is not
 * implemented yet; stubbed to report success. */
static int cz_disable_cgpg(struct amdgpu_device *adev)
{
	return 0;
}
1023
/* TODO: re-enabling clock/power gating after Di/Dt programming is not
 * implemented yet; stubbed to report success. */
static int cz_enable_cgpg(struct amdgpu_device *adev)
{
	return 0;
}
1029
/* TODO: Di/Dt power-tune config register programming is not
 * implemented yet; stubbed to report success. */
static int cz_program_pt_config_registers(struct amdgpu_device *adev)
{
	return 0;
}
1035
1036static void cz_do_enable_didt(struct amdgpu_device *adev, bool enable)
1037{
1038 struct cz_power_info *pi = cz_get_pi(adev);
1039 uint32_t reg = 0;
1040
1041 if (pi->caps_sq_ramping) {
1042 reg = RREG32_DIDT(ixDIDT_SQ_CTRL0);
1043 if (enable)
1044 reg = REG_SET_FIELD(reg, DIDT_SQ_CTRL0, DIDT_CTRL_EN, 1);
1045 else
1046 reg = REG_SET_FIELD(reg, DIDT_SQ_CTRL0, DIDT_CTRL_EN, 0);
1047 WREG32_DIDT(ixDIDT_SQ_CTRL0, reg);
1048 }
1049 if (pi->caps_db_ramping) {
1050 reg = RREG32_DIDT(ixDIDT_DB_CTRL0);
1051 if (enable)
1052 reg = REG_SET_FIELD(reg, DIDT_DB_CTRL0, DIDT_CTRL_EN, 1);
1053 else
1054 reg = REG_SET_FIELD(reg, DIDT_DB_CTRL0, DIDT_CTRL_EN, 0);
1055 WREG32_DIDT(ixDIDT_DB_CTRL0, reg);
1056 }
1057 if (pi->caps_td_ramping) {
1058 reg = RREG32_DIDT(ixDIDT_TD_CTRL0);
1059 if (enable)
1060 reg = REG_SET_FIELD(reg, DIDT_TD_CTRL0, DIDT_CTRL_EN, 1);
1061 else
1062 reg = REG_SET_FIELD(reg, DIDT_TD_CTRL0, DIDT_CTRL_EN, 0);
1063 WREG32_DIDT(ixDIDT_TD_CTRL0, reg);
1064 }
1065 if (pi->caps_tcp_ramping) {
1066 reg = RREG32_DIDT(ixDIDT_TCP_CTRL0);
1067 if (enable)
1068 reg = REG_SET_FIELD(reg, DIDT_SQ_CTRL0, DIDT_CTRL_EN, 1);
1069 else
1070 reg = REG_SET_FIELD(reg, DIDT_SQ_CTRL0, DIDT_CTRL_EN, 0);
1071 WREG32_DIDT(ixDIDT_TCP_CTRL0, reg);
1072 }
1073
1074}
1075
/*
 * Enable or disable di/dt throttling for every supported ramping block.
 * The DIDT registers must not be touched while CG/PG is active, so the
 * GFX block is moved into safe mode (CG/PG off) around the programming
 * and restored afterwards.  Returns 0 on success, -EINVAL on failure.
 */
static int cz_enable_didt(struct amdgpu_device *adev, bool enable)
{
	struct cz_power_info *pi = cz_get_pi(adev);
	int ret;

	/* nothing to do unless at least one ramping block is supported */
	if (pi->caps_sq_ramping || pi->caps_db_ramping ||
	    pi->caps_td_ramping || pi->caps_tcp_ramping) {
		/* enter safe mode: CG/PG off before touching DIDT registers */
		if (adev->gfx.gfx_current_status != AMDGPU_GFX_SAFE_MODE) {
			ret = cz_disable_cgpg(adev);
			if (ret) {
				DRM_ERROR("Pre Di/Dt disable cg/pg failed\n");
				return -EINVAL;
			}
			adev->gfx.gfx_current_status = AMDGPU_GFX_SAFE_MODE;
		}

		ret = cz_program_pt_config_registers(adev);
		if (ret) {
			DRM_ERROR("Di/Dt config failed\n");
			return -EINVAL;
		}
		cz_do_enable_didt(adev, enable);

		/* leave safe mode: restore CG/PG after programming */
		if (adev->gfx.gfx_current_status == AMDGPU_GFX_SAFE_MODE) {
			ret = cz_enable_cgpg(adev);
			if (ret) {
				DRM_ERROR("Post Di/Dt enable cg/pg failed\n");
				return -EINVAL;
			}
			adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;
		}
	}

	return 0;
}
1111
/* TODO: reset the ACP (audio co-processor) boot DPM level (stub, no-op) */
static void cz_reset_acp_boot_level(struct amdgpu_device *adev)
{
}
1116
1117static void cz_update_current_ps(struct amdgpu_device *adev,
1118 struct amdgpu_ps *rps)
1119{
1120 struct cz_power_info *pi = cz_get_pi(adev);
1121 struct cz_ps *ps = cz_get_ps(rps);
1122
1123 pi->current_ps = *ps;
1124 pi->current_rps = *rps;
1125 pi->current_rps.ps_priv = ps;
1126
1127}
1128
1129static void cz_update_requested_ps(struct amdgpu_device *adev,
1130 struct amdgpu_ps *rps)
1131{
1132 struct cz_power_info *pi = cz_get_pi(adev);
1133 struct cz_ps *ps = cz_get_ps(rps);
1134
1135 pi->requested_ps = *ps;
1136 pi->requested_rps = *rps;
1137 pi->requested_rps.ps_priv = ps;
1138
1139}
1140
1141/* PP arbiter support needed TODO */
1142static void cz_apply_state_adjust_rules(struct amdgpu_device *adev,
1143 struct amdgpu_ps *new_rps,
1144 struct amdgpu_ps *old_rps)
1145{
1146 struct cz_ps *ps = cz_get_ps(new_rps);
1147 struct cz_power_info *pi = cz_get_pi(adev);
1148 struct amdgpu_clock_and_voltage_limits *limits =
1149 &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
1150 /* 10kHz memory clock */
1151 uint32_t mclk = 0;
1152
1153 ps->force_high = false;
1154 ps->need_dfs_bypass = true;
1155 pi->video_start = new_rps->dclk || new_rps->vclk ||
1156 new_rps->evclk || new_rps->ecclk;
1157
1158 if ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) ==
1159 ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
1160 pi->battery_state = true;
1161 else
1162 pi->battery_state = false;
1163
1164 if (pi->caps_stable_power_state)
1165 mclk = limits->mclk;
1166
1167 if (mclk > pi->sys_info.nbp_memory_clock[CZ_NUM_NBPMEMORY_CLOCK - 1])
1168 ps->force_high = true;
1169
1170}
1171
1172static int cz_dpm_enable(struct amdgpu_device *adev)
1173{
1174 int ret = 0;
1175
1176 /* renable will hang up SMU, so check first */
1177 if (cz_check_for_dpm_enabled(adev))
1178 return -EINVAL;
1179
1180 cz_program_voting_clients(adev);
1181
1182 ret = cz_start_dpm(adev);
1183 if (ret) {
1184 DRM_ERROR("Carrizo DPM enable failed\n");
1185 return -EINVAL;
1186 }
1187
1188 ret = cz_program_bootup_state(adev);
1189 if (ret) {
1190 DRM_ERROR("Carrizo bootup state program failed\n");
1191 return -EINVAL;
1192 }
1193
1194 ret = cz_enable_didt(adev, true);
1195 if (ret) {
1196 DRM_ERROR("Carrizo enable di/dt failed\n");
1197 return -EINVAL;
1198 }
1199
1200 cz_reset_acp_boot_level(adev);
1201
1202 cz_update_current_ps(adev, adev->pm.dpm.boot_ps);
1203
1204 return 0;
1205}
1206
1207static int cz_dpm_hw_init(struct amdgpu_device *adev)
1208{
46651cc5 1209 int ret = 0;
aaa36a97
AD
1210
1211 mutex_lock(&adev->pm.mutex);
1212
1213 /* init smc in dpm hw init */
1214 ret = cz_smu_init(adev);
1215 if (ret) {
1216 DRM_ERROR("amdgpu: smc initialization failed\n");
1217 mutex_unlock(&adev->pm.mutex);
1218 return ret;
1219 }
1220
1221 /* do the actual fw loading */
1222 ret = cz_smu_start(adev);
1223 if (ret) {
1224 DRM_ERROR("amdgpu: smc start failed\n");
1225 mutex_unlock(&adev->pm.mutex);
1226 return ret;
1227 }
1228
46651cc5
SJ
1229 if (!amdgpu_dpm) {
1230 adev->pm.dpm_enabled = false;
1231 mutex_unlock(&adev->pm.mutex);
1232 return ret;
1233 }
1234
aaa36a97
AD
1235 /* cz dpm setup asic */
1236 cz_dpm_setup_asic(adev);
1237
1238 /* cz dpm enable */
1239 ret = cz_dpm_enable(adev);
1240 if (ret)
1241 adev->pm.dpm_enabled = false;
1242 else
1243 adev->pm.dpm_enabled = true;
1244
1245 mutex_unlock(&adev->pm.mutex);
1246
1247 return 0;
1248}
1249
1250static int cz_dpm_disable(struct amdgpu_device *adev)
1251{
1252 int ret = 0;
1253
1254 if (!cz_check_for_dpm_enabled(adev))
1255 return -EINVAL;
1256
1257 ret = cz_enable_didt(adev, false);
1258 if (ret) {
1259 DRM_ERROR("Carrizo disable di/dt failed\n");
1260 return -EINVAL;
1261 }
1262
1263 cz_clear_voting_clients(adev);
1264 cz_stop_dpm(adev);
1265 cz_update_current_ps(adev, adev->pm.dpm.boot_ps);
1266
1267 return 0;
1268}
1269
1270static int cz_dpm_hw_fini(struct amdgpu_device *adev)
1271{
1272 int ret = 0;
1273
1274 mutex_lock(&adev->pm.mutex);
1275
1276 cz_smu_fini(adev);
1277
1278 if (adev->pm.dpm_enabled) {
1279 ret = cz_dpm_disable(adev);
aaa36a97
AD
1280
1281 adev->pm.dpm.current_ps =
1282 adev->pm.dpm.requested_ps =
1283 adev->pm.dpm.boot_ps;
1284 }
1285
1286 adev->pm.dpm_enabled = false;
1287
1288 mutex_unlock(&adev->pm.mutex);
1289
10457457 1290 return ret;
aaa36a97
AD
1291}
1292
1293static int cz_dpm_suspend(struct amdgpu_device *adev)
1294{
1295 int ret = 0;
1296
1297 if (adev->pm.dpm_enabled) {
1298 mutex_lock(&adev->pm.mutex);
1299
1300 ret = cz_dpm_disable(adev);
aaa36a97
AD
1301
1302 adev->pm.dpm.current_ps =
1303 adev->pm.dpm.requested_ps =
1304 adev->pm.dpm.boot_ps;
1305
1306 mutex_unlock(&adev->pm.mutex);
1307 }
1308
10457457 1309 return ret;
aaa36a97
AD
1310}
1311
1312static int cz_dpm_resume(struct amdgpu_device *adev)
1313{
1314 int ret = 0;
1315
1316 mutex_lock(&adev->pm.mutex);
1317 ret = cz_smu_init(adev);
1318 if (ret) {
1319 DRM_ERROR("amdgpu: smc resume failed\n");
1320 mutex_unlock(&adev->pm.mutex);
1321 return ret;
1322 }
1323
1324 /* do the actual fw loading */
1325 ret = cz_smu_start(adev);
1326 if (ret) {
1327 DRM_ERROR("amdgpu: smc start failed\n");
1328 mutex_unlock(&adev->pm.mutex);
1329 return ret;
1330 }
1331
46651cc5
SJ
1332 if (!amdgpu_dpm) {
1333 adev->pm.dpm_enabled = false;
1334 mutex_unlock(&adev->pm.mutex);
1335 return ret;
1336 }
1337
aaa36a97
AD
1338 /* cz dpm setup asic */
1339 cz_dpm_setup_asic(adev);
1340
1341 /* cz dpm enable */
1342 ret = cz_dpm_enable(adev);
1343 if (ret)
1344 adev->pm.dpm_enabled = false;
1345 else
1346 adev->pm.dpm_enabled = true;
1347
1348 mutex_unlock(&adev->pm.mutex);
1349 /* upon resume, re-compute the clocks */
1350 if (adev->pm.dpm_enabled)
1351 amdgpu_pm_compute_clocks(adev);
1352
1353 return 0;
1354}
1355
/* clockgating IP callback: nothing to do for the dpm block (stub) */
static int cz_dpm_set_clockgating_state(struct amdgpu_device *adev,
					enum amdgpu_clockgating_state state)
{
	return 0;
}
1361
/* powergating IP callback: nothing to do for the dpm block (stub) */
static int cz_dpm_set_powergating_state(struct amdgpu_device *adev,
					enum amdgpu_powergating_state state)
{
	return 0;
}
1367
1368/* borrowed from KV, need future unify */
1369static int cz_dpm_get_temperature(struct amdgpu_device *adev)
1370{
1371 int actual_temp = 0;
1372 uint32_t temp = RREG32_SMC(0xC0300E0C);
1373
1374 if (temp)
1375 actual_temp = 1000 * ((temp / 8) - 49);
1376
1377 return actual_temp;
1378}
1379
1380static int cz_dpm_pre_set_power_state(struct amdgpu_device *adev)
1381{
1382 struct cz_power_info *pi = cz_get_pi(adev);
1383 struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps;
1384 struct amdgpu_ps *new_ps = &requested_ps;
1385
1386 cz_update_requested_ps(adev, new_ps);
1387 cz_apply_state_adjust_rules(adev, &pi->requested_rps,
1388 &pi->current_rps);
1389
1390 return 0;
1391}
1392
/*
 * Recompute the sclk soft min/max and push them to the SMU.  With the
 * stable-power-state cap set, the floor is raised to 75% of the AC
 * sclk limit and min and max are pinned together; otherwise the cached
 * soft max is (re-)sent unchanged.
 */
static int cz_dpm_update_sclk_limit(struct amdgpu_device *adev)
{
	struct cz_power_info *pi = cz_get_pi(adev);
	struct amdgpu_clock_and_voltage_limits *limits =
		&adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
	uint32_t clock, stable_ps_clock = 0;

	clock = pi->sclk_dpm.soft_min_clk;

	/* stable power state: floor the sclk at 75% of the AC limit */
	if (pi->caps_stable_power_state) {
		stable_ps_clock = limits->sclk * 75 / 100;
		if (clock < stable_ps_clock)
			clock = stable_ps_clock;
	}

	/* only message the SMU when the soft min actually changed */
	if (clock != pi->sclk_dpm.soft_min_clk) {
		pi->sclk_dpm.soft_min_clk = clock;
		cz_send_msg_to_smc_with_parameter(adev,
				PPSMC_MSG_SetSclkSoftMin,
				cz_get_sclk_level(adev, clock,
					PPSMC_MSG_SetSclkSoftMin));
	}

	if (pi->caps_stable_power_state &&
	    pi->sclk_dpm.soft_max_clk != clock) {
		/* stable power state pins max to the (raised) min */
		pi->sclk_dpm.soft_max_clk = clock;
		cz_send_msg_to_smc_with_parameter(adev,
				PPSMC_MSG_SetSclkSoftMax,
				cz_get_sclk_level(adev, clock,
					PPSMC_MSG_SetSclkSoftMax));
	} else {
		/* NOTE(review): this branch re-sends the cached soft max
		 * even when it has not changed — presumably to refresh the
		 * SMU after a state switch; confirm this is intended. */
		cz_send_msg_to_smc_with_parameter(adev,
				PPSMC_MSG_SetSclkSoftMax,
				cz_get_sclk_level(adev,
					pi->sclk_dpm.soft_max_clk,
					PPSMC_MSG_SetSclkSoftMax));
	}

	return 0;
}
1433
1434static int cz_dpm_set_deep_sleep_sclk_threshold(struct amdgpu_device *adev)
1435{
1436 int ret = 0;
1437 struct cz_power_info *pi = cz_get_pi(adev);
1438
1439 if (pi->caps_sclk_ds) {
1440 cz_send_msg_to_smc_with_parameter(adev,
1441 PPSMC_MSG_SetMinDeepSleepSclk,
1442 CZ_MIN_DEEP_SLEEP_SCLK);
1443 }
1444
1445 return ret;
1446}
1447
1448/* ?? without dal support, is this still needed in setpowerstate list*/
1449static int cz_dpm_set_watermark_threshold(struct amdgpu_device *adev)
1450{
1451 int ret = 0;
1452 struct cz_power_info *pi = cz_get_pi(adev);
1453
1454 cz_send_msg_to_smc_with_parameter(adev,
1455 PPSMC_MSG_SetWatermarkFrequency,
1456 pi->sclk_dpm.soft_max_clk);
1457
1458 return ret;
1459}
1460
1461static int cz_dpm_enable_nbdpm(struct amdgpu_device *adev)
1462{
1463 int ret = 0;
1464 struct cz_power_info *pi = cz_get_pi(adev);
1465
1466 /* also depend on dal NBPStateDisableRequired */
1467 if (pi->nb_dpm_enabled_by_driver && !pi->nb_dpm_enabled) {
1468 ret = cz_send_msg_to_smc_with_parameter(adev,
1469 PPSMC_MSG_EnableAllSmuFeatures,
1470 NB_DPM_MASK);
1471 if (ret) {
1472 DRM_ERROR("amdgpu: nb dpm enable failed\n");
1473 return ret;
1474 }
1475 pi->nb_dpm_enabled = true;
1476 }
1477
1478 return ret;
1479}
1480
1481static void cz_dpm_nbdpm_lm_pstate_enable(struct amdgpu_device *adev,
1482 bool enable)
1483{
1484 if (enable)
1485 cz_send_msg_to_smc(adev, PPSMC_MSG_EnableLowMemoryPstate);
1486 else
1487 cz_send_msg_to_smc(adev, PPSMC_MSG_DisableLowMemoryPstate);
1488
1489}
1490
1491static int cz_dpm_update_low_memory_pstate(struct amdgpu_device *adev)
1492{
1493 int ret = 0;
1494 struct cz_power_info *pi = cz_get_pi(adev);
1495 struct cz_ps *ps = &pi->requested_ps;
1496
1497 if (pi->sys_info.nb_dpm_enable) {
1498 if (ps->force_high)
1499 cz_dpm_nbdpm_lm_pstate_enable(adev, true);
1500 else
1501 cz_dpm_nbdpm_lm_pstate_enable(adev, false);
1502 }
1503
1504 return ret;
1505}
1506
/* with dpm enabled */
/*
 * set-power-state callback: push all derived limits and P-state
 * settings for the requested state to the SMU, in order.
 */
static int cz_dpm_set_power_state(struct amdgpu_device *adev)
{
	cz_dpm_update_sclk_limit(adev);
	cz_dpm_set_deep_sleep_sclk_threshold(adev);
	cz_dpm_set_watermark_threshold(adev);
	cz_dpm_enable_nbdpm(adev);
	cz_dpm_update_low_memory_pstate(adev);

	return 0;
}
1520
1521static void cz_dpm_post_set_power_state(struct amdgpu_device *adev)
1522{
1523 struct cz_power_info *pi = cz_get_pi(adev);
1524 struct amdgpu_ps *ps = &pi->requested_rps;
1525
1526 cz_update_current_ps(adev, ps);
1527
1528}
1529
1530static int cz_dpm_force_highest(struct amdgpu_device *adev)
1531{
1532 struct cz_power_info *pi = cz_get_pi(adev);
1533 int ret = 0;
1534
1535 if (pi->sclk_dpm.soft_min_clk != pi->sclk_dpm.soft_max_clk) {
1536 pi->sclk_dpm.soft_min_clk =
1537 pi->sclk_dpm.soft_max_clk;
1538 ret = cz_send_msg_to_smc_with_parameter(adev,
1539 PPSMC_MSG_SetSclkSoftMin,
1540 cz_get_sclk_level(adev,
1541 pi->sclk_dpm.soft_min_clk,
1542 PPSMC_MSG_SetSclkSoftMin));
1543 if (ret)
1544 return ret;
1545 }
1546
1547 return ret;
1548}
1549
1550static int cz_dpm_force_lowest(struct amdgpu_device *adev)
1551{
1552 struct cz_power_info *pi = cz_get_pi(adev);
1553 int ret = 0;
1554
1555 if (pi->sclk_dpm.soft_max_clk != pi->sclk_dpm.soft_min_clk) {
1556 pi->sclk_dpm.soft_max_clk = pi->sclk_dpm.soft_min_clk;
1557 ret = cz_send_msg_to_smc_with_parameter(adev,
1558 PPSMC_MSG_SetSclkSoftMax,
1559 cz_get_sclk_level(adev,
1560 pi->sclk_dpm.soft_max_clk,
1561 PPSMC_MSG_SetSclkSoftMax));
1562 if (ret)
1563 return ret;
1564 }
1565
1566 return ret;
1567}
1568
/*
 * Query (and cache) the number of sclk DPM levels from the SMU.
 *
 * NOTE(review): the return type is uint32_t but the error path returns
 * -EINVAL, which wraps to 0xFFFFFFEA; the caller
 * (cz_dpm_unforce_dpm_levels) subtracts 1 and compares unsigned, so an
 * error silently selects the last table entry — confirm intended.
 */
static uint32_t cz_dpm_get_max_sclk_level(struct amdgpu_device *adev)
{
	struct cz_power_info *pi = cz_get_pi(adev);

	/* lazily fetch the level count from the SMU once */
	if (!pi->max_sclk_level) {
		cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxSclkLevel);
		pi->max_sclk_level = cz_get_argument(adev) + 1;
	}

	if (pi->max_sclk_level > CZ_MAX_HARDWARE_POWERLEVELS) {
		DRM_ERROR("Invalid max sclk level!\n");
		return -EINVAL;
	}

	return pi->max_sclk_level;
}
1585
1586static int cz_dpm_unforce_dpm_levels(struct amdgpu_device *adev)
1587{
1588 struct cz_power_info *pi = cz_get_pi(adev);
1589 struct amdgpu_clock_voltage_dependency_table *dep_table =
1590 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
1591 uint32_t level = 0;
1592 int ret = 0;
1593
1594 pi->sclk_dpm.soft_min_clk = dep_table->entries[0].clk;
1595 level = cz_dpm_get_max_sclk_level(adev) - 1;
1596 if (level < dep_table->count)
1597 pi->sclk_dpm.soft_max_clk = dep_table->entries[level].clk;
1598 else
1599 pi->sclk_dpm.soft_max_clk =
1600 dep_table->entries[dep_table->count - 1].clk;
1601
1602 /* get min/max sclk soft value
1603 * notify SMU to execute */
1604 ret = cz_send_msg_to_smc_with_parameter(adev,
1605 PPSMC_MSG_SetSclkSoftMin,
1606 cz_get_sclk_level(adev,
1607 pi->sclk_dpm.soft_min_clk,
1608 PPSMC_MSG_SetSclkSoftMin));
1609 if (ret)
1610 return ret;
1611
1612 ret = cz_send_msg_to_smc_with_parameter(adev,
1613 PPSMC_MSG_SetSclkSoftMax,
1614 cz_get_sclk_level(adev,
1615 pi->sclk_dpm.soft_max_clk,
1616 PPSMC_MSG_SetSclkSoftMax));
1617 if (ret)
1618 return ret;
1619
1620 DRM_INFO("DPM unforce state min=%d, max=%d.\n",
1621 pi->sclk_dpm.soft_min_clk,
1622 pi->sclk_dpm.soft_max_clk);
1623
1624 return 0;
1625}
1626
1627static int cz_dpm_force_dpm_level(struct amdgpu_device *adev,
1628 enum amdgpu_dpm_forced_level level)
1629{
1630 int ret = 0;
1631
1632 switch (level) {
1633 case AMDGPU_DPM_FORCED_LEVEL_HIGH:
1634 ret = cz_dpm_force_highest(adev);
1635 if (ret)
1636 return ret;
1637 break;
1638 case AMDGPU_DPM_FORCED_LEVEL_LOW:
1639 ret = cz_dpm_force_lowest(adev);
1640 if (ret)
1641 return ret;
1642 break;
1643 case AMDGPU_DPM_FORCED_LEVEL_AUTO:
1644 ret = cz_dpm_unforce_dpm_levels(adev);
1645 if (ret)
1646 return ret;
1647 break;
1648 default:
1649 break;
1650 }
1651
1652 return ret;
1653}
1654
/* fix me, display configuration change lists here
 * mostly dal related*/
/* display-config-changed callback: stub until DAL support lands */
static void cz_dpm_display_configuration_changed(struct amdgpu_device *adev)
{
}
1660
1661static uint32_t cz_dpm_get_sclk(struct amdgpu_device *adev, bool low)
1662{
1663 struct cz_power_info *pi = cz_get_pi(adev);
1664 struct cz_ps *requested_state = cz_get_ps(&pi->requested_rps);
1665
1666 if (low)
1667 return requested_state->levels[0].sclk;
1668 else
1669 return requested_state->levels[requested_state->num_levels - 1].sclk;
1670
1671}
1672
1673static uint32_t cz_dpm_get_mclk(struct amdgpu_device *adev, bool low)
1674{
1675 struct cz_power_info *pi = cz_get_pi(adev);
1676
1677 return pi->sys_info.bootup_uma_clk;
1678}
1679
/* IP block callbacks for the Carrizo DPM block */
const struct amdgpu_ip_funcs cz_dpm_ip_funcs = {
	.early_init = cz_dpm_early_init,
	.late_init = NULL,
	.sw_init = cz_dpm_sw_init,
	.sw_fini = cz_dpm_sw_fini,
	.hw_init = cz_dpm_hw_init,
	.hw_fini = cz_dpm_hw_fini,
	.suspend = cz_dpm_suspend,
	.resume = cz_dpm_resume,
	.is_idle = NULL,
	.wait_for_idle = NULL,
	.soft_reset = NULL,
	.print_status = NULL,
	.set_clockgating_state = cz_dpm_set_clockgating_state,
	.set_powergating_state = cz_dpm_set_powergating_state,
};
1696
/* dpm callbacks wired into adev->pm.funcs by cz_dpm_set_funcs() */
static const struct amdgpu_dpm_funcs cz_dpm_funcs = {
	.get_temperature = cz_dpm_get_temperature,
	.pre_set_power_state = cz_dpm_pre_set_power_state,
	.set_power_state = cz_dpm_set_power_state,
	.post_set_power_state = cz_dpm_post_set_power_state,
	.display_configuration_changed = cz_dpm_display_configuration_changed,
	.get_sclk = cz_dpm_get_sclk,
	.get_mclk = cz_dpm_get_mclk,
	.print_power_state = cz_dpm_print_power_state,
	.debugfs_print_current_performance_level =
				cz_dpm_debugfs_print_current_performance_level,
	.force_performance_level = cz_dpm_force_dpm_level,
	.vblank_too_short = NULL,
	.powergate_uvd = NULL,
};
1712
1713static void cz_dpm_set_funcs(struct amdgpu_device *adev)
1714{
1715 if (NULL == adev->pm.funcs)
1716 adev->pm.funcs = &cz_dpm_funcs;
1717}