]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blame - drivers/gpu/drm/amd/amdgpu/cz_dpm.c
drm/amdgpu: let bo_list handler start from 1
[mirror_ubuntu-jammy-kernel.git] / drivers / gpu / drm / amd / amdgpu / cz_dpm.c
CommitLineData
aaa36a97
AD
1/*
2 * Copyright 2014 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include <linux/firmware.h>
25#include <linux/seq_file.h>
26#include "drmP.h"
27#include "amdgpu.h"
28#include "amdgpu_pm.h"
29#include "amdgpu_atombios.h"
30#include "vid.h"
31#include "vi_dpm.h"
32#include "amdgpu_dpm.h"
33#include "cz_dpm.h"
34#include "cz_ppsmc.h"
35#include "atom.h"
36
37#include "smu/smu_8_0_d.h"
38#include "smu/smu_8_0_sh_mask.h"
39#include "gca/gfx_8_0_d.h"
40#include "gca/gfx_8_0_sh_mask.h"
41#include "gmc/gmc_8_1_d.h"
42#include "bif/bif_5_1_d.h"
43#include "gfx_v8_0.h"
44
45static struct cz_ps *cz_get_ps(struct amdgpu_ps *rps)
46{
47 struct cz_ps *ps = rps->ps_priv;
48
49 return ps;
50}
51
52static struct cz_power_info *cz_get_pi(struct amdgpu_device *adev)
53{
54 struct cz_power_info *pi = adev->pm.dpm.priv;
55
56 return pi;
57}
58
59static uint16_t cz_convert_8bit_index_to_voltage(struct amdgpu_device *adev,
60 uint16_t voltage)
61{
62 uint16_t tmp = 6200 - voltage * 25;
63
64 return tmp;
65}
66
/*
 * Fill @table with the board's maximum AC clock/voltage limits.
 * sclk/vddc come from the last (highest) entry of the vddc-vs-sclk
 * dependency table when present; mclk is the first NB P-state memory
 * clock cached by cz_parse_sys_info_table().
 */
static void cz_construct_max_power_limits_table(struct amdgpu_device *adev,
			struct amdgpu_clock_and_voltage_limits *table)
{
	struct cz_power_info *pi = cz_get_pi(adev);
	struct amdgpu_clock_voltage_dependency_table *dep_table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;

	if (dep_table->count > 0) {
		/* entries are sorted ascending; the last one is the max */
		table->sclk = dep_table->entries[dep_table->count - 1].clk;
		table->vddc = cz_convert_8bit_index_to_voltage(adev,
				dep_table->entries[dep_table->count - 1].v);
	}

	table->mclk = pi->sys_info.nbp_memory_clock[0];

}
83
/* Overlay of the ATOM IntegratedSystemInfo table revisions; CZ uses v1.9. */
union igp_info {
	struct _ATOM_INTEGRATED_SYSTEM_INFO info;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_9 info_9;
};
90
/*
 * Parse the ATOM IntegratedSystemInfo table (revision 9 only) from the
 * VBIOS and cache bootup clocks, thermal limits, NB P-state clocks,
 * display clocks and capability flags in pi->sys_info.
 *
 * Returns 0 on success, -EINVAL on an unsupported table revision or
 * inconsistent thermal limits.  Silently succeeds if no table header
 * is found (the sys_info fields are then left untouched).
 */
static int cz_parse_sys_info_table(struct amdgpu_device *adev)
{
	struct cz_power_info *pi = cz_get_pi(adev);
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
	union igp_info *igp_info;
	u8 frev, crev;
	u16 data_offset;
	int i = 0;

	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset)) {
		igp_info = (union igp_info *)(mode_info->atom_context->bios +
					      data_offset);

		if (crev != 9) {
			DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
			return -EINVAL;
		}
		pi->sys_info.bootup_sclk =
			le32_to_cpu(igp_info->info_9.ulBootUpEngineClock);
		pi->sys_info.bootup_uma_clk =
			le32_to_cpu(igp_info->info_9.ulBootUpUMAClock);
		pi->sys_info.dentist_vco_freq =
			le32_to_cpu(igp_info->info_9.ulDentistVCOFreq);
		pi->sys_info.bootup_nb_voltage_index =
			le16_to_cpu(igp_info->info_9.usBootUpNBVoltage);

		/* 0 in the BIOS means "use the driver default limit" */
		if (igp_info->info_9.ucHtcTmpLmt == 0)
			pi->sys_info.htc_tmp_lmt = 203;
		else
			pi->sys_info.htc_tmp_lmt = igp_info->info_9.ucHtcTmpLmt;

		if (igp_info->info_9.ucHtcHystLmt == 0)
			pi->sys_info.htc_hyst_lmt = 5;
		else
			pi->sys_info.htc_hyst_lmt = igp_info->info_9.ucHtcHystLmt;

		/* hysteresis must sit strictly below the trip point */
		if (pi->sys_info.htc_tmp_lmt <= pi->sys_info.htc_hyst_lmt) {
			DRM_ERROR("The htcTmpLmt should be larger than htcHystLmt.\n");
			return -EINVAL;
		}

		/* bit 3 of ulSystemConfig = NB DPM supported by the platform */
		if (le32_to_cpu(igp_info->info_9.ulSystemConfig) & (1 << 3) &&
				pi->enable_nb_ps_policy)
			pi->sys_info.nb_dpm_enable = true;
		else
			pi->sys_info.nb_dpm_enable = false;

		for (i = 0; i < CZ_NUM_NBPSTATES; i++) {
			if (i < CZ_NUM_NBPMEMORY_CLOCK)
				pi->sys_info.nbp_memory_clock[i] =
				le32_to_cpu(igp_info->info_9.ulNbpStateMemclkFreq[i]);
			pi->sys_info.nbp_n_clock[i] =
			le32_to_cpu(igp_info->info_9.ulNbpStateNClkFreq[i]);
		}

		for (i = 0; i < CZ_MAX_DISPLAY_CLOCK_LEVEL; i++)
			pi->sys_info.display_clock[i] =
			le32_to_cpu(igp_info->info_9.sDispClkVoltageMapping[i].ulMaximumSupportedCLK);

		for (i = 0; i < CZ_NUM_NBPSTATES; i++)
			pi->sys_info.nbp_voltage_index[i] =
				le32_to_cpu(igp_info->info_9.usNBPStateVoltage[i]);

		if (le32_to_cpu(igp_info->info_9.ulGPUCapInfo) &
				SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS)
			pi->caps_enable_dfs_bypass = true;

		pi->sys_info.uma_channel_number =
			igp_info->info_9.ucUMAChannelNumber;

		cz_construct_max_power_limits_table(adev,
			&adev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
	}

	return 0;
}
169
170static void cz_patch_voltage_values(struct amdgpu_device *adev)
171{
172 int i;
173 struct amdgpu_uvd_clock_voltage_dependency_table *uvd_table =
174 &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
175 struct amdgpu_vce_clock_voltage_dependency_table *vce_table =
176 &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
177 struct amdgpu_clock_voltage_dependency_table *acp_table =
178 &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
179
180 if (uvd_table->count) {
181 for (i = 0; i < uvd_table->count; i++)
182 uvd_table->entries[i].v =
183 cz_convert_8bit_index_to_voltage(adev,
184 uvd_table->entries[i].v);
185 }
186
187 if (vce_table->count) {
188 for (i = 0; i < vce_table->count; i++)
189 vce_table->entries[i].v =
190 cz_convert_8bit_index_to_voltage(adev,
191 vce_table->entries[i].v);
192 }
193
194 if (acp_table->count) {
195 for (i = 0; i < acp_table->count; i++)
196 acp_table->entries[i].v =
197 cz_convert_8bit_index_to_voltage(adev,
198 acp_table->entries[i].v);
199 }
200
201}
202
203static void cz_construct_boot_state(struct amdgpu_device *adev)
204{
205 struct cz_power_info *pi = cz_get_pi(adev);
206
207 pi->boot_pl.sclk = pi->sys_info.bootup_sclk;
208 pi->boot_pl.vddc_index = pi->sys_info.bootup_nb_voltage_index;
209 pi->boot_pl.ds_divider_index = 0;
210 pi->boot_pl.ss_divider_index = 0;
211 pi->boot_pl.allow_gnb_slow = 1;
212 pi->boot_pl.force_nbp_state = 0;
213 pi->boot_pl.display_wm = 0;
214 pi->boot_pl.vce_wm = 0;
215
216}
217
218static void cz_patch_boot_state(struct amdgpu_device *adev,
219 struct cz_ps *ps)
220{
221 struct cz_power_info *pi = cz_get_pi(adev);
222
223 ps->num_levels = 1;
224 ps->levels[0] = pi->boot_pl;
225}
226
/* Per-level clock-info layouts in the PPLib table; CZ uses the carrizo one. */
union pplib_clock_info {
	struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
	struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
	struct _ATOM_PPLIB_CZ_CLOCK_INFO carrizo;
};
232
/*
 * Fill power level @index of @rps from one PPLib clock-info entry.
 * The carrizo entry only stores an index into the vddc-vs-sclk
 * dependency table; sclk and vddc are resolved through that table.
 * Also bumps ps->num_levels so levels can be parsed in order.
 */
static void cz_parse_pplib_clock_info(struct amdgpu_device *adev,
					struct amdgpu_ps *rps, int index,
					union pplib_clock_info *clock_info)
{
	struct cz_power_info *pi = cz_get_pi(adev);
	struct cz_ps *ps = cz_get_ps(rps);
	struct cz_pl *pl = &ps->levels[index];
	struct amdgpu_clock_voltage_dependency_table *table =
			&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;

	pl->sclk = table->entries[clock_info->carrizo.index].clk;
	pl->vddc_index = table->entries[clock_info->carrizo.index].v;

	ps->num_levels = index + 1;

	/* fixed divider selection when sclk deep sleep is supported */
	if (pi->caps_sclk_ds) {
		pl->ds_divider_index = 5;
		pl->ss_divider_index = 5;
	}

}
254
/*
 * Copy the non-clock attributes (caps, classification, UVD clocks) of
 * one PPLib state into @rps and remember special states: the BOOT
 * state (also patched to the boot power level) and the UVD state.
 */
static void cz_parse_pplib_non_clock_info(struct amdgpu_device *adev,
			struct amdgpu_ps *rps,
			struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
			u8 table_rev)
{
	struct cz_ps *ps = cz_get_ps(rps);

	rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
	rps->class = le16_to_cpu(non_clock_info->usClassification);
	rps->class2 = le16_to_cpu(non_clock_info->usClassification2);

	/* UVD clocks only exist from non-clock-info rev 2 onwards */
	if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
		rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
		rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
	} else {
		rps->vclk = 0;
		rps->dclk = 0;
	}

	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
		adev->pm.dpm.boot_ps = rps;
		cz_patch_boot_state(adev, ps);
	}
	if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
		adev->pm.dpm.uvd_ps = rps;

}
282
/* Overlay of every PPLib PowerPlay table revision the parser may meet. */
union power_info {
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
	struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
	struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
};
290
/* Overlay of the two PPLib state layouts; CZ states use the v2 form. */
union pplib_power_state {
	struct _ATOM_PPLIB_STATE v1;
	struct _ATOM_PPLIB_STATE_V2 v2;
};
295
296static int cz_parse_power_table(struct amdgpu_device *adev)
297{
298 struct amdgpu_mode_info *mode_info = &adev->mode_info;
299 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
300 union pplib_power_state *power_state;
301 int i, j, k, non_clock_array_index, clock_array_index;
302 union pplib_clock_info *clock_info;
303 struct _StateArray *state_array;
304 struct _ClockInfoArray *clock_info_array;
305 struct _NonClockInfoArray *non_clock_info_array;
306 union power_info *power_info;
307 int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
308 u16 data_offset;
309 u8 frev, crev;
310 u8 *power_state_offset;
311 struct cz_ps *ps;
312
313 if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
314 &frev, &crev, &data_offset))
315 return -EINVAL;
316 power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
317
318 state_array = (struct _StateArray *)
319 (mode_info->atom_context->bios + data_offset +
320 le16_to_cpu(power_info->pplib.usStateArrayOffset));
321 clock_info_array = (struct _ClockInfoArray *)
322 (mode_info->atom_context->bios + data_offset +
323 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
324 non_clock_info_array = (struct _NonClockInfoArray *)
325 (mode_info->atom_context->bios + data_offset +
326 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
327
328 adev->pm.dpm.ps = kzalloc(sizeof(struct amdgpu_ps) *
329 state_array->ucNumEntries, GFP_KERNEL);
330
331 if (!adev->pm.dpm.ps)
332 return -ENOMEM;
333
334 power_state_offset = (u8 *)state_array->states;
335 adev->pm.dpm.platform_caps =
336 le32_to_cpu(power_info->pplib.ulPlatformCaps);
337 adev->pm.dpm.backbias_response_time =
338 le16_to_cpu(power_info->pplib.usBackbiasTime);
339 adev->pm.dpm.voltage_response_time =
340 le16_to_cpu(power_info->pplib.usVoltageTime);
341
342 for (i = 0; i < state_array->ucNumEntries; i++) {
343 power_state = (union pplib_power_state *)power_state_offset;
344 non_clock_array_index = power_state->v2.nonClockInfoIndex;
345 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
346 &non_clock_info_array->nonClockInfo[non_clock_array_index];
347
348 ps = kzalloc(sizeof(struct cz_ps), GFP_KERNEL);
349 if (ps == NULL) {
350 kfree(adev->pm.dpm.ps);
351 return -ENOMEM;
352 }
353
354 adev->pm.dpm.ps[i].ps_priv = ps;
355 k = 0;
356 for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
357 clock_array_index = power_state->v2.clockInfoIndex[j];
358 if (clock_array_index >= clock_info_array->ucNumEntries)
359 continue;
360 if (k >= CZ_MAX_HARDWARE_POWERLEVELS)
361 break;
362 clock_info = (union pplib_clock_info *)
363 &clock_info_array->clockInfo[clock_array_index *
364 clock_info_array->ucEntrySize];
365 cz_parse_pplib_clock_info(adev, &adev->pm.dpm.ps[i],
366 k, clock_info);
367 k++;
368 }
369 cz_parse_pplib_non_clock_info(adev, &adev->pm.dpm.ps[i],
370 non_clock_info,
371 non_clock_info_array->ucEntrySize);
372 power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
373 }
374 adev->pm.dpm.num_ps = state_array->ucNumEntries;
375
376 return 0;
377}
378
379static int cz_process_firmware_header(struct amdgpu_device *adev)
380{
381 struct cz_power_info *pi = cz_get_pi(adev);
382 u32 tmp;
383 int ret;
384
385 ret = cz_read_smc_sram_dword(adev, SMU8_FIRMWARE_HEADER_LOCATION +
386 offsetof(struct SMU8_Firmware_Header,
387 DpmTable),
388 &tmp, pi->sram_end);
389
390 if (ret == 0)
391 pi->dpm_table_start = tmp;
392
393 return ret;
394}
395
/*
 * Allocate and initialize the CZ power-management context: set driver
 * defaults and capability flags, then parse the VBIOS system-info and
 * power tables and locate the SMU DPM table.
 *
 * Returns 0 on success or a negative error code.  On failure the
 * partially-initialized context is left in place; the caller
 * (cz_dpm_sw_init) tears it down via cz_dpm_fini().
 */
static int cz_dpm_init(struct amdgpu_device *adev)
{
	struct cz_power_info *pi;
	int ret, i;

	pi = kzalloc(sizeof(struct cz_power_info), GFP_KERNEL);
	if (NULL == pi)
		return -ENOMEM;

	adev->pm.dpm.priv = pi;

	ret = amdgpu_get_platform_caps(adev);
	if (ret)
		return ret;

	ret = amdgpu_parse_extended_power_table(adev);
	if (ret)
		return ret;

	pi->sram_end = SMC_RAM_END;

	/* set up DPM defaults */
	for (i = 0; i < CZ_MAX_HARDWARE_POWERLEVELS; i++)
		pi->active_target[i] = CZ_AT_DFLT;

	pi->mgcg_cgtt_local0 = 0x0;
	pi->mgcg_cgtt_local1 = 0x0;
	pi->clock_slow_down_step = 25000;
	pi->skip_clock_slow_down = 1;
	pi->enable_nb_ps_policy = 1;
	pi->caps_power_containment = true;
	pi->caps_cac = true;
	/* DiDt is off by default; the ramping caps below only matter if on */
	pi->didt_enabled = false;
	if (pi->didt_enabled) {
		pi->caps_sq_ramping = true;
		pi->caps_db_ramping = true;
		pi->caps_td_ramping = true;
		pi->caps_tcp_ramping = true;
	}
	pi->caps_sclk_ds = true;
	pi->voting_clients = 0x00c00033;
	pi->auto_thermal_throttling_enabled = true;
	pi->bapm_enabled = false;
	pi->disable_nb_ps3_in_battery = false;
	pi->voltage_drop_threshold = 0;
	pi->caps_sclk_throttle_low_notification = false;
	pi->gfx_pg_threshold = 500;
	pi->caps_fps = true;
	/* uvd */
	pi->caps_uvd_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_UVD) ? true : false;
	pi->caps_uvd_dpm = true;
	/* vce */
	pi->caps_vce_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_VCE) ? true : false;
	pi->caps_vce_dpm = true;
	/* acp */
	pi->caps_acp_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_ACP) ? true : false;
	pi->caps_acp_dpm = true;

	pi->caps_stable_power_state = false;
	pi->nb_dpm_enabled_by_driver = true;
	pi->nb_dpm_enabled = false;
	pi->caps_voltage_island = false;
	/* flags which indicate need to upload pptable */
	pi->need_pptable_upload = true;

	ret = cz_parse_sys_info_table(adev);
	if (ret)
		return ret;

	/* voltage patching must run before boot state construction so the
	 * boot level picks up converted values */
	cz_patch_voltage_values(adev);
	cz_construct_boot_state(adev);

	ret = cz_parse_power_table(adev);
	if (ret)
		return ret;

	ret = cz_process_firmware_header(adev);
	if (ret)
		return ret;

	pi->dpm_enabled = true;

	return 0;
}
480
481static void cz_dpm_fini(struct amdgpu_device *adev)
482{
483 int i;
484
485 for (i = 0; i < adev->pm.dpm.num_ps; i++)
486 kfree(adev->pm.dpm.ps[i].ps_priv);
487
488 kfree(adev->pm.dpm.ps);
489 kfree(adev->pm.dpm.priv);
490 amdgpu_free_extended_power_table(adev);
491}
492
/*
 * Debugfs helper: print the currently selected sclk DPM level with its
 * clock and voltage.  The level index and current voltage are read
 * back from SMC status registers.
 */
static void
cz_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
					       struct seq_file *m)
{
	struct amdgpu_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
	u32 current_index =
		(RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
		TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >>
		TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT;
	u32 sclk, tmp;
	u16 vddc;

	if (current_index >= NUM_SCLK_LEVELS) {
		seq_printf(m, "invalid dpm profile %d\n", current_index);
	} else {
		sclk = table->entries[current_index].clk;
		/* current voltage level comes back as an 8-bit index */
		tmp = (RREG32_SMC(ixSMU_VOLTAGE_STATUS) &
			SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL_MASK) >>
			SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL__SHIFT;
		vddc = cz_convert_8bit_index_to_voltage(adev, (u16)tmp);
		seq_printf(m, "power level %d    sclk: %u vddc: %u\n",
			   current_index, sclk, vddc);
	}
}
518
/*
 * Log a power state: class/caps info, UVD clocks, and every hardware
 * level with its sclk and decoded voltage.
 */
static void cz_dpm_print_power_state(struct amdgpu_device *adev,
					struct amdgpu_ps *rps)
{
	int i;
	struct cz_ps *ps = cz_get_ps(rps);

	amdgpu_dpm_print_class_info(rps->class, rps->class2);
	amdgpu_dpm_print_cap_info(rps->caps);

	DRM_INFO("\tuvd    vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
	for (i = 0; i < ps->num_levels; i++) {
		struct cz_pl *pl = &ps->levels[i];

		DRM_INFO("\t\tpower level %d    sclk: %u vddc: %u\n",
			 i, pl->sclk,
			 cz_convert_8bit_index_to_voltage(adev, pl->vddc_index));
	}

	amdgpu_dpm_print_ps_status(adev, rps);
}
539
540static void cz_dpm_set_funcs(struct amdgpu_device *adev);
541
/* Early init hook: install the CZ dpm callbacks before any dpm work. */
static int cz_dpm_early_init(struct amdgpu_device *adev)
{
	cz_dpm_set_funcs(adev);
	return 0;
}
548
/*
 * Software init hook: seed the generic pm state with defaults, then
 * (unless dpm is disabled via the amdgpu_dpm module parameter) build
 * the CZ context and register the pm sysfs interface.
 *
 * All dpm setup runs under adev->pm.mutex; on any failure the partial
 * state is torn down with cz_dpm_fini() before the lock is dropped.
 */
static int cz_dpm_sw_init(struct amdgpu_device *adev)
{
	int ret = 0;
	/* fix me to add thermal support TODO */

	/* default to balanced state */
	adev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
	adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
	adev->pm.dpm.forced_level = AMDGPU_DPM_FORCED_LEVEL_AUTO;
	adev->pm.default_sclk = adev->clock.default_sclk;
	adev->pm.default_mclk = adev->clock.default_mclk;
	adev->pm.current_sclk = adev->clock.default_sclk;
	adev->pm.current_mclk = adev->clock.default_mclk;
	adev->pm.int_thermal_type = THERMAL_TYPE_NONE;

	/* amdgpu_dpm == 0 means dpm fully disabled by the user */
	if (amdgpu_dpm == 0)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = cz_dpm_init(adev);
	if (ret)
		goto dpm_init_failed;

	adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
	if (amdgpu_dpm == 1)
		amdgpu_pm_print_power_states(adev);

	ret = amdgpu_pm_sysfs_init(adev);
	if (ret)
		goto dpm_init_failed;

	mutex_unlock(&adev->pm.mutex);
	DRM_INFO("amdgpu: dpm initialized\n");

	return 0;

dpm_init_failed:
	cz_dpm_fini(adev);
	mutex_unlock(&adev->pm.mutex);
	DRM_ERROR("amdgpu: dpm initialization failed\n");

	return ret;
}
592
/*
 * Software fini hook: unregister the pm sysfs interface and release
 * the CZ dpm context, serialized against other pm paths by pm.mutex.
 */
static int cz_dpm_sw_fini(struct amdgpu_device *adev)
{
	mutex_lock(&adev->pm.mutex);
	amdgpu_pm_sysfs_fini(adev);
	cz_dpm_fini(adev);
	mutex_unlock(&adev->pm.mutex);

	return 0;
}
602
603static void cz_reset_ap_mask(struct amdgpu_device *adev)
604{
605 struct cz_power_info *pi = cz_get_pi(adev);
606
607 pi->active_process_mask = 0;
608
609}
610
/* Thin wrapper: obtain the power-play table buffer from the SMU. */
static int cz_dpm_download_pptable_from_smu(struct amdgpu_device *adev,
					    void **table)
{
	return cz_smu_download_pptable(adev, table);
}
620
621static int cz_dpm_upload_pptable_to_smu(struct amdgpu_device *adev)
622{
623 struct cz_power_info *pi = cz_get_pi(adev);
624 struct SMU8_Fusion_ClkTable *clock_table;
625 struct atom_clock_dividers dividers;
626 void *table = NULL;
627 uint8_t i = 0;
628 int ret = 0;
629
630 struct amdgpu_clock_voltage_dependency_table *vddc_table =
631 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
632 struct amdgpu_clock_voltage_dependency_table *vddgfx_table =
633 &adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk;
634 struct amdgpu_uvd_clock_voltage_dependency_table *uvd_table =
635 &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
636 struct amdgpu_vce_clock_voltage_dependency_table *vce_table =
637 &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
638 struct amdgpu_clock_voltage_dependency_table *acp_table =
639 &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
640
641 if (!pi->need_pptable_upload)
642 return 0;
643
644 ret = cz_dpm_download_pptable_from_smu(adev, &table);
645 if (ret) {
646 DRM_ERROR("amdgpu: Failed to get power play table from SMU!\n");
647 return -EINVAL;
648 }
649
650 clock_table = (struct SMU8_Fusion_ClkTable *)table;
651 /* patch clock table */
652 if (vddc_table->count > CZ_MAX_HARDWARE_POWERLEVELS ||
653 vddgfx_table->count > CZ_MAX_HARDWARE_POWERLEVELS ||
654 uvd_table->count > CZ_MAX_HARDWARE_POWERLEVELS ||
655 vce_table->count > CZ_MAX_HARDWARE_POWERLEVELS ||
656 acp_table->count > CZ_MAX_HARDWARE_POWERLEVELS) {
657 DRM_ERROR("amdgpu: Invalid Clock Voltage Dependency Table!\n");
658 return -EINVAL;
659 }
660
661 for (i = 0; i < CZ_MAX_HARDWARE_POWERLEVELS; i++) {
662
663 /* vddc sclk */
664 clock_table->SclkBreakdownTable.ClkLevel[i].GnbVid =
665 (i < vddc_table->count) ? (uint8_t)vddc_table->entries[i].v : 0;
666 clock_table->SclkBreakdownTable.ClkLevel[i].Frequency =
667 (i < vddc_table->count) ? vddc_table->entries[i].clk : 0;
668 ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
669 clock_table->SclkBreakdownTable.ClkLevel[i].Frequency,
670 false, &dividers);
671 if (ret)
672 return ret;
673 clock_table->SclkBreakdownTable.ClkLevel[i].DfsDid =
674 (uint8_t)dividers.post_divider;
675
676 /* vddgfx sclk */
677 clock_table->SclkBreakdownTable.ClkLevel[i].GfxVid =
678 (i < vddgfx_table->count) ? (uint8_t)vddgfx_table->entries[i].v : 0;
679
680 /* acp breakdown */
681 clock_table->AclkBreakdownTable.ClkLevel[i].GfxVid =
682 (i < acp_table->count) ? (uint8_t)acp_table->entries[i].v : 0;
683 clock_table->AclkBreakdownTable.ClkLevel[i].Frequency =
684 (i < acp_table->count) ? acp_table->entries[i].clk : 0;
685 ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
686 clock_table->SclkBreakdownTable.ClkLevel[i].Frequency,
687 false, &dividers);
688 if (ret)
689 return ret;
690 clock_table->AclkBreakdownTable.ClkLevel[i].DfsDid =
691 (uint8_t)dividers.post_divider;
692
693 /* uvd breakdown */
694 clock_table->VclkBreakdownTable.ClkLevel[i].GfxVid =
695 (i < uvd_table->count) ? (uint8_t)uvd_table->entries[i].v : 0;
696 clock_table->VclkBreakdownTable.ClkLevel[i].Frequency =
697 (i < uvd_table->count) ? uvd_table->entries[i].vclk : 0;
698 ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
699 clock_table->VclkBreakdownTable.ClkLevel[i].Frequency,
700 false, &dividers);
701 if (ret)
702 return ret;
703 clock_table->VclkBreakdownTable.ClkLevel[i].DfsDid =
704 (uint8_t)dividers.post_divider;
705
706 clock_table->DclkBreakdownTable.ClkLevel[i].GfxVid =
707 (i < uvd_table->count) ? (uint8_t)uvd_table->entries[i].v : 0;
708 clock_table->DclkBreakdownTable.ClkLevel[i].Frequency =
709 (i < uvd_table->count) ? uvd_table->entries[i].dclk : 0;
710 ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
711 clock_table->DclkBreakdownTable.ClkLevel[i].Frequency,
712 false, &dividers);
713 if (ret)
714 return ret;
715 clock_table->DclkBreakdownTable.ClkLevel[i].DfsDid =
716 (uint8_t)dividers.post_divider;
717
718 /* vce breakdown */
719 clock_table->EclkBreakdownTable.ClkLevel[i].GfxVid =
720 (i < vce_table->count) ? (uint8_t)vce_table->entries[i].v : 0;
721 clock_table->EclkBreakdownTable.ClkLevel[i].Frequency =
722 (i < vce_table->count) ? vce_table->entries[i].ecclk : 0;
723 ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
724 clock_table->EclkBreakdownTable.ClkLevel[i].Frequency,
725 false, &dividers);
726 if (ret)
727 return ret;
728 clock_table->EclkBreakdownTable.ClkLevel[i].DfsDid =
729 (uint8_t)dividers.post_divider;
730 }
731
732 /* its time to upload to SMU */
733 ret = cz_smu_upload_pptable(adev);
734 if (ret) {
735 DRM_ERROR("amdgpu: Failed to put power play table to SMU!\n");
736 return ret;
737 }
738
739 return 0;
740}
741
742static void cz_init_sclk_limit(struct amdgpu_device *adev)
743{
744 struct cz_power_info *pi = cz_get_pi(adev);
745 struct amdgpu_clock_voltage_dependency_table *table =
746 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
747 uint32_t clock = 0, level;
748
749 if (!table || !table->count) {
750 DRM_ERROR("Invalid Voltage Dependency table.\n");
751 return;
752 }
753
754 pi->sclk_dpm.soft_min_clk = 0;
755 pi->sclk_dpm.hard_min_clk = 0;
756 cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxSclkLevel);
757 level = cz_get_argument(adev);
758 if (level < table->count)
759 clock = table->entries[level].clk;
760 else {
761 DRM_ERROR("Invalid SLCK Voltage Dependency table entry.\n");
762 clock = table->entries[table->count - 1].clk;
763 }
764
765 pi->sclk_dpm.soft_max_clk = clock;
766 pi->sclk_dpm.hard_max_clk = clock;
767
768}
769
770static void cz_init_uvd_limit(struct amdgpu_device *adev)
771{
772 struct cz_power_info *pi = cz_get_pi(adev);
773 struct amdgpu_uvd_clock_voltage_dependency_table *table =
774 &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
775 uint32_t clock = 0, level;
776
777 if (!table || !table->count) {
778 DRM_ERROR("Invalid Voltage Dependency table.\n");
779 return;
780 }
781
782 pi->uvd_dpm.soft_min_clk = 0;
783 pi->uvd_dpm.hard_min_clk = 0;
784 cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxUvdLevel);
785 level = cz_get_argument(adev);
786 if (level < table->count)
787 clock = table->entries[level].vclk;
788 else {
789 DRM_ERROR("Invalid UVD Voltage Dependency table entry.\n");
790 clock = table->entries[table->count - 1].vclk;
791 }
792
793 pi->uvd_dpm.soft_max_clk = clock;
794 pi->uvd_dpm.hard_max_clk = clock;
795
796}
797
798static void cz_init_vce_limit(struct amdgpu_device *adev)
799{
800 struct cz_power_info *pi = cz_get_pi(adev);
801 struct amdgpu_vce_clock_voltage_dependency_table *table =
802 &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
803 uint32_t clock = 0, level;
804
805 if (!table || !table->count) {
806 DRM_ERROR("Invalid Voltage Dependency table.\n");
807 return;
808 }
809
810 pi->vce_dpm.soft_min_clk = 0;
811 pi->vce_dpm.hard_min_clk = 0;
812 cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxEclkLevel);
813 level = cz_get_argument(adev);
814 if (level < table->count)
815 clock = table->entries[level].evclk;
816 else {
817 /* future BIOS would fix this error */
818 DRM_ERROR("Invalid VCE Voltage Dependency table entry.\n");
819 clock = table->entries[table->count - 1].evclk;
820 }
821
822 pi->vce_dpm.soft_max_clk = clock;
823 pi->vce_dpm.hard_max_clk = clock;
824
825}
826
827static void cz_init_acp_limit(struct amdgpu_device *adev)
828{
829 struct cz_power_info *pi = cz_get_pi(adev);
830 struct amdgpu_clock_voltage_dependency_table *table =
831 &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
832 uint32_t clock = 0, level;
833
834 if (!table || !table->count) {
835 DRM_ERROR("Invalid Voltage Dependency table.\n");
836 return;
837 }
838
839 pi->acp_dpm.soft_min_clk = 0;
840 pi->acp_dpm.hard_min_clk = 0;
841 cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxAclkLevel);
842 level = cz_get_argument(adev);
843 if (level < table->count)
844 clock = table->entries[level].clk;
845 else {
846 DRM_ERROR("Invalid ACP Voltage Dependency table entry.\n");
847 clock = table->entries[table->count - 1].clk;
848 }
849
850 pi->acp_dpm.soft_max_clk = clock;
851 pi->acp_dpm.hard_max_clk = clock;
852
853}
854
855static void cz_init_pg_state(struct amdgpu_device *adev)
856{
857 struct cz_power_info *pi = cz_get_pi(adev);
858
859 pi->uvd_power_gated = false;
860 pi->vce_power_gated = false;
861 pi->acp_power_gated = false;
862
863}
864
865static void cz_init_sclk_threshold(struct amdgpu_device *adev)
866{
867 struct cz_power_info *pi = cz_get_pi(adev);
868
869 pi->low_sclk_interrupt_threshold = 0;
870
871}
872
/*
 * One-shot ASIC-side dpm setup: reset the active-process mask, push the
 * patched power-play table to the SMU, then initialize the per-engine
 * clock limits and power-gating/threshold state.
 */
static void cz_dpm_setup_asic(struct amdgpu_device *adev)
{
	cz_reset_ap_mask(adev);
	cz_dpm_upload_pptable_to_smu(adev);
	cz_init_sclk_limit(adev);
	cz_init_uvd_limit(adev);
	cz_init_vce_limit(adev);
	cz_init_acp_limit(adev);
	cz_init_pg_state(adev);
	cz_init_sclk_threshold(adev);

}
885
886static bool cz_check_smu_feature(struct amdgpu_device *adev,
887 uint32_t feature)
888{
889 uint32_t smu_feature = 0;
890 int ret;
891
892 ret = cz_send_msg_to_smc_with_parameter(adev,
893 PPSMC_MSG_GetFeatureStatus, 0);
894 if (ret) {
895 DRM_ERROR("Failed to get SMU features from SMC.\n");
896 return false;
897 } else {
898 smu_feature = cz_get_argument(adev);
899 if (feature & smu_feature)
900 return true;
901 }
902
903 return false;
904}
905
906static bool cz_check_for_dpm_enabled(struct amdgpu_device *adev)
907{
908 if (cz_check_smu_feature(adev,
909 SMU_EnabledFeatureScoreboard_SclkDpmOn))
910 return true;
911
912 return false;
913}
914
/* Grant the default set of frequency-transition voting clients. */
static void cz_program_voting_clients(struct amdgpu_device *adev)
{
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, PPCZ_VOTINGRIGHTSCLIENTS_DFLT0);
}
919
/* Revoke all frequency-transition voting clients. */
static void cz_clear_voting_clients(struct amdgpu_device *adev)
{
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0);
}
924
925static int cz_start_dpm(struct amdgpu_device *adev)
926{
927 int ret = 0;
928
929 if (amdgpu_dpm) {
930 ret = cz_send_msg_to_smc_with_parameter(adev,
931 PPSMC_MSG_EnableAllSmuFeatures, SCLK_DPM_MASK);
932 if (ret) {
933 DRM_ERROR("SMU feature: SCLK_DPM enable failed\n");
934 return -EINVAL;
935 }
936 }
937
938 return 0;
939}
940
941static int cz_stop_dpm(struct amdgpu_device *adev)
942{
943 int ret = 0;
944
945 if (amdgpu_dpm && adev->pm.dpm_enabled) {
946 ret = cz_send_msg_to_smc_with_parameter(adev,
947 PPSMC_MSG_DisableAllSmuFeatures, SCLK_DPM_MASK);
948 if (ret) {
949 DRM_ERROR("SMU feature: SCLK_DPM disable failed\n");
950 return -EINVAL;
951 }
952 }
953
954 return 0;
955}
956
957static uint32_t cz_get_sclk_level(struct amdgpu_device *adev,
958 uint32_t clock, uint16_t msg)
959{
960 int i = 0;
961 struct amdgpu_clock_voltage_dependency_table *table =
962 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
963
964 switch (msg) {
965 case PPSMC_MSG_SetSclkSoftMin:
966 case PPSMC_MSG_SetSclkHardMin:
967 for (i = 0; i < table->count; i++)
968 if (clock <= table->entries[i].clk)
969 break;
970 if (i == table->count)
971 i = table->count - 1;
972 break;
973 case PPSMC_MSG_SetSclkSoftMax:
974 case PPSMC_MSG_SetSclkHardMax:
975 for (i = table->count - 1; i >= 0; i--)
976 if (clock >= table->entries[i].clk)
977 break;
978 if (i < 0)
979 i = 0;
980 break;
981 default:
982 break;
983 }
984
985 return i;
986}
987
988static int cz_program_bootup_state(struct amdgpu_device *adev)
989{
990 struct cz_power_info *pi = cz_get_pi(adev);
991 uint32_t soft_min_clk = 0;
992 uint32_t soft_max_clk = 0;
993 int ret = 0;
994
995 pi->sclk_dpm.soft_min_clk = pi->sys_info.bootup_sclk;
996 pi->sclk_dpm.soft_max_clk = pi->sys_info.bootup_sclk;
997
998 soft_min_clk = cz_get_sclk_level(adev,
999 pi->sclk_dpm.soft_min_clk,
1000 PPSMC_MSG_SetSclkSoftMin);
1001 soft_max_clk = cz_get_sclk_level(adev,
1002 pi->sclk_dpm.soft_max_clk,
1003 PPSMC_MSG_SetSclkSoftMax);
1004
1005 ret = cz_send_msg_to_smc_with_parameter(adev,
1006 PPSMC_MSG_SetSclkSoftMin, soft_min_clk);
1007 if (ret)
1008 return -EINVAL;
1009
1010 ret = cz_send_msg_to_smc_with_parameter(adev,
1011 PPSMC_MSG_SetSclkSoftMax, soft_max_clk);
1012 if (ret)
1013 return -EINVAL;
1014
1015 return 0;
1016}
1017
/* TODO: stub — should disable clock/power gating before DiDt programming. */
static int cz_disable_cgpg(struct amdgpu_device *adev)
{
	return 0;
}
1023
/* TODO: stub — should re-enable clock/power gating after DiDt programming. */
static int cz_enable_cgpg(struct amdgpu_device *adev)
{
	return 0;
}
1029
/* TODO: stub — should program the power-tuning (DiDt) config registers. */
static int cz_program_pt_config_registers(struct amdgpu_device *adev)
{
	return 0;
}
1035
1036static void cz_do_enable_didt(struct amdgpu_device *adev, bool enable)
1037{
1038 struct cz_power_info *pi = cz_get_pi(adev);
1039 uint32_t reg = 0;
1040
1041 if (pi->caps_sq_ramping) {
1042 reg = RREG32_DIDT(ixDIDT_SQ_CTRL0);
1043 if (enable)
1044 reg = REG_SET_FIELD(reg, DIDT_SQ_CTRL0, DIDT_CTRL_EN, 1);
1045 else
1046 reg = REG_SET_FIELD(reg, DIDT_SQ_CTRL0, DIDT_CTRL_EN, 0);
1047 WREG32_DIDT(ixDIDT_SQ_CTRL0, reg);
1048 }
1049 if (pi->caps_db_ramping) {
1050 reg = RREG32_DIDT(ixDIDT_DB_CTRL0);
1051 if (enable)
1052 reg = REG_SET_FIELD(reg, DIDT_DB_CTRL0, DIDT_CTRL_EN, 1);
1053 else
1054 reg = REG_SET_FIELD(reg, DIDT_DB_CTRL0, DIDT_CTRL_EN, 0);
1055 WREG32_DIDT(ixDIDT_DB_CTRL0, reg);
1056 }
1057 if (pi->caps_td_ramping) {
1058 reg = RREG32_DIDT(ixDIDT_TD_CTRL0);
1059 if (enable)
1060 reg = REG_SET_FIELD(reg, DIDT_TD_CTRL0, DIDT_CTRL_EN, 1);
1061 else
1062 reg = REG_SET_FIELD(reg, DIDT_TD_CTRL0, DIDT_CTRL_EN, 0);
1063 WREG32_DIDT(ixDIDT_TD_CTRL0, reg);
1064 }
1065 if (pi->caps_tcp_ramping) {
1066 reg = RREG32_DIDT(ixDIDT_TCP_CTRL0);
1067 if (enable)
1068 reg = REG_SET_FIELD(reg, DIDT_SQ_CTRL0, DIDT_CTRL_EN, 1);
1069 else
1070 reg = REG_SET_FIELD(reg, DIDT_SQ_CTRL0, DIDT_CTRL_EN, 0);
1071 WREG32_DIDT(ixDIDT_TCP_CTRL0, reg);
1072 }
1073
1074}
1075
/*
 * Enable or disable Di/Dt throttling.  The register writes in
 * cz_do_enable_didt() must happen with cg/pg disabled ("safe mode"),
 * so this wraps them in a disable-cgpg / enable-cgpg bracket tracked
 * via adev->gfx.gfx_current_status.  Returns 0 on success or -EINVAL.
 */
static int cz_enable_didt(struct amdgpu_device *adev, bool enable)
{
	struct cz_power_info *pi = cz_get_pi(adev);
	int ret;

	/* nothing to do unless at least one ramping block is supported */
	if (pi->caps_sq_ramping || pi->caps_db_ramping ||
	    pi->caps_td_ramping || pi->caps_tcp_ramping) {
		/* enter safe mode (disable cg/pg) if not already there */
		if (adev->gfx.gfx_current_status != AMDGPU_GFX_SAFE_MODE) {
			ret = cz_disable_cgpg(adev);
			if (ret) {
				DRM_ERROR("Pre Di/Dt disable cg/pg failed\n");
				return -EINVAL;
			}
			adev->gfx.gfx_current_status = AMDGPU_GFX_SAFE_MODE;
		}

		ret = cz_program_pt_config_registers(adev);
		if (ret) {
			DRM_ERROR("Di/Dt config failed\n");
			return -EINVAL;
		}
		cz_do_enable_didt(adev, enable);

		/* leave safe mode (re-enable cg/pg) */
		if (adev->gfx.gfx_current_status == AMDGPU_GFX_SAFE_MODE) {
			ret = cz_enable_cgpg(adev);
			if (ret) {
				DRM_ERROR("Post Di/Dt enable cg/pg failed\n");
				return -EINVAL;
			}
			adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;
		}
	}

	return 0;
}
1111
/* TODO: reset the ACP boot level once ACP DPM support lands (stub). */
static void cz_reset_acp_boot_level(struct amdgpu_device *adev)
{
}
1116
1117static void cz_update_current_ps(struct amdgpu_device *adev,
1118 struct amdgpu_ps *rps)
1119{
1120 struct cz_power_info *pi = cz_get_pi(adev);
1121 struct cz_ps *ps = cz_get_ps(rps);
1122
1123 pi->current_ps = *ps;
1124 pi->current_rps = *rps;
1125 pi->current_rps.ps_priv = ps;
1126
1127}
1128
1129static void cz_update_requested_ps(struct amdgpu_device *adev,
1130 struct amdgpu_ps *rps)
1131{
1132 struct cz_power_info *pi = cz_get_pi(adev);
1133 struct cz_ps *ps = cz_get_ps(rps);
1134
1135 pi->requested_ps = *ps;
1136 pi->requested_rps = *rps;
1137 pi->requested_rps.ps_priv = ps;
1138
1139}
1140
1141/* PP arbiter support needed TODO */
1142static void cz_apply_state_adjust_rules(struct amdgpu_device *adev,
1143 struct amdgpu_ps *new_rps,
1144 struct amdgpu_ps *old_rps)
1145{
1146 struct cz_ps *ps = cz_get_ps(new_rps);
1147 struct cz_power_info *pi = cz_get_pi(adev);
1148 struct amdgpu_clock_and_voltage_limits *limits =
1149 &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
1150 /* 10kHz memory clock */
1151 uint32_t mclk = 0;
1152
1153 ps->force_high = false;
1154 ps->need_dfs_bypass = true;
1155 pi->video_start = new_rps->dclk || new_rps->vclk ||
1156 new_rps->evclk || new_rps->ecclk;
1157
1158 if ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) ==
1159 ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
1160 pi->battery_state = true;
1161 else
1162 pi->battery_state = false;
1163
1164 if (pi->caps_stable_power_state)
1165 mclk = limits->mclk;
1166
1167 if (mclk > pi->sys_info.nbp_memory_clock[CZ_NUM_NBPMEMORY_CLOCK - 1])
1168 ps->force_high = true;
1169
1170}
1171
1172static int cz_dpm_enable(struct amdgpu_device *adev)
1173{
1174 int ret = 0;
1175
1176 /* renable will hang up SMU, so check first */
1177 if (cz_check_for_dpm_enabled(adev))
1178 return -EINVAL;
1179
1180 cz_program_voting_clients(adev);
1181
1182 ret = cz_start_dpm(adev);
1183 if (ret) {
1184 DRM_ERROR("Carrizo DPM enable failed\n");
1185 return -EINVAL;
1186 }
1187
1188 ret = cz_program_bootup_state(adev);
1189 if (ret) {
1190 DRM_ERROR("Carrizo bootup state program failed\n");
1191 return -EINVAL;
1192 }
1193
1194 ret = cz_enable_didt(adev, true);
1195 if (ret) {
1196 DRM_ERROR("Carrizo enable di/dt failed\n");
1197 return -EINVAL;
1198 }
1199
1200 cz_reset_acp_boot_level(adev);
1201
1202 cz_update_current_ps(adev, adev->pm.dpm.boot_ps);
1203
1204 return 0;
1205}
1206
1207static int cz_dpm_hw_init(struct amdgpu_device *adev)
1208{
1209 int ret;
1210
1211 if (!amdgpu_dpm)
1212 return 0;
1213
1214 mutex_lock(&adev->pm.mutex);
1215
1216 /* init smc in dpm hw init */
1217 ret = cz_smu_init(adev);
1218 if (ret) {
1219 DRM_ERROR("amdgpu: smc initialization failed\n");
1220 mutex_unlock(&adev->pm.mutex);
1221 return ret;
1222 }
1223
1224 /* do the actual fw loading */
1225 ret = cz_smu_start(adev);
1226 if (ret) {
1227 DRM_ERROR("amdgpu: smc start failed\n");
1228 mutex_unlock(&adev->pm.mutex);
1229 return ret;
1230 }
1231
1232 /* cz dpm setup asic */
1233 cz_dpm_setup_asic(adev);
1234
1235 /* cz dpm enable */
1236 ret = cz_dpm_enable(adev);
1237 if (ret)
1238 adev->pm.dpm_enabled = false;
1239 else
1240 adev->pm.dpm_enabled = true;
1241
1242 mutex_unlock(&adev->pm.mutex);
1243
1244 return 0;
1245}
1246
1247static int cz_dpm_disable(struct amdgpu_device *adev)
1248{
1249 int ret = 0;
1250
1251 if (!cz_check_for_dpm_enabled(adev))
1252 return -EINVAL;
1253
1254 ret = cz_enable_didt(adev, false);
1255 if (ret) {
1256 DRM_ERROR("Carrizo disable di/dt failed\n");
1257 return -EINVAL;
1258 }
1259
1260 cz_clear_voting_clients(adev);
1261 cz_stop_dpm(adev);
1262 cz_update_current_ps(adev, adev->pm.dpm.boot_ps);
1263
1264 return 0;
1265}
1266
1267static int cz_dpm_hw_fini(struct amdgpu_device *adev)
1268{
1269 int ret = 0;
1270
1271 mutex_lock(&adev->pm.mutex);
1272
1273 cz_smu_fini(adev);
1274
1275 if (adev->pm.dpm_enabled) {
1276 ret = cz_dpm_disable(adev);
1277 if (ret)
1278 return -EINVAL;
1279
1280 adev->pm.dpm.current_ps =
1281 adev->pm.dpm.requested_ps =
1282 adev->pm.dpm.boot_ps;
1283 }
1284
1285 adev->pm.dpm_enabled = false;
1286
1287 mutex_unlock(&adev->pm.mutex);
1288
1289 return 0;
1290}
1291
1292static int cz_dpm_suspend(struct amdgpu_device *adev)
1293{
1294 int ret = 0;
1295
1296 if (adev->pm.dpm_enabled) {
1297 mutex_lock(&adev->pm.mutex);
1298
1299 ret = cz_dpm_disable(adev);
1300 if (ret)
1301 return -EINVAL;
1302
1303 adev->pm.dpm.current_ps =
1304 adev->pm.dpm.requested_ps =
1305 adev->pm.dpm.boot_ps;
1306
1307 mutex_unlock(&adev->pm.mutex);
1308 }
1309
1310 return 0;
1311}
1312
1313static int cz_dpm_resume(struct amdgpu_device *adev)
1314{
1315 int ret = 0;
1316
1317 mutex_lock(&adev->pm.mutex);
1318 ret = cz_smu_init(adev);
1319 if (ret) {
1320 DRM_ERROR("amdgpu: smc resume failed\n");
1321 mutex_unlock(&adev->pm.mutex);
1322 return ret;
1323 }
1324
1325 /* do the actual fw loading */
1326 ret = cz_smu_start(adev);
1327 if (ret) {
1328 DRM_ERROR("amdgpu: smc start failed\n");
1329 mutex_unlock(&adev->pm.mutex);
1330 return ret;
1331 }
1332
1333 /* cz dpm setup asic */
1334 cz_dpm_setup_asic(adev);
1335
1336 /* cz dpm enable */
1337 ret = cz_dpm_enable(adev);
1338 if (ret)
1339 adev->pm.dpm_enabled = false;
1340 else
1341 adev->pm.dpm_enabled = true;
1342
1343 mutex_unlock(&adev->pm.mutex);
1344 /* upon resume, re-compute the clocks */
1345 if (adev->pm.dpm_enabled)
1346 amdgpu_pm_compute_clocks(adev);
1347
1348 return 0;
1349}
1350
1351static int cz_dpm_set_clockgating_state(struct amdgpu_device *adev,
1352 enum amdgpu_clockgating_state state)
1353{
1354 return 0;
1355}
1356
1357static int cz_dpm_set_powergating_state(struct amdgpu_device *adev,
1358 enum amdgpu_powergating_state state)
1359{
1360 return 0;
1361}
1362
/* borrowed from KV, need future unify */
/* Read the GPU temperature in millidegrees Celsius from the SMC. */
static int cz_dpm_get_temperature(struct amdgpu_device *adev)
{
	uint32_t raw = RREG32_SMC(0xC0300E0C);

	if (!raw)
		return 0;	/* sensor not reporting */

	/* sensor counts in 1/8 degree steps with a -49 C offset */
	return 1000 * ((raw / 8) - 49);
}
1374
1375static int cz_dpm_pre_set_power_state(struct amdgpu_device *adev)
1376{
1377 struct cz_power_info *pi = cz_get_pi(adev);
1378 struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps;
1379 struct amdgpu_ps *new_ps = &requested_ps;
1380
1381 cz_update_requested_ps(adev, new_ps);
1382 cz_apply_state_adjust_rules(adev, &pi->requested_rps,
1383 &pi->current_rps);
1384
1385 return 0;
1386}
1387
/*
 * Recompute and push the sclk soft min/max limits to the SMU.
 * With stable-power-state enabled the minimum is raised to 75% of the
 * AC sclk limit, and the maximum is pinned to the same value.
 */
static int cz_dpm_update_sclk_limit(struct amdgpu_device *adev)
{
	struct cz_power_info *pi = cz_get_pi(adev);
	struct amdgpu_clock_and_voltage_limits *limits =
		&adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
	uint32_t clock, stable_ps_clock = 0;

	clock = pi->sclk_dpm.soft_min_clk;

	/* stable power state: floor the clock at 75% of the AC sclk limit */
	if (pi->caps_stable_power_state) {
		stable_ps_clock = limits->sclk * 75 / 100;
		if (clock < stable_ps_clock)
			clock = stable_ps_clock;
	}

	/* only message the SMU when the soft minimum actually changed */
	if (clock != pi->sclk_dpm.soft_min_clk) {
		pi->sclk_dpm.soft_min_clk = clock;
		cz_send_msg_to_smc_with_parameter(adev,
				PPSMC_MSG_SetSclkSoftMin,
				cz_get_sclk_level(adev, clock,
					PPSMC_MSG_SetSclkSoftMin));
	}

	if (pi->caps_stable_power_state &&
	    pi->sclk_dpm.soft_max_clk != clock) {
		/* stable ps: pin the soft maximum to the same clock */
		pi->sclk_dpm.soft_max_clk = clock;
		cz_send_msg_to_smc_with_parameter(adev,
				PPSMC_MSG_SetSclkSoftMax,
				cz_get_sclk_level(adev, clock,
					PPSMC_MSG_SetSclkSoftMax));
	} else {
		/* otherwise re-send the existing soft maximum
		 * (NOTE(review): resent even when unchanged — presumably
		 * intentional to keep the SMU in sync; verify) */
		cz_send_msg_to_smc_with_parameter(adev,
				PPSMC_MSG_SetSclkSoftMax,
				cz_get_sclk_level(adev,
					pi->sclk_dpm.soft_max_clk,
					PPSMC_MSG_SetSclkSoftMax));
	}

	return 0;
}
1428
1429static int cz_dpm_set_deep_sleep_sclk_threshold(struct amdgpu_device *adev)
1430{
1431 int ret = 0;
1432 struct cz_power_info *pi = cz_get_pi(adev);
1433
1434 if (pi->caps_sclk_ds) {
1435 cz_send_msg_to_smc_with_parameter(adev,
1436 PPSMC_MSG_SetMinDeepSleepSclk,
1437 CZ_MIN_DEEP_SLEEP_SCLK);
1438 }
1439
1440 return ret;
1441}
1442
1443/* ?? without dal support, is this still needed in setpowerstate list*/
1444static int cz_dpm_set_watermark_threshold(struct amdgpu_device *adev)
1445{
1446 int ret = 0;
1447 struct cz_power_info *pi = cz_get_pi(adev);
1448
1449 cz_send_msg_to_smc_with_parameter(adev,
1450 PPSMC_MSG_SetWatermarkFrequency,
1451 pi->sclk_dpm.soft_max_clk);
1452
1453 return ret;
1454}
1455
1456static int cz_dpm_enable_nbdpm(struct amdgpu_device *adev)
1457{
1458 int ret = 0;
1459 struct cz_power_info *pi = cz_get_pi(adev);
1460
1461 /* also depend on dal NBPStateDisableRequired */
1462 if (pi->nb_dpm_enabled_by_driver && !pi->nb_dpm_enabled) {
1463 ret = cz_send_msg_to_smc_with_parameter(adev,
1464 PPSMC_MSG_EnableAllSmuFeatures,
1465 NB_DPM_MASK);
1466 if (ret) {
1467 DRM_ERROR("amdgpu: nb dpm enable failed\n");
1468 return ret;
1469 }
1470 pi->nb_dpm_enabled = true;
1471 }
1472
1473 return ret;
1474}
1475
1476static void cz_dpm_nbdpm_lm_pstate_enable(struct amdgpu_device *adev,
1477 bool enable)
1478{
1479 if (enable)
1480 cz_send_msg_to_smc(adev, PPSMC_MSG_EnableLowMemoryPstate);
1481 else
1482 cz_send_msg_to_smc(adev, PPSMC_MSG_DisableLowMemoryPstate);
1483
1484}
1485
1486static int cz_dpm_update_low_memory_pstate(struct amdgpu_device *adev)
1487{
1488 int ret = 0;
1489 struct cz_power_info *pi = cz_get_pi(adev);
1490 struct cz_ps *ps = &pi->requested_ps;
1491
1492 if (pi->sys_info.nb_dpm_enable) {
1493 if (ps->force_high)
1494 cz_dpm_nbdpm_lm_pstate_enable(adev, true);
1495 else
1496 cz_dpm_nbdpm_lm_pstate_enable(adev, false);
1497 }
1498
1499 return ret;
1500}
1501
/* with dpm enabled */
/*
 * set_power_state hook: push the new limits/thresholds to the SMU.
 * Individual step failures are ignored, as before; always returns 0.
 */
static int cz_dpm_set_power_state(struct amdgpu_device *adev)
{
	cz_dpm_update_sclk_limit(adev);
	cz_dpm_set_deep_sleep_sclk_threshold(adev);
	cz_dpm_set_watermark_threshold(adev);
	cz_dpm_enable_nbdpm(adev);
	cz_dpm_update_low_memory_pstate(adev);

	return 0;
}
1515
1516static void cz_dpm_post_set_power_state(struct amdgpu_device *adev)
1517{
1518 struct cz_power_info *pi = cz_get_pi(adev);
1519 struct amdgpu_ps *ps = &pi->requested_rps;
1520
1521 cz_update_current_ps(adev, ps);
1522
1523}
1524
1525static int cz_dpm_force_highest(struct amdgpu_device *adev)
1526{
1527 struct cz_power_info *pi = cz_get_pi(adev);
1528 int ret = 0;
1529
1530 if (pi->sclk_dpm.soft_min_clk != pi->sclk_dpm.soft_max_clk) {
1531 pi->sclk_dpm.soft_min_clk =
1532 pi->sclk_dpm.soft_max_clk;
1533 ret = cz_send_msg_to_smc_with_parameter(adev,
1534 PPSMC_MSG_SetSclkSoftMin,
1535 cz_get_sclk_level(adev,
1536 pi->sclk_dpm.soft_min_clk,
1537 PPSMC_MSG_SetSclkSoftMin));
1538 if (ret)
1539 return ret;
1540 }
1541
1542 return ret;
1543}
1544
1545static int cz_dpm_force_lowest(struct amdgpu_device *adev)
1546{
1547 struct cz_power_info *pi = cz_get_pi(adev);
1548 int ret = 0;
1549
1550 if (pi->sclk_dpm.soft_max_clk != pi->sclk_dpm.soft_min_clk) {
1551 pi->sclk_dpm.soft_max_clk = pi->sclk_dpm.soft_min_clk;
1552 ret = cz_send_msg_to_smc_with_parameter(adev,
1553 PPSMC_MSG_SetSclkSoftMax,
1554 cz_get_sclk_level(adev,
1555 pi->sclk_dpm.soft_max_clk,
1556 PPSMC_MSG_SetSclkSoftMax));
1557 if (ret)
1558 return ret;
1559 }
1560
1561 return ret;
1562}
1563
/*
 * Query (and cache) the number of sclk DPM levels from the SMU.
 * NOTE(review): the error path returns -EINVAL through a uint32_t,
 * which callers see as a huge positive value; cz_dpm_unforce_dpm_levels
 * happens to handle that via its range check, but the signedness
 * mismatch should be confirmed/fixed with the callers in view.
 */
static uint32_t cz_dpm_get_max_sclk_level(struct amdgpu_device *adev)
{
	struct cz_power_info *pi = cz_get_pi(adev);

	/* first call: fetch from the SMU and cache */
	if (!pi->max_sclk_level) {
		cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxSclkLevel);
		pi->max_sclk_level = cz_get_argument(adev) + 1;
	}

	if (pi->max_sclk_level > CZ_MAX_HARDWARE_POWERLEVELS) {
		DRM_ERROR("Invalid max sclk level!\n");
		return -EINVAL;
	}

	return pi->max_sclk_level;
}
1580
/*
 * Restore automatic DPM: soft minimum = lowest vddc/sclk table entry,
 * soft maximum = highest available level, then notify the SMU.
 */
static int cz_dpm_unforce_dpm_levels(struct amdgpu_device *adev)
{
	struct cz_power_info *pi = cz_get_pi(adev);
	struct amdgpu_clock_voltage_dependency_table *dep_table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
	uint32_t level = 0;
	int ret = 0;

	pi->sclk_dpm.soft_min_clk = dep_table->entries[0].clk;
	/* NOTE(review): if cz_dpm_get_max_sclk_level() fails it returns
	 * (uint32_t)-EINVAL, so 'level' wraps huge and the else branch
	 * (last table entry) is taken — accidental but safe fallback */
	level = cz_dpm_get_max_sclk_level(adev) - 1;
	if (level < dep_table->count)
		pi->sclk_dpm.soft_max_clk = dep_table->entries[level].clk;
	else
		pi->sclk_dpm.soft_max_clk =
			dep_table->entries[dep_table->count - 1].clk;

	/* get min/max sclk soft value
	 * notify SMU to execute */
	ret = cz_send_msg_to_smc_with_parameter(adev,
				PPSMC_MSG_SetSclkSoftMin,
				cz_get_sclk_level(adev,
					pi->sclk_dpm.soft_min_clk,
					PPSMC_MSG_SetSclkSoftMin));
	if (ret)
		return ret;

	ret = cz_send_msg_to_smc_with_parameter(adev,
				PPSMC_MSG_SetSclkSoftMax,
				cz_get_sclk_level(adev,
					pi->sclk_dpm.soft_max_clk,
					PPSMC_MSG_SetSclkSoftMax));
	if (ret)
		return ret;

	/* NOTE(review): %d with uint32_t args — %u would be the exact
	 * specifier, though values fit in practice */
	DRM_INFO("DPM unforce state min=%d, max=%d.\n",
		 pi->sclk_dpm.soft_min_clk,
		 pi->sclk_dpm.soft_max_clk);

	return 0;
}
1621
1622static int cz_dpm_force_dpm_level(struct amdgpu_device *adev,
1623 enum amdgpu_dpm_forced_level level)
1624{
1625 int ret = 0;
1626
1627 switch (level) {
1628 case AMDGPU_DPM_FORCED_LEVEL_HIGH:
1629 ret = cz_dpm_force_highest(adev);
1630 if (ret)
1631 return ret;
1632 break;
1633 case AMDGPU_DPM_FORCED_LEVEL_LOW:
1634 ret = cz_dpm_force_lowest(adev);
1635 if (ret)
1636 return ret;
1637 break;
1638 case AMDGPU_DPM_FORCED_LEVEL_AUTO:
1639 ret = cz_dpm_unforce_dpm_levels(adev);
1640 if (ret)
1641 return ret;
1642 break;
1643 default:
1644 break;
1645 }
1646
1647 return ret;
1648}
1649
/* fix me, display configuration change lists here
 * mostly dal related — nothing to do until dal support lands (stub) */
static void cz_dpm_display_configuration_changed(struct amdgpu_device *adev)
{
}
1655
1656static uint32_t cz_dpm_get_sclk(struct amdgpu_device *adev, bool low)
1657{
1658 struct cz_power_info *pi = cz_get_pi(adev);
1659 struct cz_ps *requested_state = cz_get_ps(&pi->requested_rps);
1660
1661 if (low)
1662 return requested_state->levels[0].sclk;
1663 else
1664 return requested_state->levels[requested_state->num_levels - 1].sclk;
1665
1666}
1667
1668static uint32_t cz_dpm_get_mclk(struct amdgpu_device *adev, bool low)
1669{
1670 struct cz_power_info *pi = cz_get_pi(adev);
1671
1672 return pi->sys_info.bootup_uma_clk;
1673}
1674
/* IP-block lifecycle hooks for the Carrizo DPM block; NULL entries
 * mean the common amdgpu code skips that step for this IP. */
const struct amdgpu_ip_funcs cz_dpm_ip_funcs = {
	.early_init = cz_dpm_early_init,
	.late_init = NULL,
	.sw_init = cz_dpm_sw_init,
	.sw_fini = cz_dpm_sw_fini,
	.hw_init = cz_dpm_hw_init,
	.hw_fini = cz_dpm_hw_fini,
	.suspend = cz_dpm_suspend,
	.resume = cz_dpm_resume,
	.is_idle = NULL,
	.wait_for_idle = NULL,
	.soft_reset = NULL,
	.print_status = NULL,
	.set_clockgating_state = cz_dpm_set_clockgating_state,
	.set_powergating_state = cz_dpm_set_powergating_state,
};
1691
/* DPM callbacks installed into adev->pm.funcs by cz_dpm_set_funcs();
 * NULL entries are features Carrizo does not implement (e.g. UVD
 * powergating, vblank checks). */
static const struct amdgpu_dpm_funcs cz_dpm_funcs = {
	.get_temperature = cz_dpm_get_temperature,
	.pre_set_power_state = cz_dpm_pre_set_power_state,
	.set_power_state = cz_dpm_set_power_state,
	.post_set_power_state = cz_dpm_post_set_power_state,
	.display_configuration_changed = cz_dpm_display_configuration_changed,
	.get_sclk = cz_dpm_get_sclk,
	.get_mclk = cz_dpm_get_mclk,
	.print_power_state = cz_dpm_print_power_state,
	.debugfs_print_current_performance_level =
			cz_dpm_debugfs_print_current_performance_level,
	.force_performance_level = cz_dpm_force_dpm_level,
	.vblank_too_short = NULL,
	.powergate_uvd = NULL,
};
1707
1708static void cz_dpm_set_funcs(struct amdgpu_device *adev)
1709{
1710 if (NULL == adev->pm.funcs)
1711 adev->pm.funcs = &cz_dpm_funcs;
1712}