]> git.proxmox.com Git - mirror_ubuntu-zesty-kernel.git/blame - drivers/gpu/drm/amd/amdgpu/cz_dpm.c
drm/amdgpu: drop allocation flag masks
[mirror_ubuntu-zesty-kernel.git] / drivers / gpu / drm / amd / amdgpu / cz_dpm.c
CommitLineData
aaa36a97
AD
1/*
2 * Copyright 2014 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include <linux/firmware.h>
25#include <linux/seq_file.h>
26#include "drmP.h"
27#include "amdgpu.h"
28#include "amdgpu_pm.h"
29#include "amdgpu_atombios.h"
30#include "vid.h"
31#include "vi_dpm.h"
32#include "amdgpu_dpm.h"
33#include "cz_dpm.h"
34#include "cz_ppsmc.h"
35#include "atom.h"
36
37#include "smu/smu_8_0_d.h"
38#include "smu/smu_8_0_sh_mask.h"
39#include "gca/gfx_8_0_d.h"
40#include "gca/gfx_8_0_sh_mask.h"
41#include "gmc/gmc_8_1_d.h"
42#include "bif/bif_5_1_d.h"
43#include "gfx_v8_0.h"
44
564ea790
SJ
45static void cz_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate);
46
aaa36a97
AD
47static struct cz_ps *cz_get_ps(struct amdgpu_ps *rps)
48{
49 struct cz_ps *ps = rps->ps_priv;
50
51 return ps;
52}
53
54static struct cz_power_info *cz_get_pi(struct amdgpu_device *adev)
55{
56 struct cz_power_info *pi = adev->pm.dpm.priv;
57
58 return pi;
59}
60
61static uint16_t cz_convert_8bit_index_to_voltage(struct amdgpu_device *adev,
62 uint16_t voltage)
63{
64 uint16_t tmp = 6200 - voltage * 25;
65
66 return tmp;
67}
68
69static void cz_construct_max_power_limits_table(struct amdgpu_device *adev,
70 struct amdgpu_clock_and_voltage_limits *table)
71{
72 struct cz_power_info *pi = cz_get_pi(adev);
73 struct amdgpu_clock_voltage_dependency_table *dep_table =
74 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
75
76 if (dep_table->count > 0) {
77 table->sclk = dep_table->entries[dep_table->count - 1].clk;
78 table->vddc = cz_convert_8bit_index_to_voltage(adev,
79 dep_table->entries[dep_table->count - 1].v);
80 }
81
82 table->mclk = pi->sys_info.nbp_memory_clock[0];
83
84}
85
/* Overlay of the ATOM IntegratedSystemInfo table revisions; the parser
 * below only consumes the v1.9 layout (info_9). */
union igp_info {
	struct _ATOM_INTEGRATED_SYSTEM_INFO info;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_9 info_9;
};
92
93static int cz_parse_sys_info_table(struct amdgpu_device *adev)
94{
95 struct cz_power_info *pi = cz_get_pi(adev);
96 struct amdgpu_mode_info *mode_info = &adev->mode_info;
97 int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
98 union igp_info *igp_info;
99 u8 frev, crev;
100 u16 data_offset;
101 int i = 0;
102
103 if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
104 &frev, &crev, &data_offset)) {
105 igp_info = (union igp_info *)(mode_info->atom_context->bios +
106 data_offset);
107
108 if (crev != 9) {
109 DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
110 return -EINVAL;
111 }
112 pi->sys_info.bootup_sclk =
113 le32_to_cpu(igp_info->info_9.ulBootUpEngineClock);
114 pi->sys_info.bootup_uma_clk =
115 le32_to_cpu(igp_info->info_9.ulBootUpUMAClock);
116 pi->sys_info.dentist_vco_freq =
117 le32_to_cpu(igp_info->info_9.ulDentistVCOFreq);
118 pi->sys_info.bootup_nb_voltage_index =
119 le16_to_cpu(igp_info->info_9.usBootUpNBVoltage);
120
121 if (igp_info->info_9.ucHtcTmpLmt == 0)
122 pi->sys_info.htc_tmp_lmt = 203;
123 else
124 pi->sys_info.htc_tmp_lmt = igp_info->info_9.ucHtcTmpLmt;
125
126 if (igp_info->info_9.ucHtcHystLmt == 0)
127 pi->sys_info.htc_hyst_lmt = 5;
128 else
129 pi->sys_info.htc_hyst_lmt = igp_info->info_9.ucHtcHystLmt;
130
131 if (pi->sys_info.htc_tmp_lmt <= pi->sys_info.htc_hyst_lmt) {
132 DRM_ERROR("The htcTmpLmt should be larger than htcHystLmt.\n");
133 return -EINVAL;
134 }
135
136 if (le32_to_cpu(igp_info->info_9.ulSystemConfig) & (1 << 3) &&
137 pi->enable_nb_ps_policy)
138 pi->sys_info.nb_dpm_enable = true;
139 else
140 pi->sys_info.nb_dpm_enable = false;
141
142 for (i = 0; i < CZ_NUM_NBPSTATES; i++) {
143 if (i < CZ_NUM_NBPMEMORY_CLOCK)
144 pi->sys_info.nbp_memory_clock[i] =
145 le32_to_cpu(igp_info->info_9.ulNbpStateMemclkFreq[i]);
146 pi->sys_info.nbp_n_clock[i] =
147 le32_to_cpu(igp_info->info_9.ulNbpStateNClkFreq[i]);
148 }
149
150 for (i = 0; i < CZ_MAX_DISPLAY_CLOCK_LEVEL; i++)
151 pi->sys_info.display_clock[i] =
152 le32_to_cpu(igp_info->info_9.sDispClkVoltageMapping[i].ulMaximumSupportedCLK);
153
154 for (i = 0; i < CZ_NUM_NBPSTATES; i++)
155 pi->sys_info.nbp_voltage_index[i] =
156 le32_to_cpu(igp_info->info_9.usNBPStateVoltage[i]);
157
158 if (le32_to_cpu(igp_info->info_9.ulGPUCapInfo) &
159 SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS)
160 pi->caps_enable_dfs_bypass = true;
161
162 pi->sys_info.uma_channel_number =
163 igp_info->info_9.ucUMAChannelNumber;
164
165 cz_construct_max_power_limits_table(adev,
166 &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
167 }
168
169 return 0;
170}
171
172static void cz_patch_voltage_values(struct amdgpu_device *adev)
173{
174 int i;
175 struct amdgpu_uvd_clock_voltage_dependency_table *uvd_table =
176 &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
177 struct amdgpu_vce_clock_voltage_dependency_table *vce_table =
178 &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
179 struct amdgpu_clock_voltage_dependency_table *acp_table =
180 &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
181
182 if (uvd_table->count) {
183 for (i = 0; i < uvd_table->count; i++)
184 uvd_table->entries[i].v =
185 cz_convert_8bit_index_to_voltage(adev,
186 uvd_table->entries[i].v);
187 }
188
189 if (vce_table->count) {
190 for (i = 0; i < vce_table->count; i++)
191 vce_table->entries[i].v =
192 cz_convert_8bit_index_to_voltage(adev,
193 vce_table->entries[i].v);
194 }
195
196 if (acp_table->count) {
197 for (i = 0; i < acp_table->count; i++)
198 acp_table->entries[i].v =
199 cz_convert_8bit_index_to_voltage(adev,
200 acp_table->entries[i].v);
201 }
202
203}
204
205static void cz_construct_boot_state(struct amdgpu_device *adev)
206{
207 struct cz_power_info *pi = cz_get_pi(adev);
208
209 pi->boot_pl.sclk = pi->sys_info.bootup_sclk;
210 pi->boot_pl.vddc_index = pi->sys_info.bootup_nb_voltage_index;
211 pi->boot_pl.ds_divider_index = 0;
212 pi->boot_pl.ss_divider_index = 0;
213 pi->boot_pl.allow_gnb_slow = 1;
214 pi->boot_pl.force_nbp_state = 0;
215 pi->boot_pl.display_wm = 0;
216 pi->boot_pl.vce_wm = 0;
217
218}
219
220static void cz_patch_boot_state(struct amdgpu_device *adev,
221 struct cz_ps *ps)
222{
223 struct cz_power_info *pi = cz_get_pi(adev);
224
225 ps->num_levels = 1;
226 ps->levels[0] = pi->boot_pl;
227}
228
/* Overlay of the PPLib per-level clock info variants; Carrizo parsing
 * uses only the .carrizo member. */
union pplib_clock_info {
	struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
	struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
	struct _ATOM_PPLIB_CZ_CLOCK_INFO carrizo;
};
234
235static void cz_parse_pplib_clock_info(struct amdgpu_device *adev,
236 struct amdgpu_ps *rps, int index,
237 union pplib_clock_info *clock_info)
238{
239 struct cz_power_info *pi = cz_get_pi(adev);
240 struct cz_ps *ps = cz_get_ps(rps);
241 struct cz_pl *pl = &ps->levels[index];
242 struct amdgpu_clock_voltage_dependency_table *table =
243 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
244
245 pl->sclk = table->entries[clock_info->carrizo.index].clk;
246 pl->vddc_index = table->entries[clock_info->carrizo.index].v;
247
248 ps->num_levels = index + 1;
249
250 if (pi->caps_sclk_ds) {
251 pl->ds_divider_index = 5;
252 pl->ss_divider_index = 5;
253 }
254
255}
256
257static void cz_parse_pplib_non_clock_info(struct amdgpu_device *adev,
258 struct amdgpu_ps *rps,
259 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
260 u8 table_rev)
261{
262 struct cz_ps *ps = cz_get_ps(rps);
263
264 rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
265 rps->class = le16_to_cpu(non_clock_info->usClassification);
266 rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
267
268 if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
269 rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
270 rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
271 } else {
272 rps->vclk = 0;
273 rps->dclk = 0;
274 }
275
276 if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
277 adev->pm.dpm.boot_ps = rps;
278 cz_patch_boot_state(adev, ps);
279 }
280 if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
281 adev->pm.dpm.uvd_ps = rps;
282
283}
284
/* Overlay of the PPLib PowerPlay table revisions. */
union power_info {
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
	struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
	struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
};

/* Overlay of the PPLib state entry layouts; the parser below uses .v2. */
union pplib_power_state {
	struct _ATOM_PPLIB_STATE v1;
	struct _ATOM_PPLIB_STATE_V2 v2;
};
297
298static int cz_parse_power_table(struct amdgpu_device *adev)
299{
300 struct amdgpu_mode_info *mode_info = &adev->mode_info;
301 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
302 union pplib_power_state *power_state;
303 int i, j, k, non_clock_array_index, clock_array_index;
304 union pplib_clock_info *clock_info;
305 struct _StateArray *state_array;
306 struct _ClockInfoArray *clock_info_array;
307 struct _NonClockInfoArray *non_clock_info_array;
308 union power_info *power_info;
309 int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
310 u16 data_offset;
311 u8 frev, crev;
312 u8 *power_state_offset;
313 struct cz_ps *ps;
314
315 if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
316 &frev, &crev, &data_offset))
317 return -EINVAL;
318 power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
319
320 state_array = (struct _StateArray *)
321 (mode_info->atom_context->bios + data_offset +
322 le16_to_cpu(power_info->pplib.usStateArrayOffset));
323 clock_info_array = (struct _ClockInfoArray *)
324 (mode_info->atom_context->bios + data_offset +
325 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
326 non_clock_info_array = (struct _NonClockInfoArray *)
327 (mode_info->atom_context->bios + data_offset +
328 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
329
330 adev->pm.dpm.ps = kzalloc(sizeof(struct amdgpu_ps) *
331 state_array->ucNumEntries, GFP_KERNEL);
332
333 if (!adev->pm.dpm.ps)
334 return -ENOMEM;
335
336 power_state_offset = (u8 *)state_array->states;
337 adev->pm.dpm.platform_caps =
338 le32_to_cpu(power_info->pplib.ulPlatformCaps);
339 adev->pm.dpm.backbias_response_time =
340 le16_to_cpu(power_info->pplib.usBackbiasTime);
341 adev->pm.dpm.voltage_response_time =
342 le16_to_cpu(power_info->pplib.usVoltageTime);
343
344 for (i = 0; i < state_array->ucNumEntries; i++) {
345 power_state = (union pplib_power_state *)power_state_offset;
346 non_clock_array_index = power_state->v2.nonClockInfoIndex;
347 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
348 &non_clock_info_array->nonClockInfo[non_clock_array_index];
349
350 ps = kzalloc(sizeof(struct cz_ps), GFP_KERNEL);
351 if (ps == NULL) {
352 kfree(adev->pm.dpm.ps);
353 return -ENOMEM;
354 }
355
356 adev->pm.dpm.ps[i].ps_priv = ps;
357 k = 0;
358 for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
359 clock_array_index = power_state->v2.clockInfoIndex[j];
360 if (clock_array_index >= clock_info_array->ucNumEntries)
361 continue;
362 if (k >= CZ_MAX_HARDWARE_POWERLEVELS)
363 break;
364 clock_info = (union pplib_clock_info *)
365 &clock_info_array->clockInfo[clock_array_index *
366 clock_info_array->ucEntrySize];
367 cz_parse_pplib_clock_info(adev, &adev->pm.dpm.ps[i],
368 k, clock_info);
369 k++;
370 }
371 cz_parse_pplib_non_clock_info(adev, &adev->pm.dpm.ps[i],
372 non_clock_info,
373 non_clock_info_array->ucEntrySize);
374 power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
375 }
376 adev->pm.dpm.num_ps = state_array->ucNumEntries;
377
378 return 0;
379}
380
381static int cz_process_firmware_header(struct amdgpu_device *adev)
382{
383 struct cz_power_info *pi = cz_get_pi(adev);
384 u32 tmp;
385 int ret;
386
387 ret = cz_read_smc_sram_dword(adev, SMU8_FIRMWARE_HEADER_LOCATION +
388 offsetof(struct SMU8_Firmware_Header,
389 DpmTable),
390 &tmp, pi->sram_end);
391
392 if (ret == 0)
393 pi->dpm_table_start = tmp;
394
395 return ret;
396}
397
/*
 * Allocate and initialize the Carrizo DPM private data: driver defaults,
 * BIOS tables (sys info, power states) and the SMU firmware header.
 *
 * On any failure the partially initialized state is left attached to
 * adev->pm.dpm.priv; the caller (cz_dpm_sw_init) tears it down via
 * cz_dpm_fini() on the error path.
 */
static int cz_dpm_init(struct amdgpu_device *adev)
{
	struct cz_power_info *pi;
	int ret, i;

	pi = kzalloc(sizeof(struct cz_power_info), GFP_KERNEL);
	if (NULL == pi)
		return -ENOMEM;

	adev->pm.dpm.priv = pi;

	ret = amdgpu_get_platform_caps(adev);
	if (ret)
		return ret;

	ret = amdgpu_parse_extended_power_table(adev);
	if (ret)
		return ret;

	pi->sram_end = SMC_RAM_END;

	/* set up DPM defaults */
	for (i = 0; i < CZ_MAX_HARDWARE_POWERLEVELS; i++)
		pi->active_target[i] = CZ_AT_DFLT;

	pi->mgcg_cgtt_local0 = 0x0;
	pi->mgcg_cgtt_local1 = 0x0;
	pi->clock_slow_down_step = 25000;
	pi->skip_clock_slow_down = 1;
	pi->enable_nb_ps_policy = 1;
	pi->caps_power_containment = true;
	pi->caps_cac = true;
	/* di/dt ramping is disabled by default; the block below only takes
	 * effect if this is flipped to true. */
	pi->didt_enabled = false;
	if (pi->didt_enabled) {
		pi->caps_sq_ramping = true;
		pi->caps_db_ramping = true;
		pi->caps_td_ramping = true;
		pi->caps_tcp_ramping = true;
	}
	pi->caps_sclk_ds = true;
	pi->voting_clients = 0x00c00033;
	pi->auto_thermal_throttling_enabled = true;
	pi->bapm_enabled = false;
	pi->disable_nb_ps3_in_battery = false;
	pi->voltage_drop_threshold = 0;
	pi->caps_sclk_throttle_low_notification = false;
	pi->gfx_pg_threshold = 500;
	pi->caps_fps = true;
	/* uvd */
	pi->caps_uvd_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_UVD) ? true : false;
	pi->caps_uvd_dpm = true;
	/* vce */
	pi->caps_vce_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_VCE) ? true : false;
	pi->caps_vce_dpm = true;
	/* acp */
	pi->caps_acp_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_ACP) ? true : false;
	pi->caps_acp_dpm = true;

	pi->caps_stable_power_state = false;
	pi->nb_dpm_enabled_by_driver = true;
	pi->nb_dpm_enabled = false;
	pi->caps_voltage_island = false;
	/* flags which indicate need to upload pptable */
	pi->need_pptable_upload = true;

	/* Parse BIOS tables after the defaults so table data wins. */
	ret = cz_parse_sys_info_table(adev);
	if (ret)
		return ret;

	cz_patch_voltage_values(adev);
	cz_construct_boot_state(adev);

	ret = cz_parse_power_table(adev);
	if (ret)
		return ret;

	ret = cz_process_firmware_header(adev);
	if (ret)
		return ret;

	pi->dpm_enabled = true;
	pi->uvd_dynamic_pg = false;

	return 0;
}
483
/*
 * Release everything cz_dpm_init() allocated: each state's private cz_ps,
 * the state array, the cz_power_info, and the extended power tables.
 * Safe to call on a partially initialized device (kfree(NULL) is a no-op
 * and num_ps is 0 until the power table parses successfully).
 */
static void cz_dpm_fini(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->pm.dpm.num_ps; i++)
		kfree(adev->pm.dpm.ps[i].ps_priv);

	kfree(adev->pm.dpm.ps);
	kfree(adev->pm.dpm.priv);
	amdgpu_free_extended_power_table(adev);
}
495
496static void
497cz_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
498 struct seq_file *m)
499{
500 struct amdgpu_clock_voltage_dependency_table *table =
501 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
502 u32 current_index =
503 (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
504 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >>
505 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT;
506 u32 sclk, tmp;
507 u16 vddc;
508
509 if (current_index >= NUM_SCLK_LEVELS) {
510 seq_printf(m, "invalid dpm profile %d\n", current_index);
511 } else {
512 sclk = table->entries[current_index].clk;
513 tmp = (RREG32_SMC(ixSMU_VOLTAGE_STATUS) &
514 SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL_MASK) >>
515 SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL__SHIFT;
516 vddc = cz_convert_8bit_index_to_voltage(adev, (u16)tmp);
517 seq_printf(m, "power level %d sclk: %u vddc: %u\n",
518 current_index, sclk, vddc);
519 }
520}
521
522static void cz_dpm_print_power_state(struct amdgpu_device *adev,
523 struct amdgpu_ps *rps)
524{
525 int i;
526 struct cz_ps *ps = cz_get_ps(rps);
527
528 amdgpu_dpm_print_class_info(rps->class, rps->class2);
529 amdgpu_dpm_print_cap_info(rps->caps);
530
531 DRM_INFO("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
532 for (i = 0; i < ps->num_levels; i++) {
533 struct cz_pl *pl = &ps->levels[i];
534
535 DRM_INFO("\t\tpower level %d sclk: %u vddc: %u\n",
536 i, pl->sclk,
537 cz_convert_8bit_index_to_voltage(adev, pl->vddc_index));
538 }
539
540 amdgpu_dpm_print_ps_status(adev, rps);
541}
542
543static void cz_dpm_set_funcs(struct amdgpu_device *adev);
544
/* IP early init: install the Carrizo DPM function table. */
static int cz_dpm_early_init(struct amdgpu_device *adev)
{
	cz_dpm_set_funcs(adev);

	return 0;
}
551
564ea790
SJ
552
/* IP late init: gate blocks that have no users yet. */
static int cz_dpm_late_init(struct amdgpu_device *adev)
{
	/* powerdown unused blocks for now */
	cz_dpm_powergate_uvd(adev, true);

	return 0;
}
560
aaa36a97
AD
/*
 * IP sw init: set default power-management state, then (unless DPM is
 * disabled via the amdgpu_dpm module parameter) run the full DPM init and
 * expose the sysfs interface.
 *
 * All DPM setup happens under adev->pm.mutex; the failure path tears the
 * partial init down with cz_dpm_fini() before dropping the lock.
 */
static int cz_dpm_sw_init(struct amdgpu_device *adev)
{
	int ret = 0;
	/* fix me to add thermal support TODO */

	/* default to balanced state */
	adev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
	adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
	adev->pm.dpm.forced_level = AMDGPU_DPM_FORCED_LEVEL_AUTO;
	adev->pm.default_sclk = adev->clock.default_sclk;
	adev->pm.default_mclk = adev->clock.default_mclk;
	adev->pm.current_sclk = adev->clock.default_sclk;
	adev->pm.current_mclk = adev->clock.default_mclk;
	adev->pm.int_thermal_type = THERMAL_TYPE_NONE;

	/* amdgpu_dpm == 0 means DPM is administratively disabled */
	if (amdgpu_dpm == 0)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = cz_dpm_init(adev);
	if (ret)
		goto dpm_init_failed;

	adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
	if (amdgpu_dpm == 1)
		amdgpu_pm_print_power_states(adev);

	ret = amdgpu_pm_sysfs_init(adev);
	if (ret)
		goto dpm_init_failed;

	mutex_unlock(&adev->pm.mutex);
	DRM_INFO("amdgpu: dpm initialized\n");

	return 0;

dpm_init_failed:
	cz_dpm_fini(adev);
	mutex_unlock(&adev->pm.mutex);
	DRM_ERROR("amdgpu: dpm initialization failed\n");

	return ret;
}
604
/* IP sw fini: remove the sysfs interface and free all DPM state. */
static int cz_dpm_sw_fini(struct amdgpu_device *adev)
{
	mutex_lock(&adev->pm.mutex);
	amdgpu_pm_sysfs_fini(adev);
	cz_dpm_fini(adev);
	mutex_unlock(&adev->pm.mutex);

	return 0;
}
614
/* Clear the active-process mask used for per-application power policy. */
static void cz_reset_ap_mask(struct amdgpu_device *adev)
{
	struct cz_power_info *pi = cz_get_pi(adev);

	pi->active_process_mask = 0;
}
622
/* Fetch the power play table from SMU memory; *table points into it. */
static int cz_dpm_download_pptable_from_smu(struct amdgpu_device *adev,
					    void **table)
{
	return cz_smu_download_pptable(adev, table);
}
632
633static int cz_dpm_upload_pptable_to_smu(struct amdgpu_device *adev)
634{
635 struct cz_power_info *pi = cz_get_pi(adev);
636 struct SMU8_Fusion_ClkTable *clock_table;
637 struct atom_clock_dividers dividers;
638 void *table = NULL;
639 uint8_t i = 0;
640 int ret = 0;
641
642 struct amdgpu_clock_voltage_dependency_table *vddc_table =
643 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
644 struct amdgpu_clock_voltage_dependency_table *vddgfx_table =
645 &adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk;
646 struct amdgpu_uvd_clock_voltage_dependency_table *uvd_table =
647 &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
648 struct amdgpu_vce_clock_voltage_dependency_table *vce_table =
649 &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
650 struct amdgpu_clock_voltage_dependency_table *acp_table =
651 &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
652
653 if (!pi->need_pptable_upload)
654 return 0;
655
656 ret = cz_dpm_download_pptable_from_smu(adev, &table);
657 if (ret) {
658 DRM_ERROR("amdgpu: Failed to get power play table from SMU!\n");
659 return -EINVAL;
660 }
661
662 clock_table = (struct SMU8_Fusion_ClkTable *)table;
663 /* patch clock table */
664 if (vddc_table->count > CZ_MAX_HARDWARE_POWERLEVELS ||
665 vddgfx_table->count > CZ_MAX_HARDWARE_POWERLEVELS ||
666 uvd_table->count > CZ_MAX_HARDWARE_POWERLEVELS ||
667 vce_table->count > CZ_MAX_HARDWARE_POWERLEVELS ||
668 acp_table->count > CZ_MAX_HARDWARE_POWERLEVELS) {
669 DRM_ERROR("amdgpu: Invalid Clock Voltage Dependency Table!\n");
670 return -EINVAL;
671 }
672
673 for (i = 0; i < CZ_MAX_HARDWARE_POWERLEVELS; i++) {
674
675 /* vddc sclk */
676 clock_table->SclkBreakdownTable.ClkLevel[i].GnbVid =
677 (i < vddc_table->count) ? (uint8_t)vddc_table->entries[i].v : 0;
678 clock_table->SclkBreakdownTable.ClkLevel[i].Frequency =
679 (i < vddc_table->count) ? vddc_table->entries[i].clk : 0;
680 ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
681 clock_table->SclkBreakdownTable.ClkLevel[i].Frequency,
682 false, &dividers);
683 if (ret)
684 return ret;
685 clock_table->SclkBreakdownTable.ClkLevel[i].DfsDid =
686 (uint8_t)dividers.post_divider;
687
688 /* vddgfx sclk */
689 clock_table->SclkBreakdownTable.ClkLevel[i].GfxVid =
690 (i < vddgfx_table->count) ? (uint8_t)vddgfx_table->entries[i].v : 0;
691
692 /* acp breakdown */
693 clock_table->AclkBreakdownTable.ClkLevel[i].GfxVid =
694 (i < acp_table->count) ? (uint8_t)acp_table->entries[i].v : 0;
695 clock_table->AclkBreakdownTable.ClkLevel[i].Frequency =
696 (i < acp_table->count) ? acp_table->entries[i].clk : 0;
697 ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
698 clock_table->SclkBreakdownTable.ClkLevel[i].Frequency,
699 false, &dividers);
700 if (ret)
701 return ret;
702 clock_table->AclkBreakdownTable.ClkLevel[i].DfsDid =
703 (uint8_t)dividers.post_divider;
704
705 /* uvd breakdown */
706 clock_table->VclkBreakdownTable.ClkLevel[i].GfxVid =
707 (i < uvd_table->count) ? (uint8_t)uvd_table->entries[i].v : 0;
708 clock_table->VclkBreakdownTable.ClkLevel[i].Frequency =
709 (i < uvd_table->count) ? uvd_table->entries[i].vclk : 0;
710 ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
711 clock_table->VclkBreakdownTable.ClkLevel[i].Frequency,
712 false, &dividers);
713 if (ret)
714 return ret;
715 clock_table->VclkBreakdownTable.ClkLevel[i].DfsDid =
716 (uint8_t)dividers.post_divider;
717
718 clock_table->DclkBreakdownTable.ClkLevel[i].GfxVid =
719 (i < uvd_table->count) ? (uint8_t)uvd_table->entries[i].v : 0;
720 clock_table->DclkBreakdownTable.ClkLevel[i].Frequency =
721 (i < uvd_table->count) ? uvd_table->entries[i].dclk : 0;
722 ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
723 clock_table->DclkBreakdownTable.ClkLevel[i].Frequency,
724 false, &dividers);
725 if (ret)
726 return ret;
727 clock_table->DclkBreakdownTable.ClkLevel[i].DfsDid =
728 (uint8_t)dividers.post_divider;
729
730 /* vce breakdown */
731 clock_table->EclkBreakdownTable.ClkLevel[i].GfxVid =
732 (i < vce_table->count) ? (uint8_t)vce_table->entries[i].v : 0;
733 clock_table->EclkBreakdownTable.ClkLevel[i].Frequency =
734 (i < vce_table->count) ? vce_table->entries[i].ecclk : 0;
735 ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
736 clock_table->EclkBreakdownTable.ClkLevel[i].Frequency,
737 false, &dividers);
738 if (ret)
739 return ret;
740 clock_table->EclkBreakdownTable.ClkLevel[i].DfsDid =
741 (uint8_t)dividers.post_divider;
742 }
743
744 /* its time to upload to SMU */
745 ret = cz_smu_upload_pptable(adev);
746 if (ret) {
747 DRM_ERROR("amdgpu: Failed to put power play table to SMU!\n");
748 return ret;
749 }
750
751 return 0;
752}
753
754static void cz_init_sclk_limit(struct amdgpu_device *adev)
755{
756 struct cz_power_info *pi = cz_get_pi(adev);
757 struct amdgpu_clock_voltage_dependency_table *table =
758 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
759 uint32_t clock = 0, level;
760
761 if (!table || !table->count) {
762 DRM_ERROR("Invalid Voltage Dependency table.\n");
763 return;
764 }
765
766 pi->sclk_dpm.soft_min_clk = 0;
767 pi->sclk_dpm.hard_min_clk = 0;
768 cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxSclkLevel);
769 level = cz_get_argument(adev);
770 if (level < table->count)
771 clock = table->entries[level].clk;
772 else {
773 DRM_ERROR("Invalid SLCK Voltage Dependency table entry.\n");
774 clock = table->entries[table->count - 1].clk;
775 }
776
777 pi->sclk_dpm.soft_max_clk = clock;
778 pi->sclk_dpm.hard_max_clk = clock;
779
780}
781
782static void cz_init_uvd_limit(struct amdgpu_device *adev)
783{
784 struct cz_power_info *pi = cz_get_pi(adev);
785 struct amdgpu_uvd_clock_voltage_dependency_table *table =
786 &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
787 uint32_t clock = 0, level;
788
789 if (!table || !table->count) {
790 DRM_ERROR("Invalid Voltage Dependency table.\n");
791 return;
792 }
793
794 pi->uvd_dpm.soft_min_clk = 0;
795 pi->uvd_dpm.hard_min_clk = 0;
796 cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxUvdLevel);
797 level = cz_get_argument(adev);
798 if (level < table->count)
799 clock = table->entries[level].vclk;
800 else {
801 DRM_ERROR("Invalid UVD Voltage Dependency table entry.\n");
802 clock = table->entries[table->count - 1].vclk;
803 }
804
805 pi->uvd_dpm.soft_max_clk = clock;
806 pi->uvd_dpm.hard_max_clk = clock;
807
808}
809
810static void cz_init_vce_limit(struct amdgpu_device *adev)
811{
812 struct cz_power_info *pi = cz_get_pi(adev);
813 struct amdgpu_vce_clock_voltage_dependency_table *table =
814 &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
815 uint32_t clock = 0, level;
816
817 if (!table || !table->count) {
818 DRM_ERROR("Invalid Voltage Dependency table.\n");
819 return;
820 }
821
822 pi->vce_dpm.soft_min_clk = 0;
823 pi->vce_dpm.hard_min_clk = 0;
824 cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxEclkLevel);
825 level = cz_get_argument(adev);
826 if (level < table->count)
827 clock = table->entries[level].evclk;
828 else {
829 /* future BIOS would fix this error */
830 DRM_ERROR("Invalid VCE Voltage Dependency table entry.\n");
831 clock = table->entries[table->count - 1].evclk;
832 }
833
834 pi->vce_dpm.soft_max_clk = clock;
835 pi->vce_dpm.hard_max_clk = clock;
836
837}
838
839static void cz_init_acp_limit(struct amdgpu_device *adev)
840{
841 struct cz_power_info *pi = cz_get_pi(adev);
842 struct amdgpu_clock_voltage_dependency_table *table =
843 &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
844 uint32_t clock = 0, level;
845
846 if (!table || !table->count) {
847 DRM_ERROR("Invalid Voltage Dependency table.\n");
848 return;
849 }
850
851 pi->acp_dpm.soft_min_clk = 0;
852 pi->acp_dpm.hard_min_clk = 0;
853 cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxAclkLevel);
854 level = cz_get_argument(adev);
855 if (level < table->count)
856 clock = table->entries[level].clk;
857 else {
858 DRM_ERROR("Invalid ACP Voltage Dependency table entry.\n");
859 clock = table->entries[table->count - 1].clk;
860 }
861
862 pi->acp_dpm.soft_max_clk = clock;
863 pi->acp_dpm.hard_max_clk = clock;
864
865}
866
/* Start with every powergate-able block (UVD/VCE/ACP) ungated. */
static void cz_init_pg_state(struct amdgpu_device *adev)
{
	struct cz_power_info *pi = cz_get_pi(adev);

	pi->uvd_power_gated = false;
	pi->vce_power_gated = false;
	pi->acp_power_gated = false;
}
876
/* Disable the low-sclk interrupt by clearing its threshold. */
static void cz_init_sclk_threshold(struct amdgpu_device *adev)
{
	struct cz_power_info *pi = cz_get_pi(adev);

	pi->low_sclk_interrupt_threshold = 0;
}
884
/*
 * One-time per-enable ASIC setup. The pptable upload must precede the
 * limit queries, since those ask the SMU about the table just uploaded.
 */
static void cz_dpm_setup_asic(struct amdgpu_device *adev)
{
	cz_reset_ap_mask(adev);
	cz_dpm_upload_pptable_to_smu(adev);
	cz_init_sclk_limit(adev);
	cz_init_uvd_limit(adev);
	cz_init_vce_limit(adev);
	cz_init_acp_limit(adev);
	cz_init_pg_state(adev);
	cz_init_sclk_threshold(adev);
}
897
898static bool cz_check_smu_feature(struct amdgpu_device *adev,
899 uint32_t feature)
900{
901 uint32_t smu_feature = 0;
902 int ret;
903
904 ret = cz_send_msg_to_smc_with_parameter(adev,
905 PPSMC_MSG_GetFeatureStatus, 0);
906 if (ret) {
907 DRM_ERROR("Failed to get SMU features from SMC.\n");
908 return false;
909 } else {
910 smu_feature = cz_get_argument(adev);
911 if (feature & smu_feature)
912 return true;
913 }
914
915 return false;
916}
917
918static bool cz_check_for_dpm_enabled(struct amdgpu_device *adev)
919{
920 if (cz_check_smu_feature(adev,
921 SMU_EnabledFeatureScoreboard_SclkDpmOn))
922 return true;
923
924 return false;
925}
926
/* Grant the default set of clients the right to vote on clock changes. */
static void cz_program_voting_clients(struct amdgpu_device *adev)
{
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, PPCZ_VOTINGRIGHTSCLIENTS_DFLT0);
}
931
/* Revoke all clock-transition voting rights. */
static void cz_clear_voting_clients(struct amdgpu_device *adev)
{
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0);
}
936
937static int cz_start_dpm(struct amdgpu_device *adev)
938{
939 int ret = 0;
940
941 if (amdgpu_dpm) {
942 ret = cz_send_msg_to_smc_with_parameter(adev,
943 PPSMC_MSG_EnableAllSmuFeatures, SCLK_DPM_MASK);
944 if (ret) {
945 DRM_ERROR("SMU feature: SCLK_DPM enable failed\n");
946 return -EINVAL;
947 }
948 }
949
950 return 0;
951}
952
953static int cz_stop_dpm(struct amdgpu_device *adev)
954{
955 int ret = 0;
956
957 if (amdgpu_dpm && adev->pm.dpm_enabled) {
958 ret = cz_send_msg_to_smc_with_parameter(adev,
959 PPSMC_MSG_DisableAllSmuFeatures, SCLK_DPM_MASK);
960 if (ret) {
961 DRM_ERROR("SMU feature: SCLK_DPM disable failed\n");
962 return -EINVAL;
963 }
964 }
965
966 return 0;
967}
968
969static uint32_t cz_get_sclk_level(struct amdgpu_device *adev,
970 uint32_t clock, uint16_t msg)
971{
972 int i = 0;
973 struct amdgpu_clock_voltage_dependency_table *table =
974 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
975
976 switch (msg) {
977 case PPSMC_MSG_SetSclkSoftMin:
978 case PPSMC_MSG_SetSclkHardMin:
979 for (i = 0; i < table->count; i++)
980 if (clock <= table->entries[i].clk)
981 break;
982 if (i == table->count)
983 i = table->count - 1;
984 break;
985 case PPSMC_MSG_SetSclkSoftMax:
986 case PPSMC_MSG_SetSclkHardMax:
987 for (i = table->count - 1; i >= 0; i--)
988 if (clock >= table->entries[i].clk)
989 break;
990 if (i < 0)
991 i = 0;
992 break;
993 default:
994 break;
995 }
996
997 return i;
998}
999
1000static int cz_program_bootup_state(struct amdgpu_device *adev)
1001{
1002 struct cz_power_info *pi = cz_get_pi(adev);
1003 uint32_t soft_min_clk = 0;
1004 uint32_t soft_max_clk = 0;
1005 int ret = 0;
1006
1007 pi->sclk_dpm.soft_min_clk = pi->sys_info.bootup_sclk;
1008 pi->sclk_dpm.soft_max_clk = pi->sys_info.bootup_sclk;
1009
1010 soft_min_clk = cz_get_sclk_level(adev,
1011 pi->sclk_dpm.soft_min_clk,
1012 PPSMC_MSG_SetSclkSoftMin);
1013 soft_max_clk = cz_get_sclk_level(adev,
1014 pi->sclk_dpm.soft_max_clk,
1015 PPSMC_MSG_SetSclkSoftMax);
1016
1017 ret = cz_send_msg_to_smc_with_parameter(adev,
1018 PPSMC_MSG_SetSclkSoftMin, soft_min_clk);
1019 if (ret)
1020 return -EINVAL;
1021
1022 ret = cz_send_msg_to_smc_with_parameter(adev,
1023 PPSMC_MSG_SetSclkSoftMax, soft_max_clk);
1024 if (ret)
1025 return -EINVAL;
1026
1027 return 0;
1028}
1029
/* TODO: disable clock/power gating before touching di/dt registers. */
static int cz_disable_cgpg(struct amdgpu_device *adev)
{
	return 0;
}

/* TODO: re-enable clock/power gating after di/dt programming. */
static int cz_enable_cgpg(struct amdgpu_device *adev)
{
	return 0;
}

/* TODO: program the power-tune configuration registers. */
static int cz_program_pt_config_registers(struct amdgpu_device *adev)
{
	return 0;
}
1047
1048static void cz_do_enable_didt(struct amdgpu_device *adev, bool enable)
1049{
1050 struct cz_power_info *pi = cz_get_pi(adev);
1051 uint32_t reg = 0;
1052
1053 if (pi->caps_sq_ramping) {
1054 reg = RREG32_DIDT(ixDIDT_SQ_CTRL0);
1055 if (enable)
1056 reg = REG_SET_FIELD(reg, DIDT_SQ_CTRL0, DIDT_CTRL_EN, 1);
1057 else
1058 reg = REG_SET_FIELD(reg, DIDT_SQ_CTRL0, DIDT_CTRL_EN, 0);
1059 WREG32_DIDT(ixDIDT_SQ_CTRL0, reg);
1060 }
1061 if (pi->caps_db_ramping) {
1062 reg = RREG32_DIDT(ixDIDT_DB_CTRL0);
1063 if (enable)
1064 reg = REG_SET_FIELD(reg, DIDT_DB_CTRL0, DIDT_CTRL_EN, 1);
1065 else
1066 reg = REG_SET_FIELD(reg, DIDT_DB_CTRL0, DIDT_CTRL_EN, 0);
1067 WREG32_DIDT(ixDIDT_DB_CTRL0, reg);
1068 }
1069 if (pi->caps_td_ramping) {
1070 reg = RREG32_DIDT(ixDIDT_TD_CTRL0);
1071 if (enable)
1072 reg = REG_SET_FIELD(reg, DIDT_TD_CTRL0, DIDT_CTRL_EN, 1);
1073 else
1074 reg = REG_SET_FIELD(reg, DIDT_TD_CTRL0, DIDT_CTRL_EN, 0);
1075 WREG32_DIDT(ixDIDT_TD_CTRL0, reg);
1076 }
1077 if (pi->caps_tcp_ramping) {
1078 reg = RREG32_DIDT(ixDIDT_TCP_CTRL0);
1079 if (enable)
1080 reg = REG_SET_FIELD(reg, DIDT_SQ_CTRL0, DIDT_CTRL_EN, 1);
1081 else
1082 reg = REG_SET_FIELD(reg, DIDT_SQ_CTRL0, DIDT_CTRL_EN, 0);
1083 WREG32_DIDT(ixDIDT_TCP_CTRL0, reg);
1084 }
1085
1086}
1087
/* Enable or disable Di/Dt throttling for all supported ramping domains.
 *
 * Di/Dt register programming must not race with graphics clock/power
 * gating, so the sequence is: enter GFX safe mode (disabling cg/pg),
 * program the PT config registers, flip the enable bits, then leave
 * safe mode (re-enabling cg/pg).  The ordering of these steps matters.
 *
 * Returns 0 on success (or if no ramping domain is supported),
 * -EINVAL on any step failure.
 */
static int cz_enable_didt(struct amdgpu_device *adev, bool enable)
{
	struct cz_power_info *pi = cz_get_pi(adev);
	int ret;

	if (pi->caps_sq_ramping || pi->caps_db_ramping ||
	    pi->caps_td_ramping || pi->caps_tcp_ramping) {
		/* enter safe mode first: cg/pg must be off while we touch
		 * the Di/Dt registers */
		if (adev->gfx.gfx_current_status != AMDGPU_GFX_SAFE_MODE) {
			ret = cz_disable_cgpg(adev);
			if (ret) {
				DRM_ERROR("Pre Di/Dt disable cg/pg failed\n");
				return -EINVAL;
			}
			adev->gfx.gfx_current_status = AMDGPU_GFX_SAFE_MODE;
		}

		ret = cz_program_pt_config_registers(adev);
		if (ret) {
			DRM_ERROR("Di/Dt config failed\n");
			return -EINVAL;
		}
		cz_do_enable_didt(adev, enable);

		/* leave safe mode: restore cg/pg */
		if (adev->gfx.gfx_current_status == AMDGPU_GFX_SAFE_MODE) {
			ret = cz_enable_cgpg(adev);
			if (ret) {
				DRM_ERROR("Post Di/Dt enable cg/pg failed\n");
				return -EINVAL;
			}
			adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;
		}
	}

	return 0;
}
1123
/* TODO: reset the ACP (audio co-processor) DPM boot level.
 * Not yet implemented; intentionally a no-op. */
static void cz_reset_acp_boot_level(struct amdgpu_device *adev)
{
}
1128
1129static void cz_update_current_ps(struct amdgpu_device *adev,
1130 struct amdgpu_ps *rps)
1131{
1132 struct cz_power_info *pi = cz_get_pi(adev);
1133 struct cz_ps *ps = cz_get_ps(rps);
1134
1135 pi->current_ps = *ps;
1136 pi->current_rps = *rps;
1137 pi->current_rps.ps_priv = ps;
1138
1139}
1140
1141static void cz_update_requested_ps(struct amdgpu_device *adev,
1142 struct amdgpu_ps *rps)
1143{
1144 struct cz_power_info *pi = cz_get_pi(adev);
1145 struct cz_ps *ps = cz_get_ps(rps);
1146
1147 pi->requested_ps = *ps;
1148 pi->requested_rps = *rps;
1149 pi->requested_rps.ps_priv = ps;
1150
1151}
1152
1153/* PP arbiter support needed TODO */
1154static void cz_apply_state_adjust_rules(struct amdgpu_device *adev,
1155 struct amdgpu_ps *new_rps,
1156 struct amdgpu_ps *old_rps)
1157{
1158 struct cz_ps *ps = cz_get_ps(new_rps);
1159 struct cz_power_info *pi = cz_get_pi(adev);
1160 struct amdgpu_clock_and_voltage_limits *limits =
1161 &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
1162 /* 10kHz memory clock */
1163 uint32_t mclk = 0;
1164
1165 ps->force_high = false;
1166 ps->need_dfs_bypass = true;
1167 pi->video_start = new_rps->dclk || new_rps->vclk ||
1168 new_rps->evclk || new_rps->ecclk;
1169
1170 if ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) ==
1171 ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
1172 pi->battery_state = true;
1173 else
1174 pi->battery_state = false;
1175
1176 if (pi->caps_stable_power_state)
1177 mclk = limits->mclk;
1178
1179 if (mclk > pi->sys_info.nbp_memory_clock[CZ_NUM_NBPMEMORY_CLOCK - 1])
1180 ps->force_high = true;
1181
1182}
1183
1184static int cz_dpm_enable(struct amdgpu_device *adev)
1185{
1186 int ret = 0;
1187
1188 /* renable will hang up SMU, so check first */
1189 if (cz_check_for_dpm_enabled(adev))
1190 return -EINVAL;
1191
1192 cz_program_voting_clients(adev);
1193
1194 ret = cz_start_dpm(adev);
1195 if (ret) {
1196 DRM_ERROR("Carrizo DPM enable failed\n");
1197 return -EINVAL;
1198 }
1199
1200 ret = cz_program_bootup_state(adev);
1201 if (ret) {
1202 DRM_ERROR("Carrizo bootup state program failed\n");
1203 return -EINVAL;
1204 }
1205
1206 ret = cz_enable_didt(adev, true);
1207 if (ret) {
1208 DRM_ERROR("Carrizo enable di/dt failed\n");
1209 return -EINVAL;
1210 }
1211
1212 cz_reset_acp_boot_level(adev);
1213
1214 cz_update_current_ps(adev, adev->pm.dpm.boot_ps);
1215
1216 return 0;
1217}
1218
1219static int cz_dpm_hw_init(struct amdgpu_device *adev)
1220{
46651cc5 1221 int ret = 0;
aaa36a97
AD
1222
1223 mutex_lock(&adev->pm.mutex);
1224
1225 /* init smc in dpm hw init */
1226 ret = cz_smu_init(adev);
1227 if (ret) {
1228 DRM_ERROR("amdgpu: smc initialization failed\n");
1229 mutex_unlock(&adev->pm.mutex);
1230 return ret;
1231 }
1232
1233 /* do the actual fw loading */
1234 ret = cz_smu_start(adev);
1235 if (ret) {
1236 DRM_ERROR("amdgpu: smc start failed\n");
1237 mutex_unlock(&adev->pm.mutex);
1238 return ret;
1239 }
1240
46651cc5
SJ
1241 if (!amdgpu_dpm) {
1242 adev->pm.dpm_enabled = false;
1243 mutex_unlock(&adev->pm.mutex);
1244 return ret;
1245 }
1246
aaa36a97
AD
1247 /* cz dpm setup asic */
1248 cz_dpm_setup_asic(adev);
1249
1250 /* cz dpm enable */
1251 ret = cz_dpm_enable(adev);
1252 if (ret)
1253 adev->pm.dpm_enabled = false;
1254 else
1255 adev->pm.dpm_enabled = true;
1256
1257 mutex_unlock(&adev->pm.mutex);
1258
1259 return 0;
1260}
1261
1262static int cz_dpm_disable(struct amdgpu_device *adev)
1263{
1264 int ret = 0;
1265
1266 if (!cz_check_for_dpm_enabled(adev))
1267 return -EINVAL;
1268
1269 ret = cz_enable_didt(adev, false);
1270 if (ret) {
1271 DRM_ERROR("Carrizo disable di/dt failed\n");
1272 return -EINVAL;
1273 }
1274
564ea790
SJ
1275 /* powerup blocks */
1276 cz_dpm_powergate_uvd(adev, false);
1277
aaa36a97
AD
1278 cz_clear_voting_clients(adev);
1279 cz_stop_dpm(adev);
1280 cz_update_current_ps(adev, adev->pm.dpm.boot_ps);
1281
1282 return 0;
1283}
1284
1285static int cz_dpm_hw_fini(struct amdgpu_device *adev)
1286{
1287 int ret = 0;
1288
1289 mutex_lock(&adev->pm.mutex);
1290
1291 cz_smu_fini(adev);
1292
1293 if (adev->pm.dpm_enabled) {
1294 ret = cz_dpm_disable(adev);
aaa36a97
AD
1295
1296 adev->pm.dpm.current_ps =
1297 adev->pm.dpm.requested_ps =
1298 adev->pm.dpm.boot_ps;
1299 }
1300
1301 adev->pm.dpm_enabled = false;
1302
1303 mutex_unlock(&adev->pm.mutex);
1304
10457457 1305 return ret;
aaa36a97
AD
1306}
1307
1308static int cz_dpm_suspend(struct amdgpu_device *adev)
1309{
1310 int ret = 0;
1311
1312 if (adev->pm.dpm_enabled) {
1313 mutex_lock(&adev->pm.mutex);
1314
1315 ret = cz_dpm_disable(adev);
aaa36a97
AD
1316
1317 adev->pm.dpm.current_ps =
1318 adev->pm.dpm.requested_ps =
1319 adev->pm.dpm.boot_ps;
1320
1321 mutex_unlock(&adev->pm.mutex);
1322 }
1323
10457457 1324 return ret;
aaa36a97
AD
1325}
1326
1327static int cz_dpm_resume(struct amdgpu_device *adev)
1328{
1329 int ret = 0;
1330
1331 mutex_lock(&adev->pm.mutex);
1332 ret = cz_smu_init(adev);
1333 if (ret) {
1334 DRM_ERROR("amdgpu: smc resume failed\n");
1335 mutex_unlock(&adev->pm.mutex);
1336 return ret;
1337 }
1338
1339 /* do the actual fw loading */
1340 ret = cz_smu_start(adev);
1341 if (ret) {
1342 DRM_ERROR("amdgpu: smc start failed\n");
1343 mutex_unlock(&adev->pm.mutex);
1344 return ret;
1345 }
1346
46651cc5
SJ
1347 if (!amdgpu_dpm) {
1348 adev->pm.dpm_enabled = false;
1349 mutex_unlock(&adev->pm.mutex);
1350 return ret;
1351 }
1352
aaa36a97
AD
1353 /* cz dpm setup asic */
1354 cz_dpm_setup_asic(adev);
1355
1356 /* cz dpm enable */
1357 ret = cz_dpm_enable(adev);
1358 if (ret)
1359 adev->pm.dpm_enabled = false;
1360 else
1361 adev->pm.dpm_enabled = true;
1362
1363 mutex_unlock(&adev->pm.mutex);
1364 /* upon resume, re-compute the clocks */
1365 if (adev->pm.dpm_enabled)
1366 amdgpu_pm_compute_clocks(adev);
1367
1368 return 0;
1369}
1370
/* IP-block clockgating hook; the Carrizo DPM block has no clockgating
 * of its own, so this is intentionally a no-op. */
static int cz_dpm_set_clockgating_state(struct amdgpu_device *adev,
					enum amdgpu_clockgating_state state)
{
	return 0;
}
1376
/* IP-block powergating hook; the Carrizo DPM block has no powergating
 * of its own, so this is intentionally a no-op. */
static int cz_dpm_set_powergating_state(struct amdgpu_device *adev,
					enum amdgpu_powergating_state state)
{
	return 0;
}
1382
1383/* borrowed from KV, need future unify */
1384static int cz_dpm_get_temperature(struct amdgpu_device *adev)
1385{
1386 int actual_temp = 0;
1387 uint32_t temp = RREG32_SMC(0xC0300E0C);
1388
1389 if (temp)
1390 actual_temp = 1000 * ((temp / 8) - 49);
1391
1392 return actual_temp;
1393}
1394
1395static int cz_dpm_pre_set_power_state(struct amdgpu_device *adev)
1396{
1397 struct cz_power_info *pi = cz_get_pi(adev);
1398 struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps;
1399 struct amdgpu_ps *new_ps = &requested_ps;
1400
1401 cz_update_requested_ps(adev, new_ps);
1402 cz_apply_state_adjust_rules(adev, &pi->requested_rps,
1403 &pi->current_rps);
1404
1405 return 0;
1406}
1407
1408static int cz_dpm_update_sclk_limit(struct amdgpu_device *adev)
1409{
1410 struct cz_power_info *pi = cz_get_pi(adev);
1411 struct amdgpu_clock_and_voltage_limits *limits =
1412 &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
1413 uint32_t clock, stable_ps_clock = 0;
1414
1415 clock = pi->sclk_dpm.soft_min_clk;
1416
1417 if (pi->caps_stable_power_state) {
1418 stable_ps_clock = limits->sclk * 75 / 100;
1419 if (clock < stable_ps_clock)
1420 clock = stable_ps_clock;
1421 }
1422
1423 if (clock != pi->sclk_dpm.soft_min_clk) {
1424 pi->sclk_dpm.soft_min_clk = clock;
1425 cz_send_msg_to_smc_with_parameter(adev,
1426 PPSMC_MSG_SetSclkSoftMin,
1427 cz_get_sclk_level(adev, clock,
1428 PPSMC_MSG_SetSclkSoftMin));
1429 }
1430
1431 if (pi->caps_stable_power_state &&
1432 pi->sclk_dpm.soft_max_clk != clock) {
1433 pi->sclk_dpm.soft_max_clk = clock;
1434 cz_send_msg_to_smc_with_parameter(adev,
1435 PPSMC_MSG_SetSclkSoftMax,
1436 cz_get_sclk_level(adev, clock,
1437 PPSMC_MSG_SetSclkSoftMax));
1438 } else {
1439 cz_send_msg_to_smc_with_parameter(adev,
1440 PPSMC_MSG_SetSclkSoftMax,
1441 cz_get_sclk_level(adev,
1442 pi->sclk_dpm.soft_max_clk,
1443 PPSMC_MSG_SetSclkSoftMax));
1444 }
1445
1446 return 0;
1447}
1448
1449static int cz_dpm_set_deep_sleep_sclk_threshold(struct amdgpu_device *adev)
1450{
1451 int ret = 0;
1452 struct cz_power_info *pi = cz_get_pi(adev);
1453
1454 if (pi->caps_sclk_ds) {
1455 cz_send_msg_to_smc_with_parameter(adev,
1456 PPSMC_MSG_SetMinDeepSleepSclk,
1457 CZ_MIN_DEEP_SLEEP_SCLK);
1458 }
1459
1460 return ret;
1461}
1462
1463/* ?? without dal support, is this still needed in setpowerstate list*/
1464static int cz_dpm_set_watermark_threshold(struct amdgpu_device *adev)
1465{
1466 int ret = 0;
1467 struct cz_power_info *pi = cz_get_pi(adev);
1468
1469 cz_send_msg_to_smc_with_parameter(adev,
1470 PPSMC_MSG_SetWatermarkFrequency,
1471 pi->sclk_dpm.soft_max_clk);
1472
1473 return ret;
1474}
1475
1476static int cz_dpm_enable_nbdpm(struct amdgpu_device *adev)
1477{
1478 int ret = 0;
1479 struct cz_power_info *pi = cz_get_pi(adev);
1480
1481 /* also depend on dal NBPStateDisableRequired */
1482 if (pi->nb_dpm_enabled_by_driver && !pi->nb_dpm_enabled) {
1483 ret = cz_send_msg_to_smc_with_parameter(adev,
1484 PPSMC_MSG_EnableAllSmuFeatures,
1485 NB_DPM_MASK);
1486 if (ret) {
1487 DRM_ERROR("amdgpu: nb dpm enable failed\n");
1488 return ret;
1489 }
1490 pi->nb_dpm_enabled = true;
1491 }
1492
1493 return ret;
1494}
1495
1496static void cz_dpm_nbdpm_lm_pstate_enable(struct amdgpu_device *adev,
1497 bool enable)
1498{
1499 if (enable)
1500 cz_send_msg_to_smc(adev, PPSMC_MSG_EnableLowMemoryPstate);
1501 else
1502 cz_send_msg_to_smc(adev, PPSMC_MSG_DisableLowMemoryPstate);
1503
1504}
1505
1506static int cz_dpm_update_low_memory_pstate(struct amdgpu_device *adev)
1507{
1508 int ret = 0;
1509 struct cz_power_info *pi = cz_get_pi(adev);
1510 struct cz_ps *ps = &pi->requested_ps;
1511
1512 if (pi->sys_info.nb_dpm_enable) {
1513 if (ps->force_high)
1514 cz_dpm_nbdpm_lm_pstate_enable(adev, true);
1515 else
1516 cz_dpm_nbdpm_lm_pstate_enable(adev, false);
1517 }
1518
1519 return ret;
1520}
1521
/* with dpm enabled */
static int cz_dpm_set_power_state(struct amdgpu_device *adev)
{
	/* apply the requested state: clock limits, deep sleep threshold,
	 * watermark, NB DPM and low-memory p-state, in that order.
	 * Sub-step results are not propagated (best effort). */
	cz_dpm_update_sclk_limit(adev);
	cz_dpm_set_deep_sleep_sclk_threshold(adev);
	cz_dpm_set_watermark_threshold(adev);
	cz_dpm_enable_nbdpm(adev);
	cz_dpm_update_low_memory_pstate(adev);

	return 0;
}
1535
1536static void cz_dpm_post_set_power_state(struct amdgpu_device *adev)
1537{
1538 struct cz_power_info *pi = cz_get_pi(adev);
1539 struct amdgpu_ps *ps = &pi->requested_rps;
1540
1541 cz_update_current_ps(adev, ps);
1542
1543}
1544
1545static int cz_dpm_force_highest(struct amdgpu_device *adev)
1546{
1547 struct cz_power_info *pi = cz_get_pi(adev);
1548 int ret = 0;
1549
1550 if (pi->sclk_dpm.soft_min_clk != pi->sclk_dpm.soft_max_clk) {
1551 pi->sclk_dpm.soft_min_clk =
1552 pi->sclk_dpm.soft_max_clk;
1553 ret = cz_send_msg_to_smc_with_parameter(adev,
1554 PPSMC_MSG_SetSclkSoftMin,
1555 cz_get_sclk_level(adev,
1556 pi->sclk_dpm.soft_min_clk,
1557 PPSMC_MSG_SetSclkSoftMin));
1558 if (ret)
1559 return ret;
1560 }
1561
1562 return ret;
1563}
1564
1565static int cz_dpm_force_lowest(struct amdgpu_device *adev)
1566{
1567 struct cz_power_info *pi = cz_get_pi(adev);
1568 int ret = 0;
1569
1570 if (pi->sclk_dpm.soft_max_clk != pi->sclk_dpm.soft_min_clk) {
1571 pi->sclk_dpm.soft_max_clk = pi->sclk_dpm.soft_min_clk;
1572 ret = cz_send_msg_to_smc_with_parameter(adev,
1573 PPSMC_MSG_SetSclkSoftMax,
1574 cz_get_sclk_level(adev,
1575 pi->sclk_dpm.soft_max_clk,
1576 PPSMC_MSG_SetSclkSoftMax));
1577 if (ret)
1578 return ret;
1579 }
1580
1581 return ret;
1582}
1583
/* Query the SMU for the number of sclk DPM levels; the answer is cached
 * in pi->max_sclk_level after the first call.
 *
 * NOTE(review): the return type is uint32_t but the error path returns
 * -EINVAL, which callers receive as a very large positive value —
 * cz_dpm_unforce_dpm_levels only survives this because it range-checks
 * the derived index.  Confirm this is intended.
 */
static uint32_t cz_dpm_get_max_sclk_level(struct amdgpu_device *adev)
{
	struct cz_power_info *pi = cz_get_pi(adev);

	if (!pi->max_sclk_level) {
		/* SMU replies with the highest level index via the
		 * argument register; +1 turns it into a count */
		cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxSclkLevel);
		pi->max_sclk_level = cz_get_argument(adev) + 1;
	}

	if (pi->max_sclk_level > CZ_MAX_HARDWARE_POWERLEVELS) {
		DRM_ERROR("Invalid max sclk level!\n");
		return -EINVAL;
	}

	return pi->max_sclk_level;
}
1600
1601static int cz_dpm_unforce_dpm_levels(struct amdgpu_device *adev)
1602{
1603 struct cz_power_info *pi = cz_get_pi(adev);
1604 struct amdgpu_clock_voltage_dependency_table *dep_table =
1605 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
1606 uint32_t level = 0;
1607 int ret = 0;
1608
1609 pi->sclk_dpm.soft_min_clk = dep_table->entries[0].clk;
1610 level = cz_dpm_get_max_sclk_level(adev) - 1;
1611 if (level < dep_table->count)
1612 pi->sclk_dpm.soft_max_clk = dep_table->entries[level].clk;
1613 else
1614 pi->sclk_dpm.soft_max_clk =
1615 dep_table->entries[dep_table->count - 1].clk;
1616
1617 /* get min/max sclk soft value
1618 * notify SMU to execute */
1619 ret = cz_send_msg_to_smc_with_parameter(adev,
1620 PPSMC_MSG_SetSclkSoftMin,
1621 cz_get_sclk_level(adev,
1622 pi->sclk_dpm.soft_min_clk,
1623 PPSMC_MSG_SetSclkSoftMin));
1624 if (ret)
1625 return ret;
1626
1627 ret = cz_send_msg_to_smc_with_parameter(adev,
1628 PPSMC_MSG_SetSclkSoftMax,
1629 cz_get_sclk_level(adev,
1630 pi->sclk_dpm.soft_max_clk,
1631 PPSMC_MSG_SetSclkSoftMax));
1632 if (ret)
1633 return ret;
1634
1635 DRM_INFO("DPM unforce state min=%d, max=%d.\n",
1636 pi->sclk_dpm.soft_min_clk,
1637 pi->sclk_dpm.soft_max_clk);
1638
1639 return 0;
1640}
1641
1642static int cz_dpm_force_dpm_level(struct amdgpu_device *adev,
1643 enum amdgpu_dpm_forced_level level)
1644{
1645 int ret = 0;
1646
1647 switch (level) {
1648 case AMDGPU_DPM_FORCED_LEVEL_HIGH:
1649 ret = cz_dpm_force_highest(adev);
1650 if (ret)
1651 return ret;
1652 break;
1653 case AMDGPU_DPM_FORCED_LEVEL_LOW:
1654 ret = cz_dpm_force_lowest(adev);
1655 if (ret)
1656 return ret;
1657 break;
1658 case AMDGPU_DPM_FORCED_LEVEL_AUTO:
1659 ret = cz_dpm_unforce_dpm_levels(adev);
1660 if (ret)
1661 return ret;
1662 break;
1663 default:
1664 break;
1665 }
1666
1667 return ret;
1668}
1669
/* fix me, display configuration change lists here
 * mostly dal related — intentionally empty until DAL support lands. */
static void cz_dpm_display_configuration_changed(struct amdgpu_device *adev)
{
}
1675
1676static uint32_t cz_dpm_get_sclk(struct amdgpu_device *adev, bool low)
1677{
1678 struct cz_power_info *pi = cz_get_pi(adev);
1679 struct cz_ps *requested_state = cz_get_ps(&pi->requested_rps);
1680
1681 if (low)
1682 return requested_state->levels[0].sclk;
1683 else
1684 return requested_state->levels[requested_state->num_levels - 1].sclk;
1685
1686}
1687
1688static uint32_t cz_dpm_get_mclk(struct amdgpu_device *adev, bool low)
1689{
1690 struct cz_power_info *pi = cz_get_pi(adev);
1691
1692 return pi->sys_info.bootup_uma_clk;
1693}
1694
564ea790
SJ
1695static int cz_enable_uvd_dpm(struct amdgpu_device *adev, bool enable)
1696{
1697 struct cz_power_info *pi = cz_get_pi(adev);
1698 int ret = 0;
1699
1700 if (enable && pi->caps_uvd_dpm ) {
1701 pi->dpm_flags |= DPMFlags_UVD_Enabled;
1702 DRM_DEBUG("UVD DPM Enabled.\n");
1703
1704 ret = cz_send_msg_to_smc_with_parameter(adev,
1705 PPSMC_MSG_EnableAllSmuFeatures, UVD_DPM_MASK);
1706 } else {
1707 pi->dpm_flags &= ~DPMFlags_UVD_Enabled;
1708 DRM_DEBUG("UVD DPM Stopped\n");
1709
1710 ret = cz_send_msg_to_smc_with_parameter(adev,
1711 PPSMC_MSG_DisableAllSmuFeatures, UVD_DPM_MASK);
1712 }
1713
1714 return ret;
1715}
1716
/* UVD DPM tracks the inverse of the gate request: gating UVD disables
 * its DPM, ungating re-enables it. */
static int cz_update_uvd_dpm(struct amdgpu_device *adev, bool gate)
{
	return cz_enable_uvd_dpm(adev, !gate);
}
1721
1722
/* Gate or ungate UVD power.  The ordering is deliberate and must not be
 * changed: on gate — ungate clocks, power-gate the IP block, stop UVD
 * DPM, then cut power via the SMU; on ungate — restore power via the
 * SMU, un-power-gate the IP block, re-enable clockgating, then restart
 * UVD DPM.  Idempotent: repeated calls with the same @gate are no-ops.
 */
static void cz_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate)
{
	struct cz_power_info *pi = cz_get_pi(adev);
	int ret;

	if (pi->uvd_power_gated == gate)
		return;

	pi->uvd_power_gated = gate;

	if (gate) {
		if (pi->caps_uvd_pg) {
			/* disable clockgating so we can properly shut down the block */
			ret = amdgpu_set_clockgating_state(adev, AMDGPU_IP_BLOCK_TYPE_UVD,
							   AMDGPU_CG_STATE_UNGATE);
			/* shutdown the UVD block */
			ret = amdgpu_set_powergating_state(adev, AMDGPU_IP_BLOCK_TYPE_UVD,
							   AMDGPU_PG_STATE_GATE);
			/* XXX: check for errors */
		}
		cz_update_uvd_dpm(adev, gate);
		if (pi->caps_uvd_pg)
			/* power off the UVD block */
			cz_send_msg_to_smc(adev, PPSMC_MSG_UVDPowerOFF);
	} else {
		if (pi->caps_uvd_pg) {
			/* power on the UVD block; uvd_dynamic_pg selects the
			 * SMU's dynamic power-gating mode */
			if (pi->uvd_dynamic_pg)
				cz_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_UVDPowerON, 1);
			else
				cz_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_UVDPowerON, 0);
			/* re-init the UVD block */
			ret = amdgpu_set_powergating_state(adev, AMDGPU_IP_BLOCK_TYPE_UVD,
							   AMDGPU_PG_STATE_UNGATE);
			/* enable clockgating. hw will dynamically gate/ungate clocks on the fly */
			ret = amdgpu_set_clockgating_state(adev, AMDGPU_IP_BLOCK_TYPE_UVD,
							   AMDGPU_CG_STATE_GATE);
			/* XXX: check for errors */
		}
		cz_update_uvd_dpm(adev, gate);
	}
}
1765
aaa36a97
AD
1766const struct amdgpu_ip_funcs cz_dpm_ip_funcs = {
1767 .early_init = cz_dpm_early_init,
564ea790 1768 .late_init = cz_dpm_late_init,
aaa36a97
AD
1769 .sw_init = cz_dpm_sw_init,
1770 .sw_fini = cz_dpm_sw_fini,
1771 .hw_init = cz_dpm_hw_init,
1772 .hw_fini = cz_dpm_hw_fini,
1773 .suspend = cz_dpm_suspend,
1774 .resume = cz_dpm_resume,
1775 .is_idle = NULL,
1776 .wait_for_idle = NULL,
1777 .soft_reset = NULL,
1778 .print_status = NULL,
1779 .set_clockgating_state = cz_dpm_set_clockgating_state,
1780 .set_powergating_state = cz_dpm_set_powergating_state,
1781};
1782
1783static const struct amdgpu_dpm_funcs cz_dpm_funcs = {
1784 .get_temperature = cz_dpm_get_temperature,
1785 .pre_set_power_state = cz_dpm_pre_set_power_state,
1786 .set_power_state = cz_dpm_set_power_state,
1787 .post_set_power_state = cz_dpm_post_set_power_state,
1788 .display_configuration_changed = cz_dpm_display_configuration_changed,
1789 .get_sclk = cz_dpm_get_sclk,
1790 .get_mclk = cz_dpm_get_mclk,
1791 .print_power_state = cz_dpm_print_power_state,
1792 .debugfs_print_current_performance_level =
1793 cz_dpm_debugfs_print_current_performance_level,
1794 .force_performance_level = cz_dpm_force_dpm_level,
1795 .vblank_too_short = NULL,
564ea790 1796 .powergate_uvd = cz_dpm_powergate_uvd,
aaa36a97
AD
1797};
1798
1799static void cz_dpm_set_funcs(struct amdgpu_device *adev)
1800{
1801 if (NULL == adev->pm.funcs)
1802 adev->pm.funcs = &cz_dpm_funcs;
1803}