drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
137d63ab
HR
1/*
2 * Copyright 2019 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
137d63ab 23#include <linux/firmware.h>
1cf8c930 24#include <linux/pci.h>
9fdd91aa
SR
25
26#include "pp_debug.h"
137d63ab
HR
27#include "amdgpu.h"
28#include "amdgpu_smu.h"
18c1d3ce 29#include "smu_internal.h"
137d63ab 30#include "soc15_common.h"
07845526 31#include "smu_v11_0.h"
5dbbe6a7 32#include "smu_v12_0.h"
e15da5a4 33#include "atom.h"
24e141e1 34#include "amd_pcie.h"
6c45e480
EQ
35#include "vega20_ppt.h"
36#include "arcturus_ppt.h"
37#include "navi10_ppt.h"
38#include "renoir_ppt.h"
137d63ab 39
6b294793
KW
40#undef __SMU_DUMMY_MAP
41#define __SMU_DUMMY_MAP(type) #type
42static const char* __smu_message_names[] = {
43 SMU_MESSAGE_TYPES
44};
45
46const char *smu_get_message_name(struct smu_context *smu, enum smu_message_type type)
47{
e3bf125b 48 if (type < 0 || type >= SMU_MSG_MAX_COUNT)
ab631311 49 return "unknown smu message";
6b294793
KW
50 return __smu_message_names[type];
51}
52
cb33363d
KW
53#undef __SMU_DUMMY_MAP
54#define __SMU_DUMMY_MAP(fea) #fea
55static const char* __smu_feature_names[] = {
56 SMU_FEATURE_MASKS
57};
58
59const char *smu_get_feature_name(struct smu_context *smu, enum smu_feature_mask feature)
60{
e3bf125b 61 if (feature < 0 || feature >= SMU_FEATURE_COUNT)
ab631311 62 return "unknown smu feature";
cb33363d
KW
63 return __smu_feature_names[feature];
64}
65
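/*
 * smu_sys_get_pp_feature_mask - dump the SMU feature state into @buf
 *
 * Prints the 64-bit enabled-feature mask, then one line per feature that
 * has a valid hardware index, showing its name, hardware index and whether
 * it is currently enabled. Returns the number of bytes written to @buf.
 */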
98eb03bb
KW
66size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf)
67{
68 size_t size = 0;
69 int ret = 0, i = 0;
70 uint32_t feature_mask[2] = { 0 };
71 int32_t feature_index = 0;
72 uint32_t count = 0;
67194518
KW
73 uint32_t sort_feature[SMU_FEATURE_COUNT];
74 uint64_t hw_feature_count = 0;
98eb03bb 75
3697b339
EQ
76 mutex_lock(&smu->mutex);
77
98eb03bb
KW
78 ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
79 if (ret)
80 goto failed;
81
82 size = sprintf(buf + size, "features high: 0x%08x low: 0x%08x\n",
83 feature_mask[1], feature_mask[0]);
84
85 for (i = 0; i < SMU_FEATURE_COUNT; i++) {
86 feature_index = smu_feature_get_index(smu, i);
87 if (feature_index < 0)
88 continue;
67194518
KW
89 sort_feature[feature_index] = i;
90 hw_feature_count++;
91 }
92
93 for (i = 0; i < hw_feature_count; i++) {
98eb03bb
KW
94 size += sprintf(buf + size, "%02d. %-20s (%2d) : %s\n",
95 count++,
67194518
KW
96 smu_get_feature_name(smu, sort_feature[i]),
97 i,
98 !!smu_feature_is_enabled(smu, sort_feature[i]) ?
ab631311 99 "enabled" : "disabled");
98eb03bb
KW
100 }
101
102failed:
3697b339
EQ
103 mutex_unlock(&smu->mutex);
104
98eb03bb
KW
105 return size;
106}
107
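/*
 * Enable or disable the features in @feature_mask on the SMC via the
 * {Enable,Disable}SmuFeaturesLow/High messages, then mirror the change in
 * the driver-side feature->enabled bitmap under feature->mutex.
 */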
c66846e0
EQ
108static int smu_feature_update_enable_state(struct smu_context *smu,
109 uint64_t feature_mask,
110 bool enabled)
111{
112 struct smu_feature *feature = &smu->smu_feature;
113 uint32_t feature_low = 0, feature_high = 0;
114 int ret = 0;
115
116 if (!smu->pm_enabled)
117 return ret;
118
119 feature_low = (feature_mask >> 0 ) & 0xffffffff;
120 feature_high = (feature_mask >> 32) & 0xffffffff;
121
122 if (enabled) {
123 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesLow,
124 feature_low);
125 if (ret)
126 return ret;
127 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesHigh,
128 feature_high);
129 if (ret)
130 return ret;
131 } else {
132 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesLow,
133 feature_low);
134 if (ret)
135 return ret;
136 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesHigh,
137 feature_high);
138 if (ret)
139 return ret;
140 }
141
142 mutex_lock(&feature->mutex);
143 if (enabled)
144 bitmap_or(feature->enabled, feature->enabled,
145 (unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
146 else
147 bitmap_andnot(feature->enabled, feature->enabled,
148 (unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
149 mutex_unlock(&feature->mutex);
150
151 return ret;
152}
153
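/*
 * smu_sys_set_pp_feature_mask - apply a user-requested feature mask
 *
 * Features set in @new_mask but not currently enabled are turned on, and
 * features currently enabled but cleared in @new_mask are turned off.
 */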
98eb03bb
KW
154int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask)
155{
156 int ret = 0;
157 uint32_t feature_mask[2] = { 0 };
158 uint64_t feature_2_enabled = 0;
159 uint64_t feature_2_disabled = 0;
160 uint64_t feature_enables = 0;
161
3697b339
EQ
162 mutex_lock(&smu->mutex);
163
98eb03bb
KW
164 ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
165 if (ret)
3697b339 166 goto out;
98eb03bb
KW
167
168 feature_enables = ((uint64_t)feature_mask[1] << 32 | (uint64_t)feature_mask[0]);
169
170 feature_2_enabled = ~feature_enables & new_mask;
171 feature_2_disabled = feature_enables & ~new_mask;
172
173 if (feature_2_enabled) {
174 ret = smu_feature_update_enable_state(smu, feature_2_enabled, true);
175 if (ret)
3697b339 176 goto out;
98eb03bb
KW
177 }
178 if (feature_2_disabled) {
179 ret = smu_feature_update_enable_state(smu, feature_2_disabled, false);
180 if (ret)
3697b339 181 goto out;
98eb03bb
KW
182 }
183
3697b339
EQ
184out:
185 mutex_unlock(&smu->mutex);
186
98eb03bb
KW
187 return ret;
188}
189
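/*
 * Query the driver-interface version and/or the SMU firmware version via
 * the GetDriverIfVersion and GetSmuVersion messages. Either output pointer
 * may be NULL, but not both.
 */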
4fde03a7
KW
190int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t *smu_version)
191{
192 int ret = 0;
193
194 if (!if_version && !smu_version)
195 return -EINVAL;
196
197 if (if_version) {
198 ret = smu_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion);
199 if (ret)
200 return ret;
201
202 ret = smu_read_smc_arg(smu, if_version);
203 if (ret)
204 return ret;
205 }
206
207 if (smu_version) {
208 ret = smu_send_smc_msg(smu, SMU_MSG_GetSmuVersion);
209 if (ret)
210 return ret;
211
212 ret = smu_read_smc_arg(smu, smu_version);
213 if (ret)
214 return ret;
215 }
216
217 return ret;
218}
219
0d7cbd28
KW
220int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
221 uint32_t min, uint32_t max)
222{
4045f36f 223 int ret = 0;
0d7cbd28 224
80381d40 225 if (min < 0 && max < 0)
0d7cbd28
KW
226 return -EINVAL;
227
54728170
KW
228 if (!smu_clk_dpm_is_enabled(smu, clk_type))
229 return 0;
230
4045f36f 231 ret = smu_set_soft_freq_limited_range(smu, clk_type, min, max);
0d7cbd28
KW
232 return ret;
233}
234
33665617
KW
235int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
236 uint32_t min, uint32_t max)
237{
238 int ret = 0, clk_id = 0;
239 uint32_t param;
240
241 if (min <= 0 && max <= 0)
242 return -EINVAL;
243
54728170
KW
244 if (!smu_clk_dpm_is_enabled(smu, clk_type))
245 return 0;
246
33665617
KW
247 clk_id = smu_clk_get_index(smu, clk_type);
248 if (clk_id < 0)
249 return clk_id;
250
251 if (max > 0) {
252 param = (uint32_t)((clk_id << 16) | (max & 0xffff));
253 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq,
254 param);
255 if (ret)
256 return ret;
257 }
258
259 if (min > 0) {
260 param = (uint32_t)((clk_id << 16) | (min & 0xffff));
261 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
262 param);
263 if (ret)
264 return ret;
265 }
266
267
268 return ret;
269}
270
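/*
 * Report the min/max frequency range for @clk_type. If DPM is disabled for
 * that clock, both limits fall back to the vbios bootup clock; otherwise
 * the ASIC is asked for its ultimate DPM limits. Takes smu->mutex unless
 * the caller already holds it (@lock_needed == false).
 */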
8b3d243e 271int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
3697b339 272 uint32_t *min, uint32_t *max, bool lock_needed)
8b3d243e 273{
3a48c10d 274 uint32_t clock_limit;
eee3258e 275 int ret = 0;
8b3d243e
KW
276
277 if (!min && !max)
278 return -EINVAL;
279
3697b339
EQ
280 if (lock_needed)
281 mutex_lock(&smu->mutex);
282
3a48c10d
EQ
283 if (!smu_clk_dpm_is_enabled(smu, clk_type)) {
284 switch (clk_type) {
285 case SMU_MCLK:
286 case SMU_UCLK:
287 clock_limit = smu->smu_table.boot_values.uclk;
288 break;
289 case SMU_GFXCLK:
290 case SMU_SCLK:
291 clock_limit = smu->smu_table.boot_values.gfxclk;
292 break;
293 case SMU_SOCCLK:
294 clock_limit = smu->smu_table.boot_values.socclk;
295 break;
296 default:
297 clock_limit = 0;
298 break;
299 }
300
301 /* clock in MHz unit */
302 if (min)
303 *min = clock_limit / 100;
304 if (max)
305 *max = clock_limit / 100;
3697b339
EQ
306 } else {
307 /*
308 * Todo: Have each ASIC (ASIC_ppt funcs) control the callbacks exposed to the
309 * core driver, and add helpers for the parts that are common (SMU_v11_x | SMU_v12_x funcs).
310 */
311 ret = smu_get_dpm_ultimate_freq(smu, clk_type, min, max);
3a48c10d 312 }
3697b339
EQ
313
314 if (lock_needed)
315 mutex_unlock(&smu->mutex);
316
8b3d243e
KW
317 return ret;
318}
319
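/*
 * Read the DPM frequency at the given @level index for @clk_type via the
 * GetDpmFreqByIndex message. A level of 0xff is used by
 * smu_get_dpm_level_count() below to query the number of DPM levels.
 */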
3ac54a50
KW
320int smu_get_dpm_freq_by_index(struct smu_context *smu, enum smu_clk_type clk_type,
321 uint16_t level, uint32_t *value)
322{
323 int ret = 0, clk_id = 0;
324 uint32_t param;
325
326 if (!value)
327 return -EINVAL;
328
54728170
KW
329 if (!smu_clk_dpm_is_enabled(smu, clk_type))
330 return 0;
331
3ac54a50
KW
332 clk_id = smu_clk_get_index(smu, clk_type);
333 if (clk_id < 0)
334 return clk_id;
335
336 param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff));
337
338 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmFreqByIndex,
339 param);
340 if (ret)
341 return ret;
342
343 ret = smu_read_smc_arg(smu, &param);
344 if (ret)
345 return ret;
346
347 /* BIT31: 0 - Fine grained DPM, 1 - Discrete DPM
348 * not supported for now */
349 *value = param & 0x7fffffff;
350
351 return ret;
352}
353
354int smu_get_dpm_level_count(struct smu_context *smu, enum smu_clk_type clk_type,
355 uint32_t *value)
356{
357 return smu_get_dpm_freq_by_index(smu, clk_type, 0xff, value);
358}
359
e4b613e0
KW
360int smu_get_dpm_level_range(struct smu_context *smu, enum smu_clk_type clk_type,
361 uint32_t *min_value, uint32_t *max_value)
362{
363 int ret = 0;
364 uint32_t level_count = 0;
365
366 if (!min_value && !max_value)
367 return -EINVAL;
368
369 if (min_value) {
370 /* by default, level 0 clock value as min value */
371 ret = smu_get_dpm_freq_by_index(smu, clk_type, 0, min_value);
372 if (ret)
373 return ret;
374 }
375
376 if (max_value) {
377 ret = smu_get_dpm_level_count(smu, clk_type, &level_count);
378 if (ret)
379 return ret;
380
381 ret = smu_get_dpm_freq_by_index(smu, clk_type, level_count - 1, max_value);
382 if (ret)
383 return ret;
384 }
385
386 return ret;
387}
388
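/*
 * Check whether DPM is enabled for @clk_type by testing the corresponding
 * DPM feature bit. Clock types without a mapped feature are treated as
 * always enabled.
 */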
54728170
KW
389bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type)
390{
391 enum smu_feature_mask feature_id = 0;
392
393 switch (clk_type) {
394 case SMU_MCLK:
395 case SMU_UCLK:
396 feature_id = SMU_FEATURE_DPM_UCLK_BIT;
397 break;
398 case SMU_GFXCLK:
399 case SMU_SCLK:
400 feature_id = SMU_FEATURE_DPM_GFXCLK_BIT;
401 break;
402 case SMU_SOCCLK:
403 feature_id = SMU_FEATURE_DPM_SOCCLK_BIT;
404 break;
405 default:
406 return true;
407 }
408
409 if (!smu_feature_is_enabled(smu, feature_id)) {
54728170
KW
410 return false;
411 }
412
413 return true;
414}
415
cf3fde89
EQ
416/**
417 * smu_dpm_set_power_gate - power gate/ungate the specific IP block
418 *
419 * @smu: smu_context pointer
420 * @block_type: the IP block to power gate/ungate
421 * @gate: to power gate if true, ungate otherwise
422 *
423 * This API uses no smu->mutex lock protection because:
424 * 1. It is either called by other IP blocks (gfx/sdma/vcn/uvd/vce),
425 * where the caller guarantees it is free of race conditions.
426 * 2. Or it is called on a user request to set power_dpm_force_performance_level.
427 * In that case, the smu->mutex lock protection is already enforced by
428 * the parent API smu_force_performance_level in the call path.
429 */
72e91f37
KW
430int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
431 bool gate)
432{
433 int ret = 0;
434
435 switch (block_type) {
436 case AMD_IP_BLOCK_TYPE_UVD:
a64c9e15 437 ret = smu_dpm_set_uvd_enable(smu, !gate);
72e91f37
KW
438 break;
439 case AMD_IP_BLOCK_TYPE_VCE:
a64c9e15 440 ret = smu_dpm_set_vce_enable(smu, !gate);
72e91f37 441 break;
73c86d62
HZ
442 case AMD_IP_BLOCK_TYPE_GFX:
443 ret = smu_gfx_off_control(smu, gate);
444 break;
a90a24d5
PL
445 case AMD_IP_BLOCK_TYPE_SDMA:
446 ret = smu_powergate_sdma(smu, gate);
447 break;
0db2ab99 448 case AMD_IP_BLOCK_TYPE_JPEG:
a64c9e15 449 ret = smu_dpm_set_jpeg_enable(smu, !gate);
0db2ab99 450 break;
72e91f37
KW
451 default:
452 break;
453 }
454
3697b339 455 return ret;
ea2d0bf8
KW
456}
457
09895323
KW
458int smu_get_power_num_states(struct smu_context *smu,
459 struct pp_states_info *state_info)
460{
461 if (!state_info)
462 return -EINVAL;
463
464 /* power states are not supported */
465 memset(state_info, 0, sizeof(struct pp_states_info));
f0d2a7dc
EQ
466 state_info->nums = 1;
467 state_info->states[0] = POWER_STATE_TYPE_DEFAULT;
09895323
KW
468
469 return 0;
470}
471
143c75d6
KW
472int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor,
473 void *data, uint32_t *size)
474{
706e5082
EQ
475 struct smu_power_context *smu_power = &smu->smu_power;
476 struct smu_power_gate *power_gate = &smu_power->power_gate;
143c75d6
KW
477 int ret = 0;
478
9b4e63f4
KF
479 if (!data || !size)
480 return -EINVAL;
481
143c75d6 482 switch (sensor) {
46814f51
CG
483 case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
484 *((uint32_t *)data) = smu->pstate_sclk;
485 *size = 4;
486 break;
487 case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
488 *((uint32_t *)data) = smu->pstate_mclk;
489 *size = 4;
490 break;
143c75d6
KW
491 case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
492 ret = smu_feature_get_enabled_mask(smu, (uint32_t *)data, 2);
493 *size = 8;
494 break;
6b1b7b5b
KW
495 case AMDGPU_PP_SENSOR_UVD_POWER:
496 *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT) ? 1 : 0;
497 *size = 4;
498 break;
499 case AMDGPU_PP_SENSOR_VCE_POWER:
500 *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0;
501 *size = 4;
502 break;
bf2bf523 503 case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
706e5082 504 *(uint32_t *)data = power_gate->vcn_gated ? 0 : 1;
bf2bf523
EQ
505 *size = 4;
506 break;
143c75d6
KW
507 default:
508 ret = -EINVAL;
509 break;
510 }
511
512 if (ret)
513 *size = 0;
514
515 return ret;
516}
517
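/*
 * Transfer a SMU table between the driver-owned VRAM bo and SMC memory.
 * With @drv2smu true, @table_data is copied into the driver table bo and
 * pushed to the SMC (TransferTableDram2Smu); otherwise the table is pulled
 * from the SMC and copied back into @table_data. HDP cache flushes keep
 * the CPU and GPU views of the bo consistent.
 */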
0d9d78b5 518int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int argument,
dbe6a970
KW
519 void *table_data, bool drv2smu)
520{
521 struct smu_table_context *smu_table = &smu->smu_table;
839f9117 522 struct amdgpu_device *adev = smu->adev;
ce0d0ec3 523 struct smu_table *table = &smu_table->driver_table;
33bd73ae 524 int table_id = smu_table_get_index(smu, table_index);
ce0d0ec3
EQ
525 uint32_t table_size;
526 int ret = 0;
dbe6a970 527
871e5e72 528 if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0)
dbe6a970
KW
529 return -EINVAL;
530
ce0d0ec3 531 table_size = smu_table->tables[table_index].size;
dbe6a970 532
e0aa4a92 533 if (drv2smu) {
ce0d0ec3 534 memcpy(table->cpu_addr, table_data, table_size);
e0aa4a92
EQ
535 /*
536 * Flush the hdp cache: to guarantee that the content seen by the
537 * GPU is consistent with that seen by the CPU.
538 */
539 amdgpu_asic_flush_hdp(adev, NULL);
540 }
dbe6a970 541
dbe6a970
KW
542 ret = smu_send_smc_msg_with_param(smu, drv2smu ?
543 SMU_MSG_TransferTableDram2Smu :
544 SMU_MSG_TransferTableSmu2Dram,
0d9d78b5 545 table_id | ((argument & 0xFFFF) << 16));
dbe6a970
KW
546 if (ret)
547 return ret;
548
e0aa4a92
EQ
549 if (!drv2smu) {
550 amdgpu_asic_flush_hdp(adev, NULL);
ce0d0ec3 551 memcpy(table_data, table->cpu_addr, table_size);
e0aa4a92 552 }
dbe6a970
KW
553
554 return ret;
555}
556
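/*
 * Report whether the ASIC uses this SW SMU path: Vega20 only when
 * amdgpu_dpm == 2, Arcturus and newer always (except SR-IOV VFs that are
 * not in one-VF mode), everything else never.
 */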
dc8e3a0c
KW
557bool is_support_sw_smu(struct amdgpu_device *adev)
558{
54b998ca
HZ
559 if (adev->asic_type == CHIP_VEGA20)
560 return amdgpu_dpm == 2;
96358810 561 else if (adev->asic_type >= CHIP_ARCTURUS) {
c2a801af 562 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
96358810
JZ
563 return false;
564 else
565 return true;
566 } else
54b998ca 567 return false;
dc8e3a0c
KW
568}
569
54bd77f3
YZ
570bool is_support_sw_smu_xgmi(struct amdgpu_device *adev)
571{
086e1c56 572 if (!is_support_sw_smu(adev))
54bd77f3
YZ
573 return false;
574
575 if (adev->asic_type == CHIP_VEGA20)
576 return true;
577
578 return false;
579}
580
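/*
 * Return the active powerplay table through @table (the hardcoded/custom
 * one if it has been uploaded, the default one otherwise) and return its
 * size in bytes.
 */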
289921b0
KW
581int smu_sys_get_pp_table(struct smu_context *smu, void **table)
582{
583 struct smu_table_context *smu_table = &smu->smu_table;
3697b339 584 uint32_t powerplay_table_size;
289921b0
KW
585
586 if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
587 return -EINVAL;
588
3697b339
EQ
589 mutex_lock(&smu->mutex);
590
289921b0
KW
591 if (smu_table->hardcode_pptable)
592 *table = smu_table->hardcode_pptable;
593 else
594 *table = smu_table->power_play_table;
595
3697b339
EQ
596 powerplay_table_size = smu_table->power_play_table_size;
597
598 mutex_unlock(&smu->mutex);
599
600 return powerplay_table_size;
289921b0
KW
601}
602
603int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size)
604{
605 struct smu_table_context *smu_table = &smu->smu_table;
606 ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
607 int ret = 0;
608
a254bfa2
CG
609 if (!smu->pm_enabled)
610 return -EINVAL;
289921b0
KW
611 if (header->usStructureSize != size) {
612 pr_err("pp table size does not match!\n");
613 return -EIO;
614 }
615
616 mutex_lock(&smu->mutex);
617 if (!smu_table->hardcode_pptable)
618 smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
619 if (!smu_table->hardcode_pptable) {
620 ret = -ENOMEM;
621 goto failed;
622 }
623
624 memcpy(smu_table->hardcode_pptable, buf, size);
625 smu_table->power_play_table = smu_table->hardcode_pptable;
626 smu_table->power_play_table_size = size;
289921b0 627
c39f062e
EQ
628 /*
629 * A special hw_fini action (for Navi1x, disabling the DPMs will be
630 * skipped) may be needed for custom pptable uploading.
631 */
632 smu->uploading_custom_pp_table = true;
633
289921b0
KW
634 ret = smu_reset(smu);
635 if (ret)
636 pr_info("smu reset failed, ret = %d\n", ret);
637
c39f062e
EQ
638 smu->uploading_custom_pp_table = false;
639
289921b0
KW
640failed:
641 mutex_unlock(&smu->mutex);
642 return ret;
643}
644
6b816d73
KW
645int smu_feature_init_dpm(struct smu_context *smu)
646{
647 struct smu_feature *feature = &smu->smu_feature;
648 int ret = 0;
74c958a3 649 uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32];
6b816d73 650
a254bfa2
CG
651 if (!smu->pm_enabled)
652 return ret;
f14a323d 653 mutex_lock(&feature->mutex);
74c958a3 654 bitmap_zero(feature->allowed, SMU_FEATURE_MAX);
f14a323d 655 mutex_unlock(&feature->mutex);
6b816d73 656
74c958a3 657 ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask,
6b816d73
KW
658 SMU_FEATURE_MAX/32);
659 if (ret)
660 return ret;
661
f14a323d 662 mutex_lock(&feature->mutex);
74c958a3
KW
663 bitmap_or(feature->allowed, feature->allowed,
664 (unsigned long *)allowed_feature_mask,
6b816d73 665 feature->feature_num);
f14a323d 666 mutex_unlock(&feature->mutex);
6b816d73
KW
667
668 return ret;
669}
26dd6681 670
6b816d73 671
ffcb08df 672int smu_feature_is_enabled(struct smu_context *smu, enum smu_feature_mask mask)
2f25158d
KW
673{
674 struct smu_feature *feature = &smu->smu_feature;
c0640304 675 int feature_id;
f14a323d
KW
676 int ret = 0;
677
d2f925ff 678 if (smu->is_apu)
ffe61cd6 679 return 1;
51b9121a 680
ffcb08df 681 feature_id = smu_feature_get_index(smu, mask);
c0640304
EQ
682 if (feature_id < 0)
683 return 0;
ffcb08df 684
2f25158d 685 WARN_ON(feature_id > feature->feature_num);
f14a323d
KW
686
687 mutex_lock(&feature->mutex);
688 ret = test_bit(feature_id, feature->enabled);
689 mutex_unlock(&feature->mutex);
690
691 return ret;
2f25158d
KW
692}
693
ffcb08df
HR
694int smu_feature_set_enabled(struct smu_context *smu, enum smu_feature_mask mask,
695 bool enable)
2f25158d
KW
696{
697 struct smu_feature *feature = &smu->smu_feature;
c0640304 698 int feature_id;
f14a323d 699
ffcb08df 700 feature_id = smu_feature_get_index(smu, mask);
c0640304
EQ
701 if (feature_id < 0)
702 return -EINVAL;
ffcb08df 703
2f25158d 704 WARN_ON(feature_id > feature->feature_num);
f14a323d 705
c66846e0
EQ
706 return smu_feature_update_enable_state(smu,
707 1ULL << feature_id,
708 enable);
2f25158d
KW
709}
710
ffcb08df 711int smu_feature_is_supported(struct smu_context *smu, enum smu_feature_mask mask)
2f25158d
KW
712{
713 struct smu_feature *feature = &smu->smu_feature;
c0640304 714 int feature_id;
f14a323d
KW
715 int ret = 0;
716
ffcb08df 717 feature_id = smu_feature_get_index(smu, mask);
c0640304
EQ
718 if (feature_id < 0)
719 return 0;
ffcb08df 720
2f25158d 721 WARN_ON(feature_id > feature->feature_num);
f14a323d
KW
722
723 mutex_lock(&feature->mutex);
724 ret = test_bit(feature_id, feature->supported);
725 mutex_unlock(&feature->mutex);
726
727 return ret;
2f25158d
KW
728}
729
ffcb08df
HR
730int smu_feature_set_supported(struct smu_context *smu,
731 enum smu_feature_mask mask,
2f25158d
KW
732 bool enable)
733{
734 struct smu_feature *feature = &smu->smu_feature;
c0640304 735 int feature_id;
f14a323d
KW
736 int ret = 0;
737
ffcb08df 738 feature_id = smu_feature_get_index(smu, mask);
c0640304
EQ
739 if (feature_id < 0)
740 return -EINVAL;
ffcb08df 741
2f25158d 742 WARN_ON(feature_id > feature->feature_num);
f14a323d 743
029f4153 744 mutex_lock(&feature->mutex);
2f25158d
KW
745 if (enable)
746 test_and_set_bit(feature_id, feature->supported);
747 else
748 test_and_clear_bit(feature_id, feature->supported);
f14a323d
KW
749 mutex_unlock(&feature->mutex);
750
751 return ret;
2f25158d
KW
752}
753
137d63ab
HR
754static int smu_set_funcs(struct amdgpu_device *adev)
755{
07845526
HR
756 struct smu_context *smu = &adev->smu;
757
875dc7c4
EQ
758 if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
759 smu->od_enabled = true;
760
07845526
HR
761 switch (adev->asic_type) {
762 case CHIP_VEGA20:
85f8433f 763 adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
6c45e480
EQ
764 vega20_set_ppt_funcs(smu);
765 break;
2573e870 766 case CHIP_NAVI10:
b02ff126 767 case CHIP_NAVI14:
9ea8da75 768 case CHIP_NAVI12:
6c45e480
EQ
769 navi10_set_ppt_funcs(smu);
770 break;
22e18317 771 case CHIP_ARCTURUS:
85f8433f 772 adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
6c45e480 773 arcturus_set_ppt_funcs(smu);
875dc7c4
EQ
774 /* OD is not supported on Arcturus */
775 smu->od_enabled = false;
07845526 776 break;
5dbbe6a7 777 case CHIP_RENOIR:
6c45e480 778 renoir_set_ppt_funcs(smu);
5dbbe6a7 779 break;
07845526
HR
780 default:
781 return -EINVAL;
782 }
783
137d63ab
HR
784 return 0;
785}
786
787static int smu_early_init(void *handle)
788{
789 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
790 struct smu_context *smu = &adev->smu;
137d63ab
HR
791
792 smu->adev = adev;
a7517677 793 smu->pm_enabled = !!amdgpu_dpm;
fe9c32a6 794 smu->is_apu = false;
137d63ab
HR
795 mutex_init(&smu->mutex);
796
74e07f9d 797 return smu_set_funcs(adev);
137d63ab
HR
798}
799
bee71d26
CG
800static int smu_late_init(void *handle)
801{
802 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
803 struct smu_context *smu = &adev->smu;
a254bfa2
CG
804
805 if (!smu->pm_enabled)
806 return 0;
51548c0f 807
bee71d26
CG
808 smu_handle_task(&adev->smu,
809 smu->smu_dpm.dpm_level,
3697b339
EQ
810 AMD_PP_TASK_COMPLETE_INIT,
811 false);
bee71d26
CG
812
813 return 0;
814}
815
e15da5a4
HR
816int smu_get_atom_data_table(struct smu_context *smu, uint32_t table,
817 uint16_t *size, uint8_t *frev, uint8_t *crev,
818 uint8_t **addr)
819{
820 struct amdgpu_device *adev = smu->adev;
821 uint16_t data_start;
822
823 if (!amdgpu_atom_parse_data_header(adev->mode_info.atom_context, table,
824 size, frev, crev, &data_start))
825 return -EINVAL;
826
827 *addr = (uint8_t *)adev->mode_info.atom_context->bios + data_start;
828
829 return 0;
830}
831
b5624000
HR
832static int smu_initialize_pptable(struct smu_context *smu)
833{
834 /* TODO */
835 return 0;
836}
837
838static int smu_smc_table_sw_init(struct smu_context *smu)
839{
840 int ret;
841
842 ret = smu_initialize_pptable(smu);
843 if (ret) {
844 pr_err("Failed to init smu_initialize_pptable!\n");
845 return ret;
846 }
847
cabd44c0
HR
848 /**
849 * Create the smu_table structure, and init smc tables such as
850 * TABLE_PPTABLE, TABLE_WATERMARKS, TABLE_SMU_METRICS, etc.
851 */
852 ret = smu_init_smc_tables(smu);
853 if (ret) {
854 pr_err("Failed to init smc tables!\n");
855 return ret;
856 }
857
17e6081b
HR
858 /**
859 * Create the smu_power_context structure, and allocate the smu_dpm_context
860 * and its context data to fill in the smu_power_context.
861 */
862 ret = smu_init_power(smu);
863 if (ret) {
864 pr_err("Failed to init smu_init_power!\n");
865 return ret;
866 }
867
b5624000
HR
868 return 0;
869}
870
813ce279
KW
871static int smu_smc_table_sw_fini(struct smu_context *smu)
872{
873 int ret;
874
875 ret = smu_fini_smc_tables(smu);
876 if (ret) {
877 pr_err("Failed to smu_fini_smc_tables!\n");
878 return ret;
879 }
880
881 return 0;
882}
883
137d63ab
HR
884static int smu_sw_init(void *handle)
885{
886 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
887 struct smu_context *smu = &adev->smu;
888 int ret;
889
0b51d993 890 smu->pool_size = adev->pm.smu_prv_buffer_size;
6b816d73 891 smu->smu_feature.feature_num = SMU_FEATURE_MAX;
f14a323d 892 mutex_init(&smu->smu_feature.mutex);
6b816d73
KW
893 bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
894 bitmap_zero(smu->smu_feature.enabled, SMU_FEATURE_MAX);
895 bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);
767acabd
KW
896
897 mutex_init(&smu->smu_baco.mutex);
898 smu->smu_baco.state = SMU_BACO_STATE_EXIT;
899 smu->smu_baco.platform_support = false;
900
95f71bfa 901 mutex_init(&smu->sensor_lock);
fa7df751 902 mutex_init(&smu->metrics_lock);
95f71bfa 903
2e069391 904 smu->watermarks_bitmap = 0;
16177fd0
CG
905 smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
906 smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
907
908 smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
909 smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
910 smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
911 smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
912 smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
913 smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
914 smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
915 smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;
916
917 smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
918 smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
919 smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
920 smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
921 smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
922 smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
923 smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM;
379a4454 924 smu->display_config = &adev->pm.pm_display_cfg;
0b51d993 925
9a431038
CG
926 smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
927 smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
137d63ab
HR
928 ret = smu_init_microcode(smu);
929 if (ret) {
930 pr_err("Failed to load smu firmware!\n");
931 return ret;
932 }
933
b5624000
HR
934 ret = smu_smc_table_sw_init(smu);
935 if (ret) {
936 pr_err("Failed to sw init smc table!\n");
937 return ret;
938 }
939
4d7fd9e2
EQ
940 ret = smu_register_irq_handler(smu);
941 if (ret) {
942 pr_err("Failed to register smc irq handler!\n");
943 return ret;
944 }
945
137d63ab
HR
946 return 0;
947}
948
949static int smu_sw_fini(void *handle)
950{
951 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
813ce279
KW
952 struct smu_context *smu = &adev->smu;
953 int ret;
137d63ab 954
4d7fd9e2
EQ
955 kfree(smu->irq_source);
956 smu->irq_source = NULL;
957
813ce279
KW
958 ret = smu_smc_table_sw_fini(smu);
959 if (ret) {
960 pr_err("Failed to sw fini smc table!\n");
961 return ret;
962 }
963
8bf16963
KW
964 ret = smu_fini_power(smu);
965 if (ret) {
966 pr_err("Failed to fini smu power!\n");
967 return ret;
968 }
969
137d63ab
HR
970 return 0;
971}
972
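/*
 * Allocate the VRAM bos backing the SMU tables: a dedicated bo for the
 * tool (PMSTATUSLOG) table when it has a non-zero size, plus one shared
 * driver table bo sized to the largest of the remaining tables.
 */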
9c9a1747
HR
973static int smu_init_fb_allocations(struct smu_context *smu)
974{
f96357a9
KW
975 struct amdgpu_device *adev = smu->adev;
976 struct smu_table_context *smu_table = &smu->smu_table;
977 struct smu_table *tables = smu_table->tables;
ce0d0ec3
EQ
978 struct smu_table *driver_table = &(smu_table->driver_table);
979 uint32_t max_table_size = 0;
ea6d8811 980 int ret, i;
f96357a9 981
ce0d0ec3
EQ
982 /* VRAM allocation for tool table */
983 if (tables[SMU_TABLE_PMSTATUSLOG].size) {
f96357a9 984 ret = amdgpu_bo_create_kernel(adev,
ce0d0ec3
EQ
985 tables[SMU_TABLE_PMSTATUSLOG].size,
986 tables[SMU_TABLE_PMSTATUSLOG].align,
987 tables[SMU_TABLE_PMSTATUSLOG].domain,
988 &tables[SMU_TABLE_PMSTATUSLOG].bo,
989 &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
990 &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
991 if (ret) {
992 pr_err("VRAM allocation for tool table failed!\n");
993 return ret;
994 }
f96357a9
KW
995 }
996
ce0d0ec3
EQ
997 /* VRAM allocation for driver table */
998 for (i = 0; i < SMU_TABLE_COUNT; i++) {
f96357a9
KW
999 if (tables[i].size == 0)
1000 continue;
f96357a9 1001
ce0d0ec3
EQ
1002 if (i == SMU_TABLE_PMSTATUSLOG)
1003 continue;
1004
1005 if (max_table_size < tables[i].size)
1006 max_table_size = tables[i].size;
f96357a9 1007 }
ce0d0ec3
EQ
1008
1009 driver_table->size = max_table_size;
1010 driver_table->align = PAGE_SIZE;
1011 driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM;
1012
1013 ret = amdgpu_bo_create_kernel(adev,
1014 driver_table->size,
1015 driver_table->align,
1016 driver_table->domain,
1017 &driver_table->bo,
1018 &driver_table->mc_address,
1019 &driver_table->cpu_addr);
1020 if (ret) {
1021 pr_err("VRAM allocation for driver table failed!\n");
1022 if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
1023 amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
1024 &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
1025 &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
1026 }
1027
f96357a9 1028 return ret;
9c9a1747
HR
1029}
1030
f96357a9
KW
1031static int smu_fini_fb_allocations(struct smu_context *smu)
1032{
1033 struct smu_table_context *smu_table = &smu->smu_table;
1034 struct smu_table *tables = smu_table->tables;
ce0d0ec3 1035 struct smu_table *driver_table = &(smu_table->driver_table);
f96357a9 1036
871e5e72 1037 if (!tables)
289921b0 1038 return 0;
f96357a9 1039
ce0d0ec3
EQ
1040 if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
1041 amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
1042 &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
1043 &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
1044
1045 amdgpu_bo_free_kernel(&driver_table->bo,
1046 &driver_table->mc_address,
1047 &driver_table->cpu_addr);
f96357a9
KW
1048
1049 return 0;
1050}
f6a6b952 1051
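/*
 * Bring up the SMC tables. With @initialize true (first hw_init) this also
 * reads the vbios bootup values, sets up and parses the pptable and
 * allocates the table bos; on resume (@initialize false) only the
 * already-initialized state is re-applied to the SMC.
 */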
4733cc72
LG
1052static int smu_smc_table_hw_init(struct smu_context *smu,
1053 bool initialize)
05cadcd3 1054{
f067499b 1055 struct amdgpu_device *adev = smu->adev;
05cadcd3
HR
1056 int ret;
1057
f067499b
LG
1058 if (smu_is_dpm_running(smu) && adev->in_suspend) {
1059 pr_info("dpm has been enabled\n");
1060 return 0;
1061 }
1062
22e18317
EQ
1063 if (adev->asic_type != CHIP_ARCTURUS) {
1064 ret = smu_init_display_count(smu, 0);
1065 if (ret)
1066 return ret;
1067 }
56c53ad6 1068
4733cc72 1069 if (initialize) {
4009b9b5
HR
1070 /* get boot_values from vbios to set revision, gfxclk, etc. */
1071 ret = smu_get_vbios_bootup_values(smu);
4733cc72
LG
1072 if (ret)
1073 return ret;
05cadcd3 1074
b55c83a7 1075 ret = smu_setup_pptable(smu);
4733cc72
LG
1076 if (ret)
1077 return ret;
a6b35900 1078
309bce0c
EQ
1079 ret = smu_get_clk_info_from_vbios(smu);
1080 if (ret)
1081 return ret;
1082
4733cc72
LG
1083 /*
1084 * check that the format_revision in vbios matches the pptable header
1085 * version, and that the structure size is not 0.
1086 */
4733cc72
LG
1087 ret = smu_check_pptable(smu);
1088 if (ret)
1089 return ret;
46126e6d 1090
4733cc72
LG
1091 /*
1092 * allocate vram bos to store smc table contents.
1093 */
1094 ret = smu_init_fb_allocations(smu);
1095 if (ret)
1096 return ret;
9c9a1747 1097
4733cc72
LG
1098 /*
1099 * Parse pptable format and fill PPTable_t smc_pptable to
1100 * smu_table_context structure. And read the smc_dpm_table from vbios,
1101 * then fill it into smc_pptable.
1102 */
1103 ret = smu_parse_pptable(smu);
1104 if (ret)
1105 return ret;
9e4848a4 1106
4733cc72
LG
1107 /*
1108 * Send the GetDriverIfVersion msg to check that the returned value is
1109 * equal to the DRIVER_IF_VERSION in the smc header.
1110 */
1111 ret = smu_check_fw_version(smu);
1112 if (ret)
1113 return ret;
1114 }
a751b095 1115
7c8bcaf4 1116 /* smu_dump_pptable(smu); */
c2a801af 1117 if (!amdgpu_sriov_vf(adev)) {
ce0d0ec3
EQ
1118 ret = smu_set_driver_table_location(smu);
1119 if (ret)
1120 return ret;
1121
c2a801af
JZ
1122 /*
1123 * Copy pptable bo in the vram to smc with SMU MSGs such as
1124 * SetDriverDramAddr and TransferTableDram2Smu.
1125 */
1126 ret = smu_write_pptable(smu);
1127 if (ret)
1128 return ret;
7c8bcaf4 1129
c2a801af
JZ
1130 /* issue Run*Btc msg */
1131 ret = smu_run_btc(smu);
1132 if (ret)
1133 return ret;
1134 ret = smu_feature_set_allowed_mask(smu);
1135 if (ret)
1136 return ret;
6b816d73 1137
c2a801af
JZ
1138 ret = smu_system_features_control(smu, true);
1139 if (ret)
1140 return ret;
1cf8c930
EQ
1141
1142 if (adev->asic_type == CHIP_NAVI10) {
1143 if ((adev->pdev->device == 0x731f && (adev->pdev->revision == 0xc2 ||
1144 adev->pdev->revision == 0xc3 ||
1145 adev->pdev->revision == 0xca ||
1146 adev->pdev->revision == 0xcb)) ||
1147 (adev->pdev->device == 0x66af && (adev->pdev->revision == 0xf3 ||
1148 adev->pdev->revision == 0xf4 ||
1149 adev->pdev->revision == 0xf5 ||
1150 adev->pdev->revision == 0xf6))) {
1151 ret = smu_disable_umc_cdr_12gbps_workaround(smu);
1152 if (ret) {
1153 pr_err("Workaround failed to disable UMC CDR feature on 12Gbps SKU!\n");
1154 return ret;
1155 }
1156 }
1157 }
c2a801af 1158 }
22e18317
EQ
1159 if (adev->asic_type != CHIP_ARCTURUS) {
1160 ret = smu_notify_display_change(smu);
1161 if (ret)
1162 return ret;
e1c6f86a 1163
22e18317
EQ
1164 /*
1165 * Set min deep sleep dce fclk with bootup value from vbios via
1166 * SetMinDeepSleepDcefclk MSG.
1167 */
1168 ret = smu_set_min_dcef_deep_sleep(smu);
1169 if (ret)
1170 return ret;
1171 }
a7ebb6d2 1172
d6a4aa82
LG
1173 /*
1174 * Set initial values (read from vbios) in the dpm tables context, such as
1175 * gfxclk, memclk, dcefclk, etc. And enable the DPM feature for each
1176 * type of clock.
1177 */
4733cc72 1178 if (initialize) {
723d4735 1179 ret = smu_populate_smc_tables(smu);
4733cc72
LG
1180 if (ret)
1181 return ret;
d6a4aa82 1182
4733cc72
LG
1183 ret = smu_init_max_sustainable_clocks(smu);
1184 if (ret)
1185 return ret;
1186 }
7457cf02 1187
fddbfb1c
KF
1188 if (adev->asic_type != CHIP_ARCTURUS) {
1189 ret = smu_override_pcie_parameters(smu);
1190 if (ret)
1191 return ret;
1192 }
1193
8f30a16d 1194 ret = smu_set_default_od_settings(smu, initialize);
2c80abe3
LG
1195 if (ret)
1196 return ret;
1197
4733cc72
LG
1198 if (initialize) {
1199 ret = smu_populate_umd_state_clk(smu);
1200 if (ret)
1201 return ret;
133438fa 1202
73abde4d 1203 ret = smu_get_power_limit(smu, &smu->default_power_limit, false, false);
4733cc72
LG
1204 if (ret)
1205 return ret;
1206 }
e66adb1e 1207
206bc589
HR
1208 /*
1209 * Set PMSTATUSLOG table bo address with SetToolsDramAddr MSG for tools.
1210 */
c2a801af
JZ
1211 if (!amdgpu_sriov_vf(adev)) {
1212 ret = smu_set_tool_table_location(smu);
1213 }
a254bfa2
CG
1214 if (!smu_is_dpm_running(smu))
1215 pr_info("dpm has been disabled\n");
1216
206bc589 1217 return ret;
05cadcd3
HR
1218}
1219
e65d45f2
HR
1220/**
1221 * smu_alloc_memory_pool - allocate memory pool in the system memory
1222 *
1223 * @smu: smu_context pointer
1224 *
1225 * This memory pool is for SMC use; the SetSystemVirtualDramAddr and
1226 * DramLogSetDramAddr msgs notify the SMC of its location.
1227 *
1228 * Returns 0 on success, error on failure.
1229 */
1230static int smu_alloc_memory_pool(struct smu_context *smu)
1231{
0b51d993
KW
1232 struct amdgpu_device *adev = smu->adev;
1233 struct smu_table_context *smu_table = &smu->smu_table;
1234 struct smu_table *memory_pool = &smu_table->memory_pool;
1235 uint64_t pool_size = smu->pool_size;
1236 int ret = 0;
1237
1238 if (pool_size == SMU_MEMORY_POOL_SIZE_ZERO)
1239 return ret;
1240
1241 memory_pool->size = pool_size;
1242 memory_pool->align = PAGE_SIZE;
1243 memory_pool->domain = AMDGPU_GEM_DOMAIN_GTT;
1244
1245 switch (pool_size) {
1246 case SMU_MEMORY_POOL_SIZE_256_MB:
1247 case SMU_MEMORY_POOL_SIZE_512_MB:
1248 case SMU_MEMORY_POOL_SIZE_1_GB:
1249 case SMU_MEMORY_POOL_SIZE_2_GB:
1250 ret = amdgpu_bo_create_kernel(adev,
1251 memory_pool->size,
1252 memory_pool->align,
1253 memory_pool->domain,
1254 &memory_pool->bo,
1255 &memory_pool->mc_address,
1256 &memory_pool->cpu_addr);
1257 break;
1258 default:
1259 break;
1260 }
1261
1262 return ret;
e65d45f2
HR
1263}
1264
0b51d993
KW
1265static int smu_free_memory_pool(struct smu_context *smu)
1266{
1267 struct smu_table_context *smu_table = &smu->smu_table;
1268 struct smu_table *memory_pool = &smu_table->memory_pool;
0b51d993
KW
1269
1270 if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO)
c7d5dfa8 1271 return 0;
0b51d993
KW
1272
1273 amdgpu_bo_free_kernel(&memory_pool->bo,
1274 &memory_pool->mc_address,
1275 &memory_pool->cpu_addr);
1276
1277 memset(memory_pool, 0, sizeof(struct smu_table));
1278
c7d5dfa8 1279 return 0;
0b51d993 1280}
4733cc72 1281
f7e3a577 1282static int smu_start_smc_engine(struct smu_context *smu)
137d63ab 1283{
f7e3a577
EQ
1284 struct amdgpu_device *adev = smu->adev;
1285 int ret = 0;
137d63ab 1286
22e18317
EQ
1287 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1288 if (adev->asic_type < CHIP_NAVI10) {
6c45e480
EQ
1289 if (smu->ppt_funcs->load_microcode) {
1290 ret = smu->ppt_funcs->load_microcode(smu);
3697b339
EQ
1291 if (ret)
1292 return ret;
1293 }
0186eb96 1294 }
e11c4fd5
HR
1295 }
1296
6c45e480
EQ
1297 if (smu->ppt_funcs->check_fw_status) {
1298 ret = smu->ppt_funcs->check_fw_status(smu);
3697b339
EQ
1299 if (ret)
1300 pr_err("SMC is not ready\n");
1301 }
f7e3a577
EQ
1302
1303 return ret;
1304}
1305
1306static int smu_hw_init(void *handle)
1307{
1308 int ret;
1309 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1310 struct smu_context *smu = &adev->smu;
1311
1312 ret = smu_start_smc_engine(smu);
22e18317 1313 if (ret) {
f7e3a577 1314 pr_err("SMU is not ready yet!\n");
22e18317
EQ
1315 return ret;
1316 }
1317
d2f925ff 1318 if (smu->is_apu) {
97222cfa 1319 smu_powergate_sdma(&adev->smu, false);
4a629668 1320 smu_powergate_vcn(&adev->smu, false);
27f7ff32 1321 smu_powergate_jpeg(&adev->smu, false);
f8391101 1322 smu_set_gfx_cgpg(&adev->smu, true);
4a629668 1323 }
97222cfa 1324
c2a801af
JZ
1325 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
1326 return 0;
1327
d3a593e9
EQ
1328 if (!smu->pm_enabled)
1329 return 0;
1330
6b816d73
KW
1331 ret = smu_feature_init_dpm(smu);
1332 if (ret)
1333 goto failed;
1334
4733cc72 1335 ret = smu_smc_table_hw_init(smu, true);
05cadcd3
HR
1336 if (ret)
1337 goto failed;
137d63ab 1338
e65d45f2
HR
1339 ret = smu_alloc_memory_pool(smu);
1340 if (ret)
1341 goto failed;
1342
c56de9e8
HR
1343 /*
1344 * Use the SetSystemVirtualDramAddr and DramLogSetDramAddr msgs to notify
1345 * the SMC of the pool location.
1346 */
1347 ret = smu_notify_memory_pool_location(smu);
1348 if (ret)
1349 goto failed;
1350
74ba3553
LG
1351 ret = smu_start_thermal_control(smu);
1352 if (ret)
1353 goto failed;
1354
a254bfa2
CG
1355 if (!smu->pm_enabled)
1356 adev->pm.dpm_enabled = false;
1357 else
948f540c 1358 adev->pm.dpm_enabled = true; /* TODO: will set the dpm_enabled flag once VCN and DAL DPM are workable */
a317cf03 1359
137d63ab
HR
1360 pr_info("SMU is initialized successfully!\n");
1361
1362 return 0;
05cadcd3
HR
1363
1364failed:
05cadcd3 1365 return ret;
137d63ab
HR
1366}
1367
faa695c7
EQ
1368static int smu_stop_dpms(struct smu_context *smu)
1369{
6a876844 1370 return smu_system_features_control(smu, false);
faa695c7
EQ
1371}
1372
137d63ab
HR
1373static int smu_hw_fini(void *handle)
1374{
1375 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1376 struct smu_context *smu = &adev->smu;
afba8282 1377 struct smu_table_context *table_context = &smu->smu_table;
f96357a9 1378 int ret = 0;
137d63ab 1379
c2a801af
JZ
1380 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
1381 return 0;
1382
d2f925ff 1383 if (smu->is_apu) {
97222cfa 1384 smu_powergate_sdma(&adev->smu, true);
4a629668 1385 smu_powergate_vcn(&adev->smu, true);
27f7ff32 1386 smu_powergate_jpeg(&adev->smu, true);
4a629668 1387 }
97222cfa 1388
29a45960
EQ
1389 if (!smu->pm_enabled)
1390 return 0;
1391
c2a801af
JZ
1392 if (!amdgpu_sriov_vf(adev)) {
1393 ret = smu_stop_thermal_control(smu);
c39f062e 1394 if (ret) {
c2a801af 1395 pr_warn("Failed to stop thermal control!\n");
c39f062e
EQ
1396 return ret;
1397 }
c2a801af
JZ
1398
1399 /*
1400 * For custom pptable uploading, skip the DPM features
1401 * disable process on Navi1x ASICs.
1402 * - The gfx related features are under the control of
1403 * the RLC on those ASICs. An RLC reinitialization would be
1404 * needed to reenable them, which costs much more
1405 * effort.
1406 *
1407 * - SMU firmware can handle the DPM reenablement
1408 * properly.
1409 */
1410 if (!smu->uploading_custom_pp_table ||
1411 !((adev->asic_type >= CHIP_NAVI10) &&
1412 (adev->asic_type <= CHIP_NAVI12))) {
1413 ret = smu_stop_dpms(smu);
1414 if (ret) {
1415 pr_warn("Failed to stop DPMs!\n");
1416 return ret;
1417 }
1418 }
faa695c7
EQ
1419 }
1420
6316f51c
HR
1421 kfree(table_context->driver_pptable);
1422 table_context->driver_pptable = NULL;
afba8282 1423
6316f51c
HR
1424 kfree(table_context->max_sustainable_clocks);
1425 table_context->max_sustainable_clocks = NULL;
7457cf02 1426
6316f51c
HR
1427 kfree(table_context->overdrive_table);
1428 table_context->overdrive_table = NULL;
2c80abe3 1429
f96357a9
KW
1430 ret = smu_fini_fb_allocations(smu);
1431 if (ret)
1432 return ret;
1433
0b51d993
KW
1434 ret = smu_free_memory_pool(smu);
1435 if (ret)
1436 return ret;
1437
137d63ab
HR
1438 return 0;
1439}
1440
289921b0
KW
1441int smu_reset(struct smu_context *smu)
1442{
1443 struct amdgpu_device *adev = smu->adev;
1444 int ret = 0;
1445
1446 ret = smu_hw_fini(adev);
1447 if (ret)
1448 return ret;
1449
1450 ret = smu_hw_init(adev);
1451 if (ret)
1452 return ret;
1453
1454 return ret;
1455}
1456
137d63ab
HR
1457static int smu_suspend(void *handle)
1458{
4733cc72 1459 int ret;
137d63ab 1460 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4733cc72 1461 struct smu_context *smu = &adev->smu;
068ad870 1462 bool baco_feature_is_enabled = false;
1463
29a45960
EQ
1464 if (!smu->pm_enabled)
1465 return 0;
1466
d2f925ff 1467 if (!smu->is_apu)
068ad870 1468 baco_feature_is_enabled = smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT);
137d63ab 1469
f067499b 1470 ret = smu_system_features_control(smu, false);
4733cc72
LG
1471 if (ret)
1472 return ret;
1473
4b4c5638 1474 if (baco_feature_is_enabled) {
767acabd
KW
1475 ret = smu_feature_set_enabled(smu, SMU_FEATURE_BACO_BIT, true);
1476 if (ret) {
1477 pr_warn("set BACO feature enabled failed, return %d\n", ret);
1478 return ret;
1479 }
1480 }
1481
4733cc72
LG
1482 smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);
1483
e17a512a
JX
1484 if (adev->asic_type >= CHIP_NAVI10 &&
1485 adev->gfx.rlc.funcs->stop)
1486 adev->gfx.rlc.funcs->stop(adev);
f509be18 1487 if (smu->is_apu)
1488 smu_set_gfx_cgpg(&adev->smu, false);
e17a512a 1489
137d63ab
HR
1490 return 0;
1491}
1492
1493static int smu_resume(void *handle)
1494{
1495 int ret;
1496 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1497 struct smu_context *smu = &adev->smu;
1498
895bd048
JZ
1499 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
1500 return 0;
1501
1502 if (!smu->pm_enabled)
1503 return 0;
1504
fad3ecf2
HR
1505 pr_info("SMU is resuming...\n");
1506
f7e3a577
EQ
1507 ret = smu_start_smc_engine(smu);
1508 if (ret) {
1509 pr_err("SMU is not ready yet!\n");
fa073f13 1510 goto failed;
f7e3a577
EQ
1511 }
1512
4733cc72 1513 ret = smu_smc_table_hw_init(smu, false);
fad3ecf2
HR
1514 if (ret)
1515 goto failed;
1516
4733cc72 1517 ret = smu_start_thermal_control(smu);
fad3ecf2
HR
1518 if (ret)
1519 goto failed;
137d63ab 1520
f8391101
PL
1521 if (smu->is_apu)
1522 smu_set_gfx_cgpg(&adev->smu, true);
1523
5441dd0e
KF
1524 smu->disable_uclk_switch = 0;
1525
fad3ecf2
HR
1526 pr_info("SMU is resumed successfully!\n");
1527
137d63ab 1528 return 0;
3697b339 1529
fad3ecf2 1530failed:
fad3ecf2 1531 return ret;
137d63ab
HR
1532}
1533
94ed6d0c
HR
1534int smu_display_configuration_change(struct smu_context *smu,
1535 const struct amd_pp_display_configuration *display_config)
1536{
1537 int index = 0;
1538 int num_of_active_display = 0;
1539
a254bfa2 1540 if (!smu->pm_enabled || !is_support_sw_smu(smu->adev))
94ed6d0c
HR
1541 return -EINVAL;
1542
1543 if (!display_config)
1544 return -EINVAL;
1545
1546 mutex_lock(&smu->mutex);
1547
6c45e480
EQ
1548 if (smu->ppt_funcs->set_deep_sleep_dcefclk)
1549 smu->ppt_funcs->set_deep_sleep_dcefclk(smu,
3697b339 1550 display_config->min_dcef_deep_sleep_set_clk / 100);
94ed6d0c
HR
1551
1552 for (index = 0; index < display_config->num_path_including_non_display; index++) {
1553 if (display_config->displays[index].controller_id != 0)
1554 num_of_active_display++;
1555 }
1556
1557 smu_set_active_display_count(smu, num_of_active_display);
1558
1559 smu_store_cc6_data(smu, display_config->cpu_pstate_separation_time,
1560 display_config->cpu_cc6_disable,
1561 display_config->cpu_pstate_disable,
1562 display_config->nb_pstate_switch_disable);
1563
1564 mutex_unlock(&smu->mutex);
1565
1566 return 0;
1567}
1568
5e2d3881
HR
1569static int smu_get_clock_info(struct smu_context *smu,
1570 struct smu_clock_info *clk_info,
1571 enum smu_perf_level_designation designation)
1572{
1573 int ret;
1574 struct smu_performance_level level = {0};
1575
1576 if (!clk_info)
1577 return -EINVAL;
1578
1579 ret = smu_get_perf_level(smu, PERF_LEVEL_ACTIVITY, &level);
1580 if (ret)
1581 return -EINVAL;
1582
1583 clk_info->min_mem_clk = level.memory_clock;
1584 clk_info->min_eng_clk = level.core_clock;
1585 clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;
1586
1587 ret = smu_get_perf_level(smu, designation, &level);
1588 if (ret)
1589 return -EINVAL;
1590
1591 clk_info->min_mem_clk = level.memory_clock;
1592 clk_info->min_eng_clk = level.core_clock;
1593 clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;
1594
1595 return 0;
1596}
1597
1598int smu_get_current_clocks(struct smu_context *smu,
1599 struct amd_pp_clock_info *clocks)
1600{
1601 struct amd_pp_simple_clock_info simple_clocks = {0};
1602 struct smu_clock_info hw_clocks;
1603 int ret = 0;
1604
1605 if (!is_support_sw_smu(smu->adev))
1606 return -EINVAL;
1607
1608 mutex_lock(&smu->mutex);
1609
1610 smu_get_dal_power_level(smu, &simple_clocks);
1611
1612 if (smu->support_power_containment)
1613 ret = smu_get_clock_info(smu, &hw_clocks,
1614 PERF_LEVEL_POWER_CONTAINMENT);
1615 else
1616 ret = smu_get_clock_info(smu, &hw_clocks, PERF_LEVEL_ACTIVITY);
1617
1618 if (ret) {
1619 pr_err("Error in smu_get_clock_info\n");
1620 goto failed;
1621 }
1622
1623 clocks->min_engine_clock = hw_clocks.min_eng_clk;
1624 clocks->max_engine_clock = hw_clocks.max_eng_clk;
1625 clocks->min_memory_clock = hw_clocks.min_mem_clk;
1626 clocks->max_memory_clock = hw_clocks.max_mem_clk;
1627 clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
1628 clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;
1629 clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
1630 clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
1631
1632 if (simple_clocks.level == 0)
1633 clocks->max_clocks_state = PP_DAL_POWERLEVEL_7;
1634 else
1635 clocks->max_clocks_state = simple_clocks.level;
1636
1637 if (!smu_get_current_shallow_sleep_clocks(smu, &hw_clocks)) {
1638 clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
1639 clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
1640 }
1641
1642failed:
1643 mutex_unlock(&smu->mutex);
1644 return ret;
1645}
1646
137d63ab
HR
1647static int smu_set_clockgating_state(void *handle,
1648 enum amd_clockgating_state state)
1649{
1650 return 0;
1651}
1652
1653static int smu_set_powergating_state(void *handle,
1654 enum amd_powergating_state state)
1655{
1656 return 0;
1657}
1658
49d27e91
CG
1659static int smu_enable_umd_pstate(void *handle,
1660 enum amd_dpm_forced_level *level)
1661{
1662 uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
1663 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
1664 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
1665 AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
1666
1667 struct smu_context *smu = (struct smu_context*)(handle);
1668 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
af1ec44f
PL
1669
1670 if (!smu->is_apu && (!smu->pm_enabled || !smu_dpm_ctx->dpm_context))
49d27e91
CG
1671 return -EINVAL;
1672
1673 if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
1674 /* enter umd pstate, save current level, disable gfx cg */
1675 if (*level & profile_mode_mask) {
1676 smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
1677 smu_dpm_ctx->enable_umd_pstate = true;
1678 amdgpu_device_ip_set_clockgating_state(smu->adev,
1679 AMD_IP_BLOCK_TYPE_GFX,
1680 AMD_CG_STATE_UNGATE);
1681 amdgpu_device_ip_set_powergating_state(smu->adev,
1682 AMD_IP_BLOCK_TYPE_GFX,
1683 AMD_PG_STATE_UNGATE);
1684 }
1685 } else {
1686 /* exit umd pstate, restore level, enable gfx cg */
1687 if (!(*level & profile_mode_mask)) {
1688 if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
1689 *level = smu_dpm_ctx->saved_dpm_level;
1690 smu_dpm_ctx->enable_umd_pstate = false;
1691 amdgpu_device_ip_set_clockgating_state(smu->adev,
1692 AMD_IP_BLOCK_TYPE_GFX,
1693 AMD_CG_STATE_GATE);
1694 amdgpu_device_ip_set_powergating_state(smu->adev,
1695 AMD_IP_BLOCK_TYPE_GFX,
1696 AMD_PG_STATE_GATE);
1697 }
1698 }
1699
1700 return 0;
1701}
1702
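/*
 * Re-evaluate the power state: optionally push display config changes,
 * apply the clock adjust rules, switch to the requested performance
 * @level if it changed, and re-apply the highest-priority workload
 * profile when not in manual mode.
 */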
bc0fcffd
LG
1703int smu_adjust_power_state_dynamic(struct smu_context *smu,
1704 enum amd_dpm_forced_level level,
1705 bool skip_display_settings)
1706{
1707 int ret = 0;
1708 int index = 0;
bc0fcffd
LG
1709 long workload;
1710 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1711
a254bfa2
CG
1712 if (!smu->pm_enabled)
1713 return -EINVAL;
780f3a9c 1714
bc0fcffd
LG
1715 if (!skip_display_settings) {
1716 ret = smu_display_config_changed(smu);
1717 if (ret) {
1718 pr_err("Failed to change display config!");
1719 return ret;
1720 }
1721 }
1722
1723 ret = smu_apply_clocks_adjust_rules(smu);
1724 if (ret) {
1725 pr_err("Failed to apply clocks adjust rules!");
1726 return ret;
1727 }
1728
1729 if (!skip_display_settings) {
19796597 1730 ret = smu_notify_smc_display_config(smu);
bc0fcffd
LG
1731 if (ret) {
1732 pr_err("Failed to notify smc display config!");
1733 return ret;
1734 }
1735 }
1736
1737 if (smu_dpm_ctx->dpm_level != level) {
ebf8fc31
KW
1738 ret = smu_asic_set_performance_level(smu, level);
1739 if (ret) {
337443d0
AD
1740 pr_err("Failed to set performance level!");
1741 return ret;
bc0fcffd 1742 }
780f3a9c
EQ
1743
1744 /* update the saved copy */
1745 smu_dpm_ctx->dpm_level = level;
bc0fcffd
LG
1746 }
1747
1748 if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
1749 index = fls(smu->workload_mask);
1750 index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
1751 workload = smu->workload_setting[index];
1752
1753 if (smu->power_profile_mode != workload)
3697b339 1754 smu_set_power_profile_mode(smu, &workload, 0, false);
bc0fcffd
LG
1755 }
1756
1757 return ret;
1758}
1759
1760int smu_handle_task(struct smu_context *smu,
1761 enum amd_dpm_forced_level level,
3697b339
EQ
1762 enum amd_pp_task task_id,
1763 bool lock_needed)
bc0fcffd
LG
1764{
1765 int ret = 0;
1766
3697b339
EQ
1767 if (lock_needed)
1768 mutex_lock(&smu->mutex);
1769
bc0fcffd
LG
1770 switch (task_id) {
1771 case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
1772 ret = smu_pre_display_config_changed(smu);
1773 if (ret)
3697b339 1774 goto out;
bc0fcffd
LG
1775 ret = smu_set_cpu_power_state(smu);
1776 if (ret)
3697b339 1777 goto out;
bc0fcffd
LG
1778 ret = smu_adjust_power_state_dynamic(smu, level, false);
1779 break;
1780 case AMD_PP_TASK_COMPLETE_INIT:
1781 case AMD_PP_TASK_READJUST_POWER_STATE:
1782 ret = smu_adjust_power_state_dynamic(smu, level, true);
1783 break;
1784 default:
1785 break;
1786 }
1787
3697b339
EQ
1788out:
1789 if (lock_needed)
1790 mutex_unlock(&smu->mutex);
1791
bc0fcffd
LG
1792 return ret;
1793}
1794
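/*
 * Set or clear a workload profile bit in smu->workload_mask, then
 * re-apply the highest-priority profile that remains selected (unless
 * the DPM level is manual).
 */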
4abc1765
EQ
1795int smu_switch_power_profile(struct smu_context *smu,
1796 enum PP_SMC_POWER_PROFILE type,
1797 bool en)
1798{
1799 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1800 long workload;
1801 uint32_t index;
1802
1803 if (!smu->pm_enabled)
1804 return -EINVAL;
1805
1806 if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
1807 return -EINVAL;
1808
1809 mutex_lock(&smu->mutex);
1810
1811 if (!en) {
1812 smu->workload_mask &= ~(1 << smu->workload_prority[type]);
1813 index = fls(smu->workload_mask);
1814 index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
1815 workload = smu->workload_setting[index];
1816 } else {
1817 smu->workload_mask |= (1 << smu->workload_prority[type]);
1818 index = fls(smu->workload_mask);
1819 index = index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
1820 workload = smu->workload_setting[index];
1821 }
1822
1823 if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
3697b339 1824 smu_set_power_profile_mode(smu, &workload, 0, false);
4abc1765
EQ
1825
1826 mutex_unlock(&smu->mutex);
1827
1828 return 0;
1829}
1830
a38470f0
KW
1831enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu)
1832{
1833 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
8e33376b 1834 enum amd_dpm_forced_level level;
a38470f0 1835
af1ec44f 1836 if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
a38470f0
KW
1837 return -EINVAL;
1838
1839 mutex_lock(&(smu->mutex));
8e33376b 1840 level = smu_dpm_ctx->dpm_level;
a38470f0
KW
1841 mutex_unlock(&(smu->mutex));
1842
8e33376b 1843 return level;
a38470f0
KW
1844}
1845
1846int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
1847{
a38470f0 1848 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
780f3a9c 1849 int ret = 0;
a38470f0 1850
af1ec44f 1851 if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
a38470f0
KW
1852 return -EINVAL;
1853
3697b339
EQ
1854 mutex_lock(&smu->mutex);
1855
780f3a9c 1856 ret = smu_enable_umd_pstate(smu, &level);
3697b339
EQ
1857 if (ret) {
1858 mutex_unlock(&smu->mutex);
6f6a7bba 1859 return ret;
3697b339 1860 }
a38470f0 1861
780f3a9c 1862 ret = smu_handle_task(smu, level,
3697b339
EQ
1863 AMD_PP_TASK_READJUST_POWER_STATE,
1864 false);
1865
1866 mutex_unlock(&smu->mutex);
a38470f0
KW
1867
1868 return ret;
1869}
1870
2e13c755 1871int smu_set_display_count(struct smu_context *smu, uint32_t count)
1872{
1873 int ret = 0;
1874
1875 mutex_lock(&smu->mutex);
1876 ret = smu_init_display_count(smu, count);
1877 mutex_unlock(&smu->mutex);
1878
1879 return ret;
1880}
1881
f78c47f6
EQ
1882int smu_force_clk_levels(struct smu_context *smu,
1883 enum smu_clk_type clk_type,
3697b339
EQ
1884 uint32_t mask,
1885 bool lock_needed)
f78c47f6
EQ
1886{
1887 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1888 int ret = 0;
1889
1890 if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
1891 pr_debug("force clock level is for dpm manual mode only.\n");
1892 return -EINVAL;
1893 }
1894
3697b339
EQ
1895 if (lock_needed)
1896 mutex_lock(&smu->mutex);
1897
f78c47f6
EQ
1898 if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels)
1899 ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask);
1900
3697b339
EQ
1901 if (lock_needed)
1902 mutex_unlock(&smu->mutex);
1903
f78c47f6
EQ
1904 return ret;
1905}
1906
0e0b89c0
EQ
1907int smu_set_mp1_state(struct smu_context *smu,
1908 enum pp_mp1_state mp1_state)
1909{
1910 uint16_t msg;
1911 int ret;
1912
1913 /*
1914 * The SMC is not fully ready. That may be
1915 * expected as the IP may be masked.
1916 * So, just return without error.
1917 */
1918 if (!smu->pm_enabled)
1919 return 0;
1920
1921 mutex_lock(&smu->mutex);
1922
1923 switch (mp1_state) {
1924 case PP_MP1_STATE_SHUTDOWN:
1925 msg = SMU_MSG_PrepareMp1ForShutdown;
1926 break;
1927 case PP_MP1_STATE_UNLOAD:
1928 msg = SMU_MSG_PrepareMp1ForUnload;
1929 break;
1930 case PP_MP1_STATE_RESET:
1931 msg = SMU_MSG_PrepareMp1ForReset;
1932 break;
1933 case PP_MP1_STATE_NONE:
1934 default:
3697b339 1935 mutex_unlock(&smu->mutex);
1936 return 0;
1937 }
1938
1939 /* some asics may not support those messages */
1940 if (smu_msg_get_index(smu, msg) < 0) {
1941 mutex_unlock(&smu->mutex);
0e0b89c0 1942 return 0;
3697b339 1943 }
1944
1945 ret = smu_send_smc_msg(smu, msg);
1946 if (ret)
1947 pr_err("[PrepareMp1] Failed!\n");
1948
1949 mutex_unlock(&smu->mutex);
1950
1951 return ret;
1952}
1953
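/* Allow or disallow Data Fabric (DF) C-states via the ppt backend, if supported. */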
1954int smu_set_df_cstate(struct smu_context *smu,
1955 enum pp_df_cstate state)
1956{
1957 int ret = 0;
1958
1959 /*
1960 * The SMC may not be fully ready yet; that is expected when
1961 * the IP block is masked, so just return without reporting
1962 * an error.
1963 */
1964 if (!smu->pm_enabled)
1965 return 0;
1966
1967 if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)
1968 return 0;
1969
1970 mutex_lock(&smu->mutex);
1971
1972 ret = smu->ppt_funcs->set_df_cstate(smu, state);
1973 if (ret)
1974 pr_err("[SetDfCstate] failed!\n");
1975
1976 mutex_unlock(&smu->mutex);
1977
1978 return ret;
1979}
1980
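/* Push the cached watermarks table down to the SMU. */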
1981int smu_write_watermarks_table(struct smu_context *smu)
1982{
9fa1ed5b 1983 void *watermarks_table = smu->smu_table.watermarks_table;
7bbdbe40 1984
9fa1ed5b 1985 if (!watermarks_table)
1986 return -EINVAL;
1987
1988 return smu_update_table(smu,
1989 SMU_TABLE_WATERMARKS,
1990 0,
1991 watermarks_table,
7bbdbe40 1992 true);
1993}
1994
1995int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
1996 struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges)
1997{
9fa1ed5b 1998 void *table = smu->smu_table.watermarks_table;
e78adc5a 1999
2000 if (!table)
2001 return -EINVAL;
7bbdbe40 2002
2003 mutex_lock(&smu->mutex);
2004
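 /*
  * Convert the ranges only while DCEFCLK and SOCCLK DPM are enabled, and
  * flag the table as existing but not yet loaded into the SMU.
  */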
2005 if (!smu->disable_watermark &&
2006 smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
2007 smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
2008 smu_set_watermarks_table(smu, table, clock_ranges);
2009
2010 if (!(smu->watermarks_bitmap & WATERMARKS_EXIST)) {
2011 smu->watermarks_bitmap |= WATERMARKS_EXIST;
2012 smu->watermarks_bitmap &= ~WATERMARKS_LOADED;
2013 }
2014 }
2015
2016 mutex_unlock(&smu->mutex);
2017
c7d5dfa8 2018 return 0;
2019}
2020
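/*
 * Standard amdgpu IP-block hooks so the SMU block is initialized, suspended
 * and resumed together with the rest of the ASIC.
 */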
2021const struct amd_ip_funcs smu_ip_funcs = {
2022 .name = "smu",
2023 .early_init = smu_early_init,
bee71d26 2024 .late_init = smu_late_init,
2025 .sw_init = smu_sw_init,
2026 .sw_fini = smu_sw_fini,
2027 .hw_init = smu_hw_init,
2028 .hw_fini = smu_hw_fini,
2029 .suspend = smu_suspend,
2030 .resume = smu_resume,
2031 .is_idle = NULL,
2032 .check_soft_reset = NULL,
2033 .wait_for_idle = NULL,
2034 .soft_reset = NULL,
2035 .set_clockgating_state = smu_set_clockgating_state,
2036 .set_powergating_state = smu_set_powergating_state,
49d27e91 2037 .enable_umd_pstate = smu_enable_umd_pstate,
137d63ab 2038};
2039
2040const struct amdgpu_ip_block_version smu_v11_0_ip_block =
2041{
2042 .type = AMD_IP_BLOCK_TYPE_SMC,
2043 .major = 11,
2044 .minor = 0,
2045 .rev = 0,
2046 .funcs = &smu_ip_funcs,
2047};
2048
2049const struct amdgpu_ip_block_version smu_v12_0_ip_block =
2050{
2051 .type = AMD_IP_BLOCK_TYPE_SMC,
2052 .major = 12,
2053 .minor = 0,
2054 .rev = 0,
2055 .funcs = &smu_ip_funcs,
2056};
2057
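/*
 * The helpers below are thin, mutex-protected wrappers around the
 * ASIC-specific ppt_funcs table: each takes smu->mutex and forwards to the
 * backend implementation when it exists.
 */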
2058int smu_load_microcode(struct smu_context *smu)
2059{
2060 int ret = 0;
2061
2062 mutex_lock(&smu->mutex);
2063
2064 if (smu->ppt_funcs->load_microcode)
2065 ret = smu->ppt_funcs->load_microcode(smu);
2066
2067 mutex_unlock(&smu->mutex);
2068
2069 return ret;
2070}
2071
2072int smu_check_fw_status(struct smu_context *smu)
2073{
2074 int ret = 0;
2075
2076 mutex_lock(&smu->mutex);
2077
2078 if (smu->ppt_funcs->check_fw_status)
2079 ret = smu->ppt_funcs->check_fw_status(smu);
2080
2081 mutex_unlock(&smu->mutex);
2082
2083 return ret;
2084}
2085
2086int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
2087{
2088 int ret = 0;
2089
2090 mutex_lock(&smu->mutex);
2091
2092 if (smu->ppt_funcs->set_gfx_cgpg)
2093 ret = smu->ppt_funcs->set_gfx_cgpg(smu, enabled);
2094
2095 mutex_unlock(&smu->mutex);
2096
2097 return ret;
2098}
2099
2100int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed)
2101{
2102 int ret = 0;
2103
2104 mutex_lock(&smu->mutex);
2105
2106 if (smu->ppt_funcs->set_fan_speed_rpm)
2107 ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed);
2108
2109 mutex_unlock(&smu->mutex);
2110
2111 return ret;
2112}
2113
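/*
 * Query the current power limit, or the default limit when @def is true;
 * @lock_needed works as in smu_force_clk_levels().
 */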
2114int smu_get_power_limit(struct smu_context *smu,
2115 uint32_t *limit,
2116 bool def,
2117 bool lock_needed)
2118{
2119 int ret = 0;
2120
2121 if (lock_needed)
2122 mutex_lock(&smu->mutex);
2123
2124 if (smu->ppt_funcs->get_power_limit)
2125 ret = smu->ppt_funcs->get_power_limit(smu, limit, def);
2126
2127 if (lock_needed)
2128 mutex_unlock(&smu->mutex);
2129
2130 return ret;
2131}
2132
2133int smu_set_power_limit(struct smu_context *smu, uint32_t limit)
2134{
2135 int ret = 0;
2136
2137 mutex_lock(&smu->mutex);
2138
2139 if (smu->ppt_funcs->set_power_limit)
2140 ret = smu->ppt_funcs->set_power_limit(smu, limit);
2141
2142 mutex_unlock(&smu->mutex);
2143
2144 return ret;
2145}
2146
2147int smu_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf)
2148{
2149 int ret = 0;
2150
2151 mutex_lock(&smu->mutex);
2152
2153 if (smu->ppt_funcs->print_clk_levels)
2154 ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf);
2155
2156 mutex_unlock(&smu->mutex);
2157
2158 return ret;
2159}
2160
2161int smu_get_od_percentage(struct smu_context *smu, enum smu_clk_type type)
2162{
2163 int ret = 0;
2164
2165 mutex_lock(&smu->mutex);
2166
2167 if (smu->ppt_funcs->get_od_percentage)
2168 ret = smu->ppt_funcs->get_od_percentage(smu, type);
2169
2170 mutex_unlock(&smu->mutex);
2171
2172 return ret;
2173}
2174
2175int smu_set_od_percentage(struct smu_context *smu, enum smu_clk_type type, uint32_t value)
2176{
2177 int ret = 0;
2178
2179 mutex_lock(&smu->mutex);
2180
2181 if (smu->ppt_funcs->set_od_percentage)
2182 ret = smu->ppt_funcs->set_od_percentage(smu, type, value);
2183
2184 mutex_unlock(&smu->mutex);
2185
2186 return ret;
2187}
2188
2189int smu_od_edit_dpm_table(struct smu_context *smu,
2190 enum PP_OD_DPM_TABLE_COMMAND type,
2191 long *input, uint32_t size)
2192{
2193 int ret = 0;
2194
2195 mutex_lock(&smu->mutex);
2196
2197 if (smu->ppt_funcs->od_edit_dpm_table)
2198 ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size);
2199
2200 mutex_unlock(&smu->mutex);
2201
2202 return ret;
2203}
2204
2205int smu_read_sensor(struct smu_context *smu,
2206 enum amd_pp_sensors sensor,
2207 void *data, uint32_t *size)
2208{
2209 int ret = 0;
2210
2211 mutex_lock(&smu->mutex);
2212
2213 if (smu->ppt_funcs->read_sensor)
2214 ret = smu->ppt_funcs->read_sensor(smu, sensor, data, size);
2215
2216 mutex_unlock(&smu->mutex);
2217
2218 return ret;
2219}
2220
2221int smu_get_power_profile_mode(struct smu_context *smu, char *buf)
2222{
2223 int ret = 0;
2224
2225 mutex_lock(&smu->mutex);
2226
2227 if (smu->ppt_funcs->get_power_profile_mode)
2228 ret = smu->ppt_funcs->get_power_profile_mode(smu, buf);
2229
2230 mutex_unlock(&smu->mutex);
2231
2232 return ret;
2233}
2234
2235int smu_set_power_profile_mode(struct smu_context *smu,
2236 long *param,
2237 uint32_t param_size,
2238 bool lock_needed)
2239{
2240 int ret = 0;
2241
2242 if (lock_needed)
2243 mutex_lock(&smu->mutex);
2244
2245 if (smu->ppt_funcs->set_power_profile_mode)
2246 ret = smu->ppt_funcs->set_power_profile_mode(smu, param, param_size);
2247
2248 if (lock_needed)
2249 mutex_unlock(&smu->mutex);
2250
2251 return ret;
2252}
2253
2254
2255int smu_get_fan_control_mode(struct smu_context *smu)
2256{
2257 int ret = 0;
2258
2259 mutex_lock(&smu->mutex);
2260
2261 if (smu->ppt_funcs->get_fan_control_mode)
2262 ret = smu->ppt_funcs->get_fan_control_mode(smu);
2263
2264 mutex_unlock(&smu->mutex);
2265
2266 return ret;
2267}
2268
2269int smu_set_fan_control_mode(struct smu_context *smu, int value)
2270{
2271 int ret = 0;
2272
2273 mutex_lock(&smu->mutex);
2274
2275 if (smu->ppt_funcs->set_fan_control_mode)
2276 ret = smu->ppt_funcs->set_fan_control_mode(smu, value);
2277
2278 mutex_unlock(&smu->mutex);
2279
2280 return ret;
2281}
2282
2283int smu_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed)
2284{
2285 int ret = 0;
2286
2287 mutex_lock(&smu->mutex);
2288
2289 if (smu->ppt_funcs->get_fan_speed_percent)
2290 ret = smu->ppt_funcs->get_fan_speed_percent(smu, speed);
2291
2292 mutex_unlock(&smu->mutex);
2293
2294 return ret;
2295}
2296
2297int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
2298{
2299 int ret = 0;
2300
2301 mutex_lock(&smu->mutex);
2302
2303 if (smu->ppt_funcs->set_fan_speed_percent)
2304 ret = smu->ppt_funcs->set_fan_speed_percent(smu, speed);
2305
2306 mutex_unlock(&smu->mutex);
2307
2308 return ret;
2309}
2310
2311int smu_get_fan_speed_rpm(struct smu_context *smu, uint32_t *speed)
2312{
2313 int ret = 0;
2314
2315 mutex_lock(&smu->mutex);
2316
2317 if (smu->ppt_funcs->get_fan_speed_rpm)
2318 ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed);
2319
2320 mutex_unlock(&smu->mutex);
2321
2322 return ret;
2323}
2324
2325int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk)
2326{
2327 int ret = 0;
2328
2329 mutex_lock(&smu->mutex);
2330
2331 if (smu->ppt_funcs->set_deep_sleep_dcefclk)
2332 ret = smu->ppt_funcs->set_deep_sleep_dcefclk(smu, clk);
2333
2334 mutex_unlock(&smu->mutex);
2335
2336 return ret;
2337}
2338
2339int smu_set_active_display_count(struct smu_context *smu, uint32_t count)
2340{
2341 int ret = 0;
2342
2343 if (smu->ppt_funcs->set_active_display_count)
2344 ret = smu->ppt_funcs->set_active_display_count(smu, count);
3697b339 2345
2346 return ret;
2347}
2348
2349int smu_get_clock_by_type(struct smu_context *smu,
2350 enum amd_pp_clock_type type,
2351 struct amd_pp_clocks *clocks)
2352{
2353 int ret = 0;
2354
2355 mutex_lock(&smu->mutex);
2356
2357 if (smu->ppt_funcs->get_clock_by_type)
2358 ret = smu->ppt_funcs->get_clock_by_type(smu, type, clocks);
2359
2360 mutex_unlock(&smu->mutex);
2361
2362 return ret;
2363}
2364
2365int smu_get_max_high_clocks(struct smu_context *smu,
2366 struct amd_pp_simple_clock_info *clocks)
2367{
2368 int ret = 0;
2369
2370 mutex_lock(&smu->mutex);
2371
2372 if (smu->ppt_funcs->get_max_high_clocks)
2373 ret = smu->ppt_funcs->get_max_high_clocks(smu, clocks);
2374
2375 mutex_unlock(&smu->mutex);
2376
2377 return ret;
2378}
2379
2380int smu_get_clock_by_type_with_latency(struct smu_context *smu,
2381 enum smu_clk_type clk_type,
2382 struct pp_clock_levels_with_latency *clocks)
2383{
2384 int ret = 0;
2385
2386 mutex_lock(&smu->mutex);
2387
2388 if (smu->ppt_funcs->get_clock_by_type_with_latency)
2389 ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks);
2390
2391 mutex_unlock(&smu->mutex);
2392
2393 return ret;
2394}
2395
2396int smu_get_clock_by_type_with_voltage(struct smu_context *smu,
2397 enum amd_pp_clock_type type,
2398 struct pp_clock_levels_with_voltage *clocks)
2399{
2400 int ret = 0;
2401
2402 mutex_lock(&smu->mutex);
2403
2404 if (smu->ppt_funcs->get_clock_by_type_with_voltage)
2405 ret = smu->ppt_funcs->get_clock_by_type_with_voltage(smu, type, clocks);
2406
2407 mutex_unlock(&smu->mutex);
2408
2409 return ret;
2410}
2411
2412
2413int smu_display_clock_voltage_request(struct smu_context *smu,
2414 struct pp_display_clock_request *clock_req)
2415{
2416 int ret = 0;
2417
2418 mutex_lock(&smu->mutex);
2419
2420 if (smu->ppt_funcs->display_clock_voltage_request)
2421 ret = smu->ppt_funcs->display_clock_voltage_request(smu, clock_req);
2422
2423 mutex_unlock(&smu->mutex);
2424
2425 return ret;
2426}
2427
2428
2429int smu_display_disable_memory_clock_switch(struct smu_context *smu, bool disable_memory_clock_switch)
2430{
2431 int ret = -EINVAL;
2432
2433 mutex_lock(&smu->mutex);
2434
2435 if (smu->ppt_funcs->display_disable_memory_clock_switch)
2436 ret = smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch);
2437
2438 mutex_unlock(&smu->mutex);
2439
2440 return ret;
2441}
2442
2443int smu_notify_smu_enable_pwe(struct smu_context *smu)
2444{
2445 int ret = 0;
2446
2447 mutex_lock(&smu->mutex);
2448
2449 if (smu->ppt_funcs->notify_smu_enable_pwe)
2450 ret = smu->ppt_funcs->notify_smu_enable_pwe(smu);
2451
2452 mutex_unlock(&smu->mutex);
2453
2454 return ret;
2455}
2456
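/* Set the pstate of the XGMI inter-GPU link, if the backend supports it. */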
2457int smu_set_xgmi_pstate(struct smu_context *smu,
2458 uint32_t pstate)
2459{
2460 int ret = 0;
2461
2462 mutex_lock(&smu->mutex);
2463
2464 if (smu->ppt_funcs->set_xgmi_pstate)
2465 ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate);
2466
2467 mutex_unlock(&smu->mutex);
2468
2469 return ret;
2470}
2471
2472int smu_set_azalia_d3_pme(struct smu_context *smu)
2473{
2474 int ret = 0;
2475
2476 mutex_lock(&smu->mutex);
2477
2478 if (smu->ppt_funcs->set_azalia_d3_pme)
2479 ret = smu->ppt_funcs->set_azalia_d3_pme(smu);
2480
2481 mutex_unlock(&smu->mutex);
2482
2483 return ret;
2484}
2485
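/* BACO (Bus Active, Chip Off) helpers: query support and state, enter and exit BACO. */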
2486bool smu_baco_is_support(struct smu_context *smu)
2487{
2488 bool ret = false;
2489
2490 mutex_lock(&smu->mutex);
2491
e78adc5a 2492 if (smu->ppt_funcs && smu->ppt_funcs->baco_is_support)
6c45e480 2493 ret = smu->ppt_funcs->baco_is_support(smu);
2494
2495 mutex_unlock(&smu->mutex);
2496
2497 return ret;
2498}
2499
2500int smu_baco_get_state(struct smu_context *smu, enum smu_baco_state *state)
2501{
6c45e480 2502 if (!smu->ppt_funcs->baco_get_state)
2503 return -EINVAL;
2504
2505 mutex_lock(&smu->mutex);
6c45e480 2506 *state = smu->ppt_funcs->baco_get_state(smu);
2507 mutex_unlock(&smu->mutex);
2508
2509 return 0;
2510}
2511
11520f27 2512int smu_baco_enter(struct smu_context *smu)
2513{
2514 int ret = 0;
2515
2516 mutex_lock(&smu->mutex);
2517
2518 if (smu->ppt_funcs->baco_enter)
2519 ret = smu->ppt_funcs->baco_enter(smu);
2520
2521 mutex_unlock(&smu->mutex);
2522
2523 return ret;
2524}
2525
2526int smu_baco_exit(struct smu_context *smu)
2527{
2528 int ret = 0;
2529
2530 mutex_lock(&smu->mutex);
2531
2532 if (smu->ppt_funcs->baco_exit)
2533 ret = smu->ppt_funcs->baco_exit(smu);
2534
2535 mutex_unlock(&smu->mutex);
2536
2537 return ret;
2538}
2539
2540int smu_mode2_reset(struct smu_context *smu)
2541{
2542 int ret = 0;
2543
2544 mutex_lock(&smu->mutex);
2545
2546 if (smu->ppt_funcs->mode2_reset)
2547 ret = smu->ppt_funcs->mode2_reset(smu);
2548
2549 mutex_unlock(&smu->mutex);
2550
2551 return ret;
2552}
2553
2554int smu_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
2555 struct pp_smu_nv_clock_table *max_clocks)
2556{
2557 int ret = 0;
2558
2559 mutex_lock(&smu->mutex);
2560
2561 if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
2562 ret = smu->ppt_funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks);
2563
2564 mutex_unlock(&smu->mutex);
2565
2566 return ret;
2567}
2568
2569int smu_get_uclk_dpm_states(struct smu_context *smu,
2570 unsigned int *clock_values_in_khz,
2571 unsigned int *num_states)
2572{
2573 int ret = 0;
2574
2575 mutex_lock(&smu->mutex);
2576
2577 if (smu->ppt_funcs->get_uclk_dpm_states)
2578 ret = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states);
2579
2580 mutex_unlock(&smu->mutex);
2581
2582 return ret;
2583}
2584
2585enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu)
2586{
2587 enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT;
2588
2589 mutex_lock(&smu->mutex);
2590
2591 if (smu->ppt_funcs->get_current_power_state)
2592 pm_state = smu->ppt_funcs->get_current_power_state(smu);
2593
2594 mutex_unlock(&smu->mutex);
2595
2596 return pm_state;
2597}
2598
2599int smu_get_dpm_clock_table(struct smu_context *smu,
2600 struct dpm_clocks *clock_table)
2601{
2602 int ret = 0;
2603
2604 mutex_lock(&smu->mutex);
2605
2606 if (smu->ppt_funcs->get_dpm_clock_table)
2607 ret = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table);
2608
2609 mutex_unlock(&smu->mutex);
2610
2611 return ret;
2612}
2613
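/*
 * Default power limit read straight from the powerplay table; note that
 * this helper does not take smu->mutex.
 */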
2614uint32_t smu_get_pptable_power_limit(struct smu_context *smu)
2615{
2616 uint32_t ret = 0;
2617
2618 if (smu->ppt_funcs->get_pptable_power_limit)
2619 ret = smu->ppt_funcs->get_pptable_power_limit(smu);
2620
2621 return ret;
2622}
2623
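/* Convenience wrapper: send a message that carries no payload (parameter 0). */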
2624int smu_send_smc_msg(struct smu_context *smu,
2625 enum smu_message_type msg)
2626{
2627 int ret;
2628
2629 ret = smu_send_smc_msg_with_param(smu, msg, 0);
2630 return ret;
2631}