/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#define SWSMU_CODE_LAYER_L1

#include <linux/firmware.h>
#include <linux/pci.h>

#include "amdgpu_smu.h"
#include "smu_internal.h"
#include "arcturus_ppt.h"
#include "navi10_ppt.h"
#include "sienna_cichlid_ppt.h"
#include "renoir_ppt.h"
#include "vangogh_ppt.h"
#include "aldebaran_ppt.h"

/*
 * DO NOT use these for err/warn/info/debug messages.
 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
 * They are more MGPU friendly.
 */
#undef pr_err
#undef pr_warn
#undef pr_info
#undef pr_debug

static const struct amd_pm_funcs swsmu_pm_funcs;
static int smu_force_smuclk_levels(struct smu_context *smu,
                                   enum smu_clk_type clk_type,
                                   uint32_t mask);
int smu_sys_get_pp_feature_mask(void *handle, char *buf)
{
        struct smu_context *smu = handle;
        int size = 0;

        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;

        mutex_lock(&smu->mutex);

        size = smu_get_pp_feature_mask(smu, buf);

        mutex_unlock(&smu->mutex);

        return size;
}

int smu_sys_set_pp_feature_mask(void *handle, uint64_t new_mask)
{
        struct smu_context *smu = handle;
        int ret = 0;

        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;

        mutex_lock(&smu->mutex);

        ret = smu_set_pp_feature_mask(smu, new_mask);

        mutex_unlock(&smu->mutex);

        return ret;
}

int smu_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value)
{
        int ret = 0;
        struct smu_context *smu = &adev->smu;

        if (is_support_sw_smu(adev) && smu->ppt_funcs->get_gfx_off_status)
                *value = smu_get_gfx_off_status(smu);
        else
                ret = -EINVAL;

        return ret;
}
int smu_set_soft_freq_range(struct smu_context *smu,
                            enum smu_clk_type clk_type,
                            uint32_t min,
                            uint32_t max)
{
        int ret = 0;

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs->set_soft_freq_limited_range)
                ret = smu->ppt_funcs->set_soft_freq_limited_range(smu,
                                                                  clk_type,
                                                                  min,
                                                                  max);

        mutex_unlock(&smu->mutex);

        return ret;
}

int smu_get_dpm_freq_range(struct smu_context *smu,
                           enum smu_clk_type clk_type,
                           uint32_t *min,
                           uint32_t *max)
{
        int ret = 0;

        if (!min && !max)
                return -EINVAL;

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs->get_dpm_ultimate_freq)
                ret = smu->ppt_funcs->get_dpm_ultimate_freq(smu,
                                                            clk_type,
                                                            min,
                                                            max);

        mutex_unlock(&smu->mutex);

        return ret;
}
u32 smu_get_mclk(void *handle, bool low)
{
        struct smu_context *smu = handle;
        uint32_t clk_freq;
        int ret = 0;

        ret = smu_get_dpm_freq_range(smu, SMU_UCLK,
                                     low ? &clk_freq : NULL,
                                     !low ? &clk_freq : NULL);
        if (ret)
                return 0;
        return clk_freq * 100;
}

u32 smu_get_sclk(void *handle, bool low)
{
        struct smu_context *smu = handle;
        uint32_t clk_freq;
        int ret = 0;

        ret = smu_get_dpm_freq_range(smu, SMU_GFXCLK,
                                     low ? &clk_freq : NULL,
                                     !low ? &clk_freq : NULL);
        if (ret)
                return 0;
        return clk_freq * 100;
}
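/*
 * Unit note (editorial, illustrative only): smu_get_dpm_freq_range() reports
 * clocks in MHz, while the legacy powerplay interface expects 10 kHz units,
 * hence the "* 100" above. For example, a 1500 MHz gfxclk is returned as
 * 150000 (10 kHz units).
 */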
static int smu_dpm_set_vcn_enable_locked(struct smu_context *smu,
                                         bool enable)
{
        struct smu_power_context *smu_power = &smu->smu_power;
        struct smu_power_gate *power_gate = &smu_power->power_gate;
        int ret = 0;

        if (!smu->ppt_funcs->dpm_set_vcn_enable)
                return 0;

        if (atomic_read(&power_gate->vcn_gated) ^ enable)
                return 0;

        ret = smu->ppt_funcs->dpm_set_vcn_enable(smu, enable);
        if (!ret)
                atomic_set(&power_gate->vcn_gated, !enable);

        return ret;
}

static int smu_dpm_set_vcn_enable(struct smu_context *smu,
                                  bool enable)
{
        struct smu_power_context *smu_power = &smu->smu_power;
        struct smu_power_gate *power_gate = &smu_power->power_gate;
        int ret = 0;

        mutex_lock(&power_gate->vcn_gate_lock);

        ret = smu_dpm_set_vcn_enable_locked(smu, enable);

        mutex_unlock(&power_gate->vcn_gate_lock);

        return ret;
}

static int smu_dpm_set_jpeg_enable_locked(struct smu_context *smu,
                                          bool enable)
{
        struct smu_power_context *smu_power = &smu->smu_power;
        struct smu_power_gate *power_gate = &smu_power->power_gate;
        int ret = 0;

        if (!smu->ppt_funcs->dpm_set_jpeg_enable)
                return 0;

        if (atomic_read(&power_gate->jpeg_gated) ^ enable)
                return 0;

        ret = smu->ppt_funcs->dpm_set_jpeg_enable(smu, enable);
        if (!ret)
                atomic_set(&power_gate->jpeg_gated, !enable);

        return ret;
}

static int smu_dpm_set_jpeg_enable(struct smu_context *smu,
                                   bool enable)
{
        struct smu_power_context *smu_power = &smu->smu_power;
        struct smu_power_gate *power_gate = &smu_power->power_gate;
        int ret = 0;

        mutex_lock(&power_gate->jpeg_gate_lock);

        ret = smu_dpm_set_jpeg_enable_locked(smu, enable);

        mutex_unlock(&power_gate->jpeg_gate_lock);

        return ret;
}
/**
 * smu_dpm_set_power_gate - power gate/ungate the specific IP block
 *
 * @handle:     smu_context pointer
 * @block_type: the IP block to power gate/ungate
 * @gate:       to power gate if true, ungate otherwise
 *
 * This API uses no smu->mutex lock protection due to:
 * 1. It is either called by other IP blocks (gfx/sdma/vcn/uvd/vce),
 *    where the caller guarantees the access is race free.
 * 2. Or it gets called on a user request to change
 *    power_dpm_force_performance_level. In that case, smu->mutex is
 *    already held by the parent API smu_force_performance_level in
 *    the call path.
 */
int smu_dpm_set_power_gate(void *handle, uint32_t block_type,
                           bool gate)
{
        struct smu_context *smu = handle;
        int ret = 0;

        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;

        switch (block_type) {
        /*
         * Some legacy code of amdgpu_vcn.c and vcn_v2*.c still uses
         * AMD_IP_BLOCK_TYPE_UVD for VCN. So, here both of them are kept.
         */
        case AMD_IP_BLOCK_TYPE_UVD:
        case AMD_IP_BLOCK_TYPE_VCN:
                ret = smu_dpm_set_vcn_enable(smu, !gate);
                if (ret)
                        dev_err(smu->adev->dev, "Failed to power %s VCN!\n",
                                gate ? "gate" : "ungate");
                break;
        case AMD_IP_BLOCK_TYPE_GFX:
                ret = smu_gfx_off_control(smu, gate);
                if (ret)
                        dev_err(smu->adev->dev, "Failed to %s gfxoff!\n",
                                gate ? "enable" : "disable");
                break;
        case AMD_IP_BLOCK_TYPE_SDMA:
                ret = smu_powergate_sdma(smu, gate);
                if (ret)
                        dev_err(smu->adev->dev, "Failed to power %s SDMA!\n",
                                gate ? "gate" : "ungate");
                break;
        case AMD_IP_BLOCK_TYPE_JPEG:
                ret = smu_dpm_set_jpeg_enable(smu, !gate);
                if (ret)
                        dev_err(smu->adev->dev, "Failed to power %s JPEG!\n",
                                gate ? "gate" : "ungate");
                break;
        default:
                dev_err(smu->adev->dev, "Unsupported block type!\n");
                ret = -EINVAL;
                break;
        }

        return ret;
}
/**
 * smu_set_user_clk_dependencies - set user profile clock dependencies
 *
 * @smu:        smu_context pointer
 * @clk:        enum smu_clk_type type
 *
 * Enable/Disable the clock dependency for the @clk type.
 */
static void smu_set_user_clk_dependencies(struct smu_context *smu, enum smu_clk_type clk)
{
        if (smu->adev->in_suspend)
                return;

        if (clk == SMU_MCLK) {
                smu->user_dpm_profile.clk_dependency = 0;
                smu->user_dpm_profile.clk_dependency = BIT(SMU_FCLK) | BIT(SMU_SOCCLK);
        } else if (clk == SMU_FCLK) {
                /* MCLK takes precedence over FCLK */
                if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK)))
                        return;

                smu->user_dpm_profile.clk_dependency = 0;
                smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_SOCCLK);
        } else if (clk == SMU_SOCCLK) {
                /* MCLK takes precedence over SOCCLK */
                if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK)))
                        return;

                smu->user_dpm_profile.clk_dependency = 0;
                smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_FCLK);
        } else
                /* Add clk dependencies here, if any */
                return;
}
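/*
 * Worked example of the dependency mask above (editorial, illustrative):
 * forcing a user MCLK level sets clk_dependency to
 * BIT(SMU_FCLK) | BIT(SMU_SOCCLK), so a later
 * smu_restore_dpm_user_profile() skips the saved FCLK/SOCCLK masks (they
 * follow MCLK). Forcing FCLK or SOCCLK afterwards leaves the mask alone
 * while MCLK still holds precedence, since the early-return checks above
 * see BIT(SMU_FCLK) | BIT(SMU_SOCCLK) already set.
 */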
/**
 * smu_restore_dpm_user_profile - reinstate user dpm profile
 *
 * @smu:        smu_context pointer
 *
 * Restore the saved user power configurations, including power limit,
 * clock frequencies, fan control mode and fan speed.
 */
static void smu_restore_dpm_user_profile(struct smu_context *smu)
{
        struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
        int ret = 0;

        if (!smu->adev->in_suspend)
                return;

        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return;

        /* Enable restore flag */
        smu->user_dpm_profile.flags |= SMU_DPM_USER_PROFILE_RESTORE;

        /* set the user dpm power limit */
        if (smu->user_dpm_profile.power_limit) {
                ret = smu_set_power_limit(smu, smu->user_dpm_profile.power_limit);
                if (ret)
                        dev_err(smu->adev->dev, "Failed to set power limit value\n");
        }

        /* set the user dpm clock configurations */
        if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
                enum smu_clk_type clk_type;

                for (clk_type = 0; clk_type < SMU_CLK_COUNT; clk_type++) {
                        /*
                         * Iterate over smu clk type and force the saved user clk
                         * configs, skip if clock dependency is enabled
                         */
                        if (!(smu->user_dpm_profile.clk_dependency & BIT(clk_type)) &&
                            smu->user_dpm_profile.clk_mask[clk_type]) {
                                ret = smu_force_smuclk_levels(smu, clk_type,
                                                smu->user_dpm_profile.clk_mask[clk_type]);
                                if (ret)
                                        dev_err(smu->adev->dev,
                                                "Failed to set clock type = %d\n", clk_type);
                        }
                }
        }

        /* set the user dpm fan configurations */
        if (smu->user_dpm_profile.fan_mode == AMD_FAN_CTRL_MANUAL) {
                ret = smu_set_fan_control_mode(smu, smu->user_dpm_profile.fan_mode);
                if (ret) {
                        dev_err(smu->adev->dev, "Failed to set manual fan control mode\n");
                        return;
                }

                if (!ret && smu->user_dpm_profile.fan_speed_percent) {
                        ret = smu_set_fan_speed_percent(smu, smu->user_dpm_profile.fan_speed_percent);
                        if (ret)
                                dev_err(smu->adev->dev, "Failed to set manual fan speed\n");
                }
        }

        /* Disable restore flag */
        smu->user_dpm_profile.flags &= ~SMU_DPM_USER_PROFILE_RESTORE;
}
int smu_get_power_num_states(void *handle,
                             struct pp_states_info *state_info)
{
        if (!state_info)
                return -EINVAL;

        /* not support power state */
        memset(state_info, 0, sizeof(struct pp_states_info));
        state_info->nums = 1;
        state_info->states[0] = POWER_STATE_TYPE_DEFAULT;

        return 0;
}

bool is_support_sw_smu(struct amdgpu_device *adev)
{
        if (adev->asic_type >= CHIP_ARCTURUS)
                return true;

        return false;
}

bool is_support_cclk_dpm(struct amdgpu_device *adev)
{
        struct smu_context *smu = &adev->smu;

        if (!is_support_sw_smu(adev))
                return false;

        if (!smu_feature_is_enabled(smu, SMU_FEATURE_CCLK_DPM_BIT))
                return false;

        return true;
}

int smu_sys_get_pp_table(void *handle, char **table)
{
        struct smu_context *smu = handle;
        struct smu_table_context *smu_table = &smu->smu_table;
        uint32_t powerplay_table_size;

        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;

        if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
                return -EINVAL;

        mutex_lock(&smu->mutex);

        if (smu_table->hardcode_pptable)
                *table = smu_table->hardcode_pptable;
        else
                *table = smu_table->power_play_table;

        powerplay_table_size = smu_table->power_play_table_size;

        mutex_unlock(&smu->mutex);

        return powerplay_table_size;
}
int smu_sys_set_pp_table(void *handle, const char *buf, size_t size)
{
        struct smu_context *smu = handle;
        struct smu_table_context *smu_table = &smu->smu_table;
        ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
        int ret = 0;

        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;

        if (header->usStructureSize != size) {
                dev_err(smu->adev->dev, "pp table size not matched !\n");
                return -EIO;
        }

        mutex_lock(&smu->mutex);
        if (!smu_table->hardcode_pptable)
                smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
        if (!smu_table->hardcode_pptable) {
                ret = -ENOMEM;
                goto failed;
        }

        memcpy(smu_table->hardcode_pptable, buf, size);
        smu_table->power_play_table = smu_table->hardcode_pptable;
        smu_table->power_play_table_size = size;

        /*
         * Special hw_fini action(for Navi1x, the DPMs disablement will be
         * skipped) may be needed for custom pptable uploading.
         */
        smu->uploading_custom_pp_table = true;

        ret = smu_reset(smu);
        if (ret)
                dev_info(smu->adev->dev, "smu reset failed, ret = %d\n", ret);

        smu->uploading_custom_pp_table = false;

failed:
        mutex_unlock(&smu->mutex);
        return ret;
}
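/*
 * Usage note (editorial, hedged): this entry point backs the amdgpu
 * "pp_table" sysfs file, so a custom pptable upload from userspace would
 * look roughly like
 *
 *      cat my_pptable > /sys/class/drm/card0/device/pp_table
 *
 * with the size check above rejecting a blob whose ATOM header length
 * disagrees with the number of bytes written.
 */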
static int smu_get_driver_allowed_feature_mask(struct smu_context *smu)
{
        struct smu_feature *feature = &smu->smu_feature;
        int ret = 0;
        uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32];

        bitmap_zero(feature->allowed, SMU_FEATURE_MAX);

        ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask,
                                           SMU_FEATURE_MAX/32);
        if (ret)
                return ret;

        bitmap_or(feature->allowed, feature->allowed,
                  (unsigned long *)allowed_feature_mask,
                  feature->feature_num);

        return ret;
}

static int smu_set_funcs(struct amdgpu_device *adev)
{
        struct smu_context *smu = &adev->smu;

        if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
                smu->od_enabled = true;

        switch (adev->asic_type) {
        case CHIP_NAVI10:
        case CHIP_NAVI14:
        case CHIP_NAVI12:
                navi10_set_ppt_funcs(smu);
                break;
        case CHIP_ARCTURUS:
                adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
                arcturus_set_ppt_funcs(smu);
                /* OD is not supported on Arcturus */
                smu->od_enabled = false;
                break;
        case CHIP_SIENNA_CICHLID:
        case CHIP_NAVY_FLOUNDER:
        case CHIP_DIMGREY_CAVEFISH:
                sienna_cichlid_set_ppt_funcs(smu);
                break;
        case CHIP_ALDEBARAN:
                aldebaran_set_ppt_funcs(smu);
                /* Enable pp_od_clk_voltage node */
                smu->od_enabled = true;
                break;
        case CHIP_RENOIR:
                renoir_set_ppt_funcs(smu);
                break;
        case CHIP_VANGOGH:
                vangogh_set_ppt_funcs(smu);
                break;
        default:
                return -EINVAL;
        }

        return 0;
}
static int smu_early_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct smu_context *smu = &adev->smu;

        smu->adev = adev;
        smu->pm_enabled = !!amdgpu_dpm;
        smu->is_apu = false;
        mutex_init(&smu->mutex);
        mutex_init(&smu->smu_baco.mutex);
        smu->smu_baco.state = SMU_BACO_STATE_EXIT;
        smu->smu_baco.platform_support = false;

        adev->powerplay.pp_handle = smu;
        adev->powerplay.pp_funcs = &swsmu_pm_funcs;

        return smu_set_funcs(adev);
}

static int smu_set_default_dpm_table(struct smu_context *smu)
{
        struct smu_power_context *smu_power = &smu->smu_power;
        struct smu_power_gate *power_gate = &smu_power->power_gate;
        int vcn_gate, jpeg_gate;
        int ret = 0;

        if (!smu->ppt_funcs->set_default_dpm_table)
                return 0;

        mutex_lock(&power_gate->vcn_gate_lock);
        mutex_lock(&power_gate->jpeg_gate_lock);

        vcn_gate = atomic_read(&power_gate->vcn_gated);
        jpeg_gate = atomic_read(&power_gate->jpeg_gated);

        ret = smu_dpm_set_vcn_enable_locked(smu, true);
        if (ret)
                goto err0_out;

        ret = smu_dpm_set_jpeg_enable_locked(smu, true);
        if (ret)
                goto err1_out;

        ret = smu->ppt_funcs->set_default_dpm_table(smu);
        if (ret)
                dev_err(smu->adev->dev,
                        "Failed to setup default dpm clock tables!\n");

        smu_dpm_set_jpeg_enable_locked(smu, !jpeg_gate);
err1_out:
        smu_dpm_set_vcn_enable_locked(smu, !vcn_gate);
err0_out:
        mutex_unlock(&power_gate->jpeg_gate_lock);
        mutex_unlock(&power_gate->vcn_gate_lock);

        return ret;
}
static int smu_late_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct smu_context *smu = &adev->smu;
        int ret = 0;

        smu_set_fine_grain_gfx_freq_parameters(smu);

        if (!smu->pm_enabled)
                return 0;

        ret = smu_post_init(smu);
        if (ret) {
                dev_err(adev->dev, "Failed to post smu init!\n");
                return ret;
        }

        if (!amdgpu_sriov_vf(adev) || smu->od_enabled) {
                ret = smu_set_default_od_settings(smu);
                if (ret) {
                        dev_err(adev->dev, "Failed to setup default OD settings!\n");
                        return ret;
                }
        }

        ret = smu_populate_umd_state_clk(smu);
        if (ret) {
                dev_err(adev->dev, "Failed to populate UMD state clocks!\n");
                return ret;
        }

        ret = smu_get_asic_power_limits(smu);
        if (ret) {
                dev_err(adev->dev, "Failed to get asic power limits!\n");
                return ret;
        }

        smu_get_unique_id(smu);

        smu_get_fan_parameters(smu);

        smu_handle_task(&adev->smu,
                        smu->smu_dpm.dpm_level,
                        AMD_PP_TASK_COMPLETE_INIT,
                        false);

        smu_restore_dpm_user_profile(smu);

        return 0;
}
static int smu_init_fb_allocations(struct smu_context *smu)
{
        struct amdgpu_device *adev = smu->adev;
        struct smu_table_context *smu_table = &smu->smu_table;
        struct smu_table *tables = smu_table->tables;
        struct smu_table *driver_table = &(smu_table->driver_table);
        uint32_t max_table_size = 0;
        int ret, i;

        /* VRAM allocation for tool table */
        if (tables[SMU_TABLE_PMSTATUSLOG].size) {
                ret = amdgpu_bo_create_kernel(adev,
                                              tables[SMU_TABLE_PMSTATUSLOG].size,
                                              tables[SMU_TABLE_PMSTATUSLOG].align,
                                              tables[SMU_TABLE_PMSTATUSLOG].domain,
                                              &tables[SMU_TABLE_PMSTATUSLOG].bo,
                                              &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
                                              &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
                if (ret) {
                        dev_err(adev->dev, "VRAM allocation for tool table failed!\n");
                        return ret;
                }
        }

        /* VRAM allocation for driver table */
        for (i = 0; i < SMU_TABLE_COUNT; i++) {
                if (tables[i].size == 0)
                        continue;

                if (i == SMU_TABLE_PMSTATUSLOG)
                        continue;

                if (max_table_size < tables[i].size)
                        max_table_size = tables[i].size;
        }

        driver_table->size = max_table_size;
        driver_table->align = PAGE_SIZE;
        driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM;

        ret = amdgpu_bo_create_kernel(adev,
                                      driver_table->size,
                                      driver_table->align,
                                      driver_table->domain,
                                      &driver_table->bo,
                                      &driver_table->mc_address,
                                      &driver_table->cpu_addr);
        if (ret) {
                dev_err(adev->dev, "VRAM allocation for driver table failed!\n");
                if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
                        amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
                                              &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
                                              &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
        }

        return ret;
}

static int smu_fini_fb_allocations(struct smu_context *smu)
{
        struct smu_table_context *smu_table = &smu->smu_table;
        struct smu_table *tables = smu_table->tables;
        struct smu_table *driver_table = &(smu_table->driver_table);

        if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
                amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
                                      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
                                      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);

        amdgpu_bo_free_kernel(&driver_table->bo,
                              &driver_table->mc_address,
                              &driver_table->cpu_addr);

        return 0;
}
/**
 * smu_alloc_memory_pool - allocate memory pool in the system memory
 *
 * @smu: smu_context pointer
 *
 * This memory pool is reserved for SMC use. The SetSystemVirtualDramAddr
 * and DramLogSetDramAddr messages notify the SMC of its location.
 *
 * Returns 0 on success, error on failure.
 */
static int smu_alloc_memory_pool(struct smu_context *smu)
{
        struct amdgpu_device *adev = smu->adev;
        struct smu_table_context *smu_table = &smu->smu_table;
        struct smu_table *memory_pool = &smu_table->memory_pool;
        uint64_t pool_size = smu->pool_size;
        int ret = 0;

        if (pool_size == SMU_MEMORY_POOL_SIZE_ZERO)
                return ret;

        memory_pool->size = pool_size;
        memory_pool->align = PAGE_SIZE;
        memory_pool->domain = AMDGPU_GEM_DOMAIN_GTT;

        switch (pool_size) {
        case SMU_MEMORY_POOL_SIZE_256_MB:
        case SMU_MEMORY_POOL_SIZE_512_MB:
        case SMU_MEMORY_POOL_SIZE_1_GB:
        case SMU_MEMORY_POOL_SIZE_2_GB:
                ret = amdgpu_bo_create_kernel(adev,
                                              memory_pool->size,
                                              memory_pool->align,
                                              memory_pool->domain,
                                              &memory_pool->bo,
                                              &memory_pool->mc_address,
                                              &memory_pool->cpu_addr);
                if (ret)
                        dev_err(adev->dev, "VRAM allocation for dramlog failed!\n");
                break;
        default:
                break;
        }

        return ret;
}

static int smu_free_memory_pool(struct smu_context *smu)
{
        struct smu_table_context *smu_table = &smu->smu_table;
        struct smu_table *memory_pool = &smu_table->memory_pool;

        if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO)
                return 0;

        amdgpu_bo_free_kernel(&memory_pool->bo,
                              &memory_pool->mc_address,
                              &memory_pool->cpu_addr);

        memset(memory_pool, 0, sizeof(struct smu_table));

        return 0;
}
static int smu_alloc_dummy_read_table(struct smu_context *smu)
{
        struct smu_table_context *smu_table = &smu->smu_table;
        struct smu_table *dummy_read_1_table =
                        &smu_table->dummy_read_1_table;
        struct amdgpu_device *adev = smu->adev;
        int ret = 0;

        dummy_read_1_table->size = 0x40000;
        dummy_read_1_table->align = PAGE_SIZE;
        dummy_read_1_table->domain = AMDGPU_GEM_DOMAIN_VRAM;

        ret = amdgpu_bo_create_kernel(adev,
                                      dummy_read_1_table->size,
                                      dummy_read_1_table->align,
                                      dummy_read_1_table->domain,
                                      &dummy_read_1_table->bo,
                                      &dummy_read_1_table->mc_address,
                                      &dummy_read_1_table->cpu_addr);
        if (ret)
                dev_err(adev->dev, "VRAM allocation for dummy read table failed!\n");

        return ret;
}

static void smu_free_dummy_read_table(struct smu_context *smu)
{
        struct smu_table_context *smu_table = &smu->smu_table;
        struct smu_table *dummy_read_1_table =
                        &smu_table->dummy_read_1_table;

        amdgpu_bo_free_kernel(&dummy_read_1_table->bo,
                              &dummy_read_1_table->mc_address,
                              &dummy_read_1_table->cpu_addr);

        memset(dummy_read_1_table, 0, sizeof(struct smu_table));
}
static int smu_smc_table_sw_init(struct smu_context *smu)
{
        int ret;

        /*
         * Create smu_table structure, and init smc tables such as
         * TABLE_PPTABLE, TABLE_WATERMARKS, TABLE_SMU_METRICS, and etc.
         */
        ret = smu_init_smc_tables(smu);
        if (ret) {
                dev_err(smu->adev->dev, "Failed to init smc tables!\n");
                return ret;
        }

        /*
         * Create smu_power_context structure, and allocate smu_dpm_context and
         * context size to fill the smu_power_context data.
         */
        ret = smu_init_power(smu);
        if (ret) {
                dev_err(smu->adev->dev, "Failed to init smu_init_power!\n");
                return ret;
        }

        /*
         * allocate vram bos to store smc table contents.
         */
        ret = smu_init_fb_allocations(smu);
        if (ret)
                return ret;

        ret = smu_alloc_memory_pool(smu);
        if (ret)
                return ret;

        ret = smu_alloc_dummy_read_table(smu);
        if (ret)
                return ret;

        ret = smu_i2c_init(smu, &smu->adev->pm.smu_i2c);
        if (ret)
                return ret;

        return 0;
}

static int smu_smc_table_sw_fini(struct smu_context *smu)
{
        int ret;

        smu_i2c_fini(smu, &smu->adev->pm.smu_i2c);

        smu_free_dummy_read_table(smu);

        ret = smu_free_memory_pool(smu);
        if (ret)
                return ret;

        ret = smu_fini_fb_allocations(smu);
        if (ret)
                return ret;

        ret = smu_fini_power(smu);
        if (ret) {
                dev_err(smu->adev->dev, "Failed to init smu_fini_power!\n");
                return ret;
        }

        ret = smu_fini_smc_tables(smu);
        if (ret) {
                dev_err(smu->adev->dev, "Failed to smu_fini_smc_tables!\n");
                return ret;
        }

        return 0;
}
static void smu_throttling_logging_work_fn(struct work_struct *work)
{
        struct smu_context *smu = container_of(work, struct smu_context,
                                               throttling_logging_work);

        smu_log_thermal_throttling(smu);
}

static void smu_interrupt_work_fn(struct work_struct *work)
{
        struct smu_context *smu = container_of(work, struct smu_context,
                                               interrupt_work);

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs && smu->ppt_funcs->interrupt_work)
                smu->ppt_funcs->interrupt_work(smu);

        mutex_unlock(&smu->mutex);
}
static int smu_sw_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct smu_context *smu = &adev->smu;
        int ret;

        smu->pool_size = adev->pm.smu_prv_buffer_size;
        smu->smu_feature.feature_num = SMU_FEATURE_MAX;
        mutex_init(&smu->smu_feature.mutex);
        bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
        bitmap_zero(smu->smu_feature.enabled, SMU_FEATURE_MAX);
        bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);

        mutex_init(&smu->sensor_lock);
        mutex_init(&smu->metrics_lock);
        mutex_init(&smu->message_lock);

        INIT_WORK(&smu->throttling_logging_work, smu_throttling_logging_work_fn);
        INIT_WORK(&smu->interrupt_work, smu_interrupt_work_fn);
        atomic64_set(&smu->throttle_int_counter, 0);
        smu->watermarks_bitmap = 0;
        smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
        smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;

        atomic_set(&smu->smu_power.power_gate.vcn_gated, 1);
        atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1);
        mutex_init(&smu->smu_power.power_gate.vcn_gate_lock);
        mutex_init(&smu->smu_power.power_gate.jpeg_gate_lock);

        smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
        smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
        smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
        smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
        smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
        smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
        smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
        smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;

        smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
        smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
        smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
        smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
        smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
        smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
        smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM;
        smu->display_config = &adev->pm.pm_display_cfg;

        smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
        smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;

        ret = smu_init_microcode(smu);
        if (ret) {
                dev_err(adev->dev, "Failed to load smu firmware!\n");
                return ret;
        }

        ret = smu_smc_table_sw_init(smu);
        if (ret) {
                dev_err(adev->dev, "Failed to sw init smc table!\n");
                return ret;
        }

        ret = smu_register_irq_handler(smu);
        if (ret) {
                dev_err(adev->dev, "Failed to register smc irq handler!\n");
                return ret;
        }

        /* If there is no way to query fan control mode, fan control is not supported */
        if (!smu->ppt_funcs->get_fan_control_mode)
                smu->adev->pm.no_fan = true;

        return 0;
}
static int smu_sw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct smu_context *smu = &adev->smu;
        int ret;

        ret = smu_smc_table_sw_fini(smu);
        if (ret) {
                dev_err(adev->dev, "Failed to sw fini smc table!\n");
                return ret;
        }

        smu_fini_microcode(smu);

        return 0;
}
static int smu_get_thermal_temperature_range(struct smu_context *smu)
{
        struct amdgpu_device *adev = smu->adev;
        struct smu_temperature_range *range =
                                &smu->thermal_range;
        int ret = 0;

        if (!smu->ppt_funcs->get_thermal_temperature_range)
                return 0;

        ret = smu->ppt_funcs->get_thermal_temperature_range(smu, range);
        if (ret)
                return ret;

        adev->pm.dpm.thermal.min_temp = range->min;
        adev->pm.dpm.thermal.max_temp = range->max;
        adev->pm.dpm.thermal.max_edge_emergency_temp = range->edge_emergency_max;
        adev->pm.dpm.thermal.min_hotspot_temp = range->hotspot_min;
        adev->pm.dpm.thermal.max_hotspot_crit_temp = range->hotspot_crit_max;
        adev->pm.dpm.thermal.max_hotspot_emergency_temp = range->hotspot_emergency_max;
        adev->pm.dpm.thermal.min_mem_temp = range->mem_min;
        adev->pm.dpm.thermal.max_mem_crit_temp = range->mem_crit_max;
        adev->pm.dpm.thermal.max_mem_emergency_temp = range->mem_emergency_max;

        return ret;
}
static int smu_smc_hw_setup(struct smu_context *smu)
{
        struct amdgpu_device *adev = smu->adev;
        uint32_t pcie_gen = 0, pcie_width = 0;
        int ret = 0;

        if (adev->in_suspend && smu_is_dpm_running(smu)) {
                dev_info(adev->dev, "dpm has been enabled\n");
                /* this is needed specifically */
                if ((adev->asic_type >= CHIP_SIENNA_CICHLID) &&
                    (adev->asic_type <= CHIP_DIMGREY_CAVEFISH))
                        ret = smu_system_features_control(smu, true);
                return ret;
        }

        ret = smu_init_display_count(smu, 0);
        if (ret) {
                dev_info(adev->dev, "Failed to pre-set display count as 0!\n");
                return ret;
        }

        ret = smu_set_driver_table_location(smu);
        if (ret) {
                dev_err(adev->dev, "Failed to SetDriverDramAddr!\n");
                return ret;
        }

        /*
         * Set PMSTATUSLOG table bo address with SetToolsDramAddr MSG for tools.
         */
        ret = smu_set_tool_table_location(smu);
        if (ret) {
                dev_err(adev->dev, "Failed to SetToolsDramAddr!\n");
                return ret;
        }

        /*
         * Use msg SetSystemVirtualDramAddr and DramLogSetDramAddr to notify
         * the SMC of the memory pool location.
         */
        ret = smu_notify_memory_pool_location(smu);
        if (ret) {
                dev_err(adev->dev, "Failed to SetDramLogDramAddr!\n");
                return ret;
        }

        /* smu_dump_pptable(smu); */
        /*
         * Copy pptable bo in the vram to smc with SMU MSGs such as
         * SetDriverDramAddr and TransferTableDram2Smu.
         */
        ret = smu_write_pptable(smu);
        if (ret) {
                dev_err(adev->dev, "Failed to transfer pptable to SMC!\n");
                return ret;
        }

        /* issue Run*Btc msg */
        ret = smu_run_btc(smu);
        if (ret)
                return ret;

        ret = smu_feature_set_allowed_mask(smu);
        if (ret) {
                dev_err(adev->dev, "Failed to set driver allowed features mask!\n");
                return ret;
        }

        ret = smu_system_features_control(smu, true);
        if (ret) {
                dev_err(adev->dev, "Failed to enable requested dpm features!\n");
                return ret;
        }

        if (!smu_is_dpm_running(smu))
                dev_info(adev->dev, "dpm has been disabled\n");

        if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
                pcie_gen = 3;
        else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
                pcie_gen = 2;
        else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
                pcie_gen = 1;
        else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
                pcie_gen = 0;

        /* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
         * Bit 15:8:  PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
         * Bit 7:0:   PCIE lane width, 1 to 7 corresponds to x1 to x32
         */
        if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
                pcie_width = 6;
        else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
                pcie_width = 5;
        else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
                pcie_width = 4;
        else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
                pcie_width = 3;
        else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
                pcie_width = 2;
        else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
                pcie_width = 1;

        ret = smu_update_pcie_parameters(smu, pcie_gen, pcie_width);
        if (ret) {
                dev_err(adev->dev, "Attempt to override pcie params failed!\n");
                return ret;
        }

        ret = smu_get_thermal_temperature_range(smu);
        if (ret) {
                dev_err(adev->dev, "Failed to get thermal temperature ranges!\n");
                return ret;
        }

        ret = smu_enable_thermal_alert(smu);
        if (ret) {
                dev_err(adev->dev, "Failed to enable thermal alert!\n");
                return ret;
        }

        /*
         * Set initialized values (get from vbios) to dpm tables context such as
         * gfxclk, memclk, dcefclk, and etc. And enable the DPM feature for each
         * dpm table.
         */
        ret = smu_set_default_dpm_table(smu);
        if (ret) {
                dev_err(adev->dev, "Failed to setup default dpm clock tables!\n");
                return ret;
        }

        ret = smu_notify_display_change(smu);
        if (ret)
                return ret;

        /*
         * Set min deep sleep dce fclk with bootup value from vbios via
         * SetMinDeepSleepDcefclk MSG.
         */
        ret = smu_set_min_dcef_deep_sleep(smu,
                                          smu->smu_table.boot_values.dcefclk / 100);
        if (ret)
                return ret;

        return ret;
}
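/*
 * Worked example for the PCIE override above (editorial, illustrative): on
 * a Gen4 x16 capable platform, the masks select pcie_gen = 3 and
 * pcie_width = 6, so smu_update_pcie_parameters(smu, 3, 6) caps the link
 * DPM table at GEN4/x16 per the bit layout documented in the comment.
 */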
static int smu_start_smc_engine(struct smu_context *smu)
{
        struct amdgpu_device *adev = smu->adev;
        int ret = 0;

        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
                if (adev->asic_type < CHIP_NAVI10) {
                        if (smu->ppt_funcs->load_microcode) {
                                ret = smu->ppt_funcs->load_microcode(smu);
                                if (ret)
                                        return ret;
                        }
                }
        }

        if (smu->ppt_funcs->check_fw_status) {
                ret = smu->ppt_funcs->check_fw_status(smu);
                if (ret) {
                        dev_err(adev->dev, "SMC is not ready\n");
                        return ret;
                }
        }

        /*
         * Send msg GetDriverIfVersion to check if the return value is equal
         * with DRIVER_IF_VERSION of smc header.
         */
        ret = smu_check_fw_version(smu);
        if (ret)
                return ret;

        return ret;
}

static int smu_hw_init(void *handle)
{
        int ret;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct smu_context *smu = &adev->smu;

        if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) {
                smu->pm_enabled = false;
                return 0;
        }

        ret = smu_start_smc_engine(smu);
        if (ret) {
                dev_err(adev->dev, "SMC engine is not correctly up!\n");
                return ret;
        }

        if (smu->is_apu) {
                smu_powergate_sdma(&adev->smu, false);
                smu_dpm_set_vcn_enable(smu, true);
                smu_dpm_set_jpeg_enable(smu, true);
                smu_set_gfx_cgpg(&adev->smu, true);
        }

        if (!smu->pm_enabled)
                return 0;

        /* get boot_values from vbios to set revision, gfxclk, and etc. */
        ret = smu_get_vbios_bootup_values(smu);
        if (ret) {
                dev_err(adev->dev, "Failed to get VBIOS boot clock values!\n");
                return ret;
        }

        ret = smu_setup_pptable(smu);
        if (ret) {
                dev_err(adev->dev, "Failed to setup pptable!\n");
                return ret;
        }

        ret = smu_get_driver_allowed_feature_mask(smu);
        if (ret)
                return ret;

        ret = smu_smc_hw_setup(smu);
        if (ret) {
                dev_err(adev->dev, "Failed to setup smc hw!\n");
                return ret;
        }

        /*
         * Move maximum sustainable clock retrieving here considering
         * 1. It is not needed on resume(from S3).
         * 2. DAL settings come between .hw_init and .late_init of SMU.
         *    And DAL needs to know the maximum sustainable clocks. Thus
         *    it cannot be put in .late_init().
         */
        ret = smu_init_max_sustainable_clocks(smu);
        if (ret) {
                dev_err(adev->dev, "Failed to init max sustainable clocks!\n");
                return ret;
        }

        adev->pm.dpm_enabled = true;

        dev_info(adev->dev, "SMU is initialized successfully!\n");

        return 0;
}
static int smu_disable_dpms(struct smu_context *smu)
{
        struct amdgpu_device *adev = smu->adev;
        int ret = 0;
        bool use_baco = !smu->is_apu &&
                ((amdgpu_in_reset(adev) &&
                  (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||
                 ((adev->in_runpm || adev->in_s4) && amdgpu_asic_supports_baco(adev)));

        /*
         * For custom pptable uploading, skip the DPM features
         * disable process on Navi1x ASICs.
         *   - As the gfx related features are under control of
         *     RLC on those ASICs. RLC reinitialization will be
         *     needed to reenable them. That will cost much more
         *     efforts.
         *
         *   - SMU firmware can handle the DPM reenablement
         *     properly.
         */
        if (smu->uploading_custom_pp_table &&
            (adev->asic_type >= CHIP_NAVI10) &&
            (adev->asic_type <= CHIP_DIMGREY_CAVEFISH))
                return 0;

        /*
         * For Sienna_Cichlid, PMFW will handle the features disablement properly
         * on BACO in. Driver involvement is unnecessary.
         */
        if ((adev->asic_type == CHIP_SIENNA_CICHLID) &&
             use_baco)
                return 0;

        /*
         * For gpu reset, runpm and hibernation through BACO,
         * BACO feature has to be kept enabled.
         */
        if (use_baco && smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) {
                ret = smu_disable_all_features_with_exception(smu,
                                                              SMU_FEATURE_BACO_BIT);
                if (ret)
                        dev_err(adev->dev, "Failed to disable smu features except BACO.\n");
        } else {
                ret = smu_system_features_control(smu, false);
                if (ret)
                        dev_err(adev->dev, "Failed to disable smu features.\n");
        }

        if (adev->asic_type >= CHIP_NAVI10 &&
            adev->gfx.rlc.funcs->stop)
                adev->gfx.rlc.funcs->stop(adev);

        return ret;
}

static int smu_smc_hw_cleanup(struct smu_context *smu)
{
        struct amdgpu_device *adev = smu->adev;
        int ret = 0;

        cancel_work_sync(&smu->throttling_logging_work);
        cancel_work_sync(&smu->interrupt_work);

        ret = smu_disable_thermal_alert(smu);
        if (ret) {
                dev_err(adev->dev, "Fail to disable thermal alert!\n");
                return ret;
        }

        ret = smu_disable_dpms(smu);
        if (ret) {
                dev_err(adev->dev, "Fail to disable dpm features!\n");
                return ret;
        }

        return 0;
}
static int smu_hw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct smu_context *smu = &adev->smu;

        if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
                return 0;

        if (smu->is_apu) {
                smu_powergate_sdma(&adev->smu, true);
                smu_dpm_set_vcn_enable(smu, false);
                smu_dpm_set_jpeg_enable(smu, false);
        }

        if (!smu->pm_enabled)
                return 0;

        adev->pm.dpm_enabled = false;

        return smu_smc_hw_cleanup(smu);
}

int smu_reset(struct smu_context *smu)
{
        struct amdgpu_device *adev = smu->adev;
        int ret;

        amdgpu_gfx_off_ctrl(smu->adev, false);

        ret = smu_hw_fini(adev);
        if (ret)
                return ret;

        ret = smu_hw_init(adev);
        if (ret)
                return ret;

        ret = smu_late_init(adev);
        if (ret)
                return ret;

        amdgpu_gfx_off_ctrl(smu->adev, true);

        return 0;
}
static int smu_suspend(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct smu_context *smu = &adev->smu;
        int ret;

        if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
                return 0;

        if (!smu->pm_enabled)
                return 0;

        adev->pm.dpm_enabled = false;

        ret = smu_smc_hw_cleanup(smu);
        if (ret)
                return ret;

        smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);

        smu_set_gfx_cgpg(&adev->smu, false);

        return 0;
}

static int smu_resume(void *handle)
{
        int ret;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct smu_context *smu = &adev->smu;

        if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
                return 0;

        if (!smu->pm_enabled)
                return 0;

        dev_info(adev->dev, "SMU is resuming...\n");

        ret = smu_start_smc_engine(smu);
        if (ret) {
                dev_err(adev->dev, "SMC engine is not correctly up!\n");
                return ret;
        }

        ret = smu_smc_hw_setup(smu);
        if (ret) {
                dev_err(adev->dev, "Failed to setup smc hw!\n");
                return ret;
        }

        smu_set_gfx_cgpg(&adev->smu, true);

        smu->disable_uclk_switch = 0;

        adev->pm.dpm_enabled = true;

        dev_info(adev->dev, "SMU is resumed successfully!\n");

        return 0;
}
int smu_display_configuration_change(struct smu_context *smu,
                                     const struct amd_pp_display_configuration *display_config)
{
        int index = 0;
        int num_of_active_display = 0;

        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;

        if (!display_config)
                return -EINVAL;

        mutex_lock(&smu->mutex);

        smu_set_min_dcef_deep_sleep(smu,
                                    display_config->min_dcef_deep_sleep_set_clk / 100);

        for (index = 0; index < display_config->num_path_including_non_display; index++) {
                if (display_config->displays[index].controller_id != 0)
                        num_of_active_display++;
        }

        mutex_unlock(&smu->mutex);

        return 0;
}

static int smu_set_clockgating_state(void *handle,
                                     enum amd_clockgating_state state)
{
        return 0;
}

static int smu_set_powergating_state(void *handle,
                                     enum amd_powergating_state state)
{
        return 0;
}
static int smu_enable_umd_pstate(void *handle,
                                 enum amd_dpm_forced_level *level)
{
        uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
                                     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
                                     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
                                     AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

        struct smu_context *smu = (struct smu_context *)(handle);
        struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);

        if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
                return -EINVAL;

        if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
                /* enter umd pstate, save current level, disable gfx cg*/
                if (*level & profile_mode_mask) {
                        smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
                        smu_dpm_ctx->enable_umd_pstate = true;
                        smu_gpo_control(smu, false);
                        amdgpu_device_ip_set_powergating_state(smu->adev,
                                                               AMD_IP_BLOCK_TYPE_GFX,
                                                               AMD_PG_STATE_UNGATE);
                        amdgpu_device_ip_set_clockgating_state(smu->adev,
                                                               AMD_IP_BLOCK_TYPE_GFX,
                                                               AMD_CG_STATE_UNGATE);
                        smu_gfx_ulv_control(smu, false);
                        smu_deep_sleep_control(smu, false);
                        amdgpu_asic_update_umd_stable_pstate(smu->adev, true);
                }
        } else {
                /* exit umd pstate, restore level, enable gfx cg*/
                if (!(*level & profile_mode_mask)) {
                        if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
                                *level = smu_dpm_ctx->saved_dpm_level;
                        smu_dpm_ctx->enable_umd_pstate = false;
                        amdgpu_asic_update_umd_stable_pstate(smu->adev, false);
                        smu_deep_sleep_control(smu, true);
                        smu_gfx_ulv_control(smu, true);
                        amdgpu_device_ip_set_clockgating_state(smu->adev,
                                                               AMD_IP_BLOCK_TYPE_GFX,
                                                               AMD_CG_STATE_GATE);
                        amdgpu_device_ip_set_powergating_state(smu->adev,
                                                               AMD_IP_BLOCK_TYPE_GFX,
                                                               AMD_PG_STATE_GATE);
                        smu_gpo_control(smu, true);
                }
        }

        return 0;
}

static int smu_bump_power_profile_mode(struct smu_context *smu,
                                       long *param,
                                       uint32_t param_size)
{
        int ret = 0;

        if (smu->ppt_funcs->set_power_profile_mode)
                ret = smu->ppt_funcs->set_power_profile_mode(smu, param, param_size);

        return ret;
}
static int smu_adjust_power_state_dynamic(struct smu_context *smu,
                                          enum amd_dpm_forced_level level,
                                          bool skip_display_settings)
{
        int ret = 0;
        int index = 0;
        long workload;
        struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);

        if (!skip_display_settings) {
                ret = smu_display_config_changed(smu);
                if (ret) {
                        dev_err(smu->adev->dev, "Failed to change display config!");
                        return ret;
                }
        }

        ret = smu_apply_clocks_adjust_rules(smu);
        if (ret) {
                dev_err(smu->adev->dev, "Failed to apply clocks adjust rules!");
                return ret;
        }

        if (!skip_display_settings) {
                ret = smu_notify_smc_display_config(smu);
                if (ret) {
                        dev_err(smu->adev->dev, "Failed to notify smc display config!");
                        return ret;
                }
        }

        if (smu_dpm_ctx->dpm_level != level) {
                ret = smu_asic_set_performance_level(smu, level);
                if (ret) {
                        dev_err(smu->adev->dev, "Failed to set performance level!");
                        return ret;
                }

                /* update the saved copy */
                smu_dpm_ctx->dpm_level = level;
        }

        if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
            smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
                index = fls(smu->workload_mask);
                index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
                workload = smu->workload_setting[index];

                if (smu->power_profile_mode != workload)
                        smu_bump_power_profile_mode(smu, &workload, 0);
        }

        return ret;
}
*smu
,
1680 enum amd_dpm_forced_level level
,
1681 enum amd_pp_task task_id
,
1686 if (!smu
->pm_enabled
|| !smu
->adev
->pm
.dpm_enabled
)
1690 mutex_lock(&smu
->mutex
);
1693 case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE
:
1694 ret
= smu_pre_display_config_changed(smu
);
1697 ret
= smu_adjust_power_state_dynamic(smu
, level
, false);
1699 case AMD_PP_TASK_COMPLETE_INIT
:
1700 case AMD_PP_TASK_READJUST_POWER_STATE
:
1701 ret
= smu_adjust_power_state_dynamic(smu
, level
, true);
1709 mutex_unlock(&smu
->mutex
);
1714 int smu_handle_dpm_task(void *handle
,
1715 enum amd_pp_task task_id
,
1716 enum amd_pm_state_type
*user_state
)
1718 struct smu_context
*smu
= handle
;
1719 struct smu_dpm_context
*smu_dpm
= &smu
->smu_dpm
;
1721 return smu_handle_task(smu
, smu_dpm
->dpm_level
, task_id
, true);
int smu_switch_power_profile(void *handle,
                             enum PP_SMC_POWER_PROFILE type,
                             bool en)
{
        struct smu_context *smu = handle;
        struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
        long workload;
        uint32_t index;

        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;

        if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
                return -EINVAL;

        mutex_lock(&smu->mutex);

        if (!en) {
                smu->workload_mask &= ~(1 << smu->workload_prority[type]);
                index = fls(smu->workload_mask);
                index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
                workload = smu->workload_setting[index];
        } else {
                smu->workload_mask |= (1 << smu->workload_prority[type]);
                index = fls(smu->workload_mask);
                index = index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
                workload = smu->workload_setting[index];
        }

        if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
            smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
                smu_bump_power_profile_mode(smu, &workload, 0);

        mutex_unlock(&smu->mutex);

        return 0;
}
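/*
 * Worked example of the workload bookkeeping above (editorial,
 * illustrative): with BOOTUP_DEFAULT (priority 0) always set, calling
 * smu_switch_power_profile(handle, PP_SMC_POWER_PROFILE_COMPUTE, true)
 * makes workload_mask 0x21 (bit 5 | bit 0). fls(0x21) is 6, so index
 * becomes 5 and workload_setting[5] == PP_SMC_POWER_PROFILE_COMPUTE wins.
 * Disabling it clears bit 5 and selection falls back to
 * workload_setting[0], the bootup default.
 */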
enum amd_dpm_forced_level smu_get_performance_level(void *handle)
{
        struct smu_context *smu = handle;
        struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
        enum amd_dpm_forced_level level;

        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;

        if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
                return -EINVAL;

        mutex_lock(&(smu->mutex));
        level = smu_dpm_ctx->dpm_level;
        mutex_unlock(&(smu->mutex));

        return level;
}

int smu_force_performance_level(void *handle, enum amd_dpm_forced_level level)
{
        struct smu_context *smu = handle;
        struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
        int ret = 0;

        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;

        if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
                return -EINVAL;

        mutex_lock(&smu->mutex);

        ret = smu_enable_umd_pstate(smu, &level);
        if (ret) {
                mutex_unlock(&smu->mutex);
                return ret;
        }

        ret = smu_handle_task(smu, level,
                              AMD_PP_TASK_READJUST_POWER_STATE,
                              false);

        mutex_unlock(&smu->mutex);

        /* reset user dpm clock state */
        if (!ret && smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
                memset(smu->user_dpm_profile.clk_mask, 0, sizeof(smu->user_dpm_profile.clk_mask));
                smu->user_dpm_profile.clk_dependency = 0;
        }

        return ret;
}

int smu_set_display_count(struct smu_context *smu, uint32_t count)
{
        int ret = 0;

        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;

        mutex_lock(&smu->mutex);
        ret = smu_init_display_count(smu, count);
        mutex_unlock(&smu->mutex);

        return ret;
}
static int smu_force_smuclk_levels(struct smu_context *smu,
                                   enum smu_clk_type clk_type,
                                   uint32_t mask)
{
        struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
        int ret = 0;

        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;

        if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
                dev_dbg(smu->adev->dev, "force clock level is for dpm manual mode only.\n");
                return -EINVAL;
        }

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels) {
                ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask);
                if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
                        smu->user_dpm_profile.clk_mask[clk_type] = mask;
                        smu_set_user_clk_dependencies(smu, clk_type);
                }
        }

        mutex_unlock(&smu->mutex);

        return ret;
}
int smu_force_ppclk_levels(void *handle, enum pp_clock_type type, uint32_t mask)
{
        struct smu_context *smu = handle;
        enum smu_clk_type clk_type;

        switch (type) {
        case PP_SCLK:
                clk_type = SMU_SCLK; break;
        case PP_MCLK:
                clk_type = SMU_MCLK; break;
        case PP_PCIE:
                clk_type = SMU_PCIE; break;
        case PP_SOCCLK:
                clk_type = SMU_SOCCLK; break;
        case PP_FCLK:
                clk_type = SMU_FCLK; break;
        case PP_DCEFCLK:
                clk_type = SMU_DCEFCLK; break;
        case PP_VCLK:
                clk_type = SMU_VCLK; break;
        case PP_DCLK:
                clk_type = SMU_DCLK; break;
        case OD_SCLK:
                clk_type = SMU_OD_SCLK; break;
        case OD_MCLK:
                clk_type = SMU_OD_MCLK; break;
        case OD_VDDC_CURVE:
                clk_type = SMU_OD_VDDC_CURVE; break;
        case OD_RANGE:
                clk_type = SMU_OD_RANGE; break;
        default:
                return -EINVAL;
        }

        return smu_force_smuclk_levels(smu, clk_type, mask);
}
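/*
 * Illustrative mapping (editorial assumption about the sysfs caller, not
 * taken from this file): the pp_dpm_sclk handler parses "echo 2" into
 * mask = BIT(2) and calls smu_force_ppclk_levels(handle, PP_SCLK, 0x4),
 * which lands in smu_force_smuclk_levels(smu, SMU_SCLK, 0x4) and pins
 * gfxclk to DPM level 2 while the performance level is
 * AMD_DPM_FORCED_LEVEL_MANUAL.
 */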
/*
 * On system suspending or resetting, the dpm_enabled
 * flag will be cleared, so that those SMU services which
 * are no longer supported will be gated.
 * However, the mp1 state setting should still be granted
 * even with dpm_enabled cleared.
 */
int smu_set_mp1_state(void *handle,
                      enum pp_mp1_state mp1_state)
{
        struct smu_context *smu = handle;
        uint16_t msg;
        int ret;

        if (!smu->pm_enabled)
                return -EOPNOTSUPP;

        mutex_lock(&smu->mutex);

        switch (mp1_state) {
        case PP_MP1_STATE_SHUTDOWN:
                msg = SMU_MSG_PrepareMp1ForShutdown;
                break;
        case PP_MP1_STATE_UNLOAD:
                msg = SMU_MSG_PrepareMp1ForUnload;
                break;
        case PP_MP1_STATE_RESET:
                msg = SMU_MSG_PrepareMp1ForReset;
                break;
        case PP_MP1_STATE_NONE:
        default:
                mutex_unlock(&smu->mutex);
                return 0;
        }

        ret = smu_send_smc_msg(smu, msg, NULL);
        /* some asics may not support those messages */
        if (ret == -EINVAL)
                ret = 0;
        if (ret)
                dev_err(smu->adev->dev, "[PrepareMp1] Failed!\n");

        mutex_unlock(&smu->mutex);

        return ret;
}
int smu_set_df_cstate(void *handle,
                      enum pp_df_cstate state)
{
        struct smu_context *smu = handle;
        int ret = 0;

        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;

        if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)
                return 0;

        mutex_lock(&smu->mutex);

        ret = smu->ppt_funcs->set_df_cstate(smu, state);
        if (ret)
                dev_err(smu->adev->dev, "[SetDfCstate] failed!\n");

        mutex_unlock(&smu->mutex);

        return ret;
}

int smu_allow_xgmi_power_down(struct smu_context *smu, bool en)
{
        int ret = 0;

        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;

        if (!smu->ppt_funcs || !smu->ppt_funcs->allow_xgmi_power_down)
                return 0;

        mutex_lock(&smu->mutex);

        ret = smu->ppt_funcs->allow_xgmi_power_down(smu, en);
        if (ret)
                dev_err(smu->adev->dev, "[AllowXgmiPowerDown] failed!\n");

        mutex_unlock(&smu->mutex);

        return ret;
}

int smu_write_watermarks_table(struct smu_context *smu)
{
        int ret = 0;

        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;

        mutex_lock(&smu->mutex);

        ret = smu_set_watermarks_table(smu, NULL);

        mutex_unlock(&smu->mutex);

        return ret;
}

int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
                                        struct pp_smu_wm_range_sets *clock_ranges)
{
        int ret = 0;

        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;

        if (smu->disable_watermark)
                return 0;

        mutex_lock(&smu->mutex);

        ret = smu_set_watermarks_table(smu, clock_ranges);

        mutex_unlock(&smu->mutex);

        return ret;
}
int smu_set_ac_dc(struct smu_context *smu)
{
        int ret = 0;

        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;

        /* controlled by firmware */
        if (smu->dc_controlled_by_gpio)
                return 0;

        mutex_lock(&smu->mutex);
        ret = smu_set_power_source(smu,
                                   smu->adev->pm.ac_power ? SMU_POWER_SOURCE_AC :
                                   SMU_POWER_SOURCE_DC);
        if (ret)
                dev_err(smu->adev->dev, "Failed to switch to %s mode!\n",
                        smu->adev->pm.ac_power ? "AC" : "DC");
        mutex_unlock(&smu->mutex);

        return ret;
}
const struct amd_ip_funcs smu_ip_funcs = {
        .name = "smu",
        .early_init = smu_early_init,
        .late_init = smu_late_init,
        .sw_init = smu_sw_init,
        .sw_fini = smu_sw_fini,
        .hw_init = smu_hw_init,
        .hw_fini = smu_hw_fini,
        .suspend = smu_suspend,
        .resume = smu_resume,
        .is_idle = NULL,
        .check_soft_reset = NULL,
        .wait_for_idle = NULL,
        .soft_reset = NULL,
        .set_clockgating_state = smu_set_clockgating_state,
        .set_powergating_state = smu_set_powergating_state,
        .enable_umd_pstate = smu_enable_umd_pstate,
};

const struct amdgpu_ip_block_version smu_v11_0_ip_block =
{
        .type = AMD_IP_BLOCK_TYPE_SMC,
        .major = 11,
        .minor = 0,
        .rev = 0,
        .funcs = &smu_ip_funcs,
};

const struct amdgpu_ip_block_version smu_v12_0_ip_block =
{
        .type = AMD_IP_BLOCK_TYPE_SMC,
        .major = 12,
        .minor = 0,
        .rev = 0,
        .funcs = &smu_ip_funcs,
};

const struct amdgpu_ip_block_version smu_v13_0_ip_block =
{
        .type = AMD_IP_BLOCK_TYPE_SMC,
        .major = 13,
        .minor = 0,
        .rev = 0,
        .funcs = &smu_ip_funcs,
};
int smu_load_microcode(struct smu_context *smu)
{
        int ret = 0;

        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs->load_microcode)
                ret = smu->ppt_funcs->load_microcode(smu);

        mutex_unlock(&smu->mutex);

        return ret;
}

int smu_check_fw_status(struct smu_context *smu)
{
        int ret = 0;

        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs->check_fw_status)
                ret = smu->ppt_funcs->check_fw_status(smu);

        mutex_unlock(&smu->mutex);

        return ret;
}

int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
{
        int ret = 0;

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs->set_gfx_cgpg)
                ret = smu->ppt_funcs->set_gfx_cgpg(smu, enabled);

        mutex_unlock(&smu->mutex);

        return ret;
}
int smu_set_fan_speed_rpm(void *handle, uint32_t speed)
{
        struct smu_context *smu = handle;
        u32 percent;
        int ret = 0;

        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs->set_fan_speed_percent) {
                percent = speed * 100 / smu->fan_max_rpm;
                ret = smu->ppt_funcs->set_fan_speed_percent(smu, percent);
                if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE))
                        smu->user_dpm_profile.fan_speed_percent = percent;
        }

        mutex_unlock(&smu->mutex);

        return ret;
}
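/*
 * Conversion example (editorial, illustrative): with fan_max_rpm = 3200, a
 * request of speed = 1600 RPM becomes percent = 1600 * 100 / 3200 = 50,
 * which is what the percent-based ppt callback and the saved user profile
 * both store.
 */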
int smu_get_power_limit(struct smu_context *smu,
                        uint32_t *limit,
                        enum smu_ppt_limit_level limit_level)
{
        uint32_t limit_type = *limit >> 24;
        int ret = 0;

        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;

        mutex_lock(&smu->mutex);

        if (limit_type != SMU_DEFAULT_PPT_LIMIT) {
                if (smu->ppt_funcs->get_ppt_limit)
                        ret = smu->ppt_funcs->get_ppt_limit(smu, limit, limit_type, limit_level);
        } else {
                switch (limit_level) {
                case SMU_PPT_LIMIT_CURRENT:
                        *limit = smu->current_power_limit;
                        break;
                case SMU_PPT_LIMIT_DEFAULT:
                        *limit = smu->default_power_limit;
                        break;
                case SMU_PPT_LIMIT_MAX:
                        *limit = smu->max_power_limit;
                        break;
                default:
                        break;
                }
        }

        mutex_unlock(&smu->mutex);

        return ret;
}
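/*
 * Encoding note (editorial, hedged): the top byte of *limit carries the
 * limit type, the low 24 bits the value. A caller asking for the current
 * fast PPT limit on an ASIC that supports it might pass something like
 *
 *      uint32_t limit = SMU_FAST_PPT_LIMIT << 24;
 *      smu_get_power_limit(smu, &limit, SMU_PPT_LIMIT_CURRENT);
 *
 * while limit_type == SMU_DEFAULT_PPT_LIMIT falls through to the cached
 * values above. (SMU_FAST_PPT_LIMIT is assumed here for illustration.)
 */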
int smu_set_power_limit(void *handle, uint32_t limit)
{
        struct smu_context *smu = handle;
        uint32_t limit_type = limit >> 24;
        int ret = 0;

        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;

        mutex_lock(&smu->mutex);

        if (limit_type != SMU_DEFAULT_PPT_LIMIT)
                if (smu->ppt_funcs->set_power_limit) {
                        ret = smu->ppt_funcs->set_power_limit(smu, limit);
                        goto out;
                }

        if (limit > smu->max_power_limit) {
                dev_err(smu->adev->dev,
                        "New power limit (%d) is over the max allowed %d\n",
                        limit, smu->max_power_limit);
                ret = -EINVAL;
                goto out;
        }

        if (!limit)
                limit = smu->current_power_limit;

        if (smu->ppt_funcs->set_power_limit) {
                ret = smu->ppt_funcs->set_power_limit(smu, limit);
                if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE))
                        smu->user_dpm_profile.power_limit = limit;
        }

out:
        mutex_unlock(&smu->mutex);

        return ret;
}
static int smu_print_smuclk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf)
{
        int ret = 0;

        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs->print_clk_levels)
                ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf);

        mutex_unlock(&smu->mutex);

        return ret;
}

int smu_print_ppclk_levels(void *handle, enum pp_clock_type type, char *buf)
{
        struct smu_context *smu = handle;
        enum smu_clk_type clk_type;

        switch (type) {
        case PP_SCLK:
                clk_type = SMU_SCLK; break;
        case PP_MCLK:
                clk_type = SMU_MCLK; break;
        case PP_PCIE:
                clk_type = SMU_PCIE; break;
        case PP_SOCCLK:
                clk_type = SMU_SOCCLK; break;
        case PP_FCLK:
                clk_type = SMU_FCLK; break;
        case PP_DCEFCLK:
                clk_type = SMU_DCEFCLK; break;
        case PP_VCLK:
                clk_type = SMU_VCLK; break;
        case PP_DCLK:
                clk_type = SMU_DCLK; break;
        case OD_SCLK:
                clk_type = SMU_OD_SCLK; break;
        case OD_MCLK:
                clk_type = SMU_OD_MCLK; break;
        case OD_VDDC_CURVE:
                clk_type = SMU_OD_VDDC_CURVE; break;
        case OD_RANGE:
                clk_type = SMU_OD_RANGE; break;
        case OD_VDDGFX_OFFSET:
                clk_type = SMU_OD_VDDGFX_OFFSET; break;
        case OD_CCLK:
                clk_type = SMU_OD_CCLK; break;
        default:
                return -EINVAL;
        }

        return smu_print_smuclk_levels(smu, clk_type, buf);
}
int smu_od_edit_dpm_table(void *handle,
                          enum PP_OD_DPM_TABLE_COMMAND type,
                          long *input, uint32_t size)
{
        struct smu_context *smu = handle;
        int ret = 0;

        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs->od_edit_dpm_table) {
                ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size);
        }

        mutex_unlock(&smu->mutex);

        return ret;
}
int smu_read_sensor(void *handle, int sensor, void *data, int *size_arg)
{
        struct smu_context *smu = handle;
        struct smu_umd_pstate_table *pstate_table =
                                &smu->pstate_table;
        int ret = 0;
        uint32_t *size, size_val;

        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;

        if (!data || !size_arg)
                return -EINVAL;

        size_val = *size_arg;
        size = &size_val;

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs->read_sensor)
                if (!smu->ppt_funcs->read_sensor(smu, sensor, data, size))
                        goto unlock;

        switch (sensor) {
        case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
                *((uint32_t *)data) = pstate_table->gfxclk_pstate.standard * 100;
                *size = 4;
                break;
        case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
                *((uint32_t *)data) = pstate_table->uclk_pstate.standard * 100;
                *size = 4;
                break;
        case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
                ret = smu_feature_get_enabled_mask(smu, (uint32_t *)data, 2);
                *size = 8;
                break;
        case AMDGPU_PP_SENSOR_UVD_POWER:
                *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT) ? 1 : 0;
                *size = 4;
                break;
        case AMDGPU_PP_SENSOR_VCE_POWER:
                *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0;
                *size = 4;
                break;
        case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
                *(uint32_t *)data = atomic_read(&smu->smu_power.power_gate.vcn_gated) ? 0 : 1;
                *size = 4;
                break;
        case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
                *(uint32_t *)data = 0;
                *size = 4;
                break;
        default:
                *size = 0;
                break;
        }

unlock:
        mutex_unlock(&smu->mutex);

        // assign uint32_t to int
        *size_arg = size_val;

        return ret;
}
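/*
 * Caller sketch (editorial, illustrative, not from this file): reading the
 * VCN power state the way the hwmon/debugfs paths would:
 *
 *      uint32_t val;
 *      int size = sizeof(val);
 *
 *      if (!smu_read_sensor(handle, AMDGPU_PP_SENSOR_VCN_POWER_STATE,
 *                           &val, &size))
 *              ;  // val is 1 while VCN is ungated, 0 while gated
 */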
int smu_get_power_profile_mode(void *handle, char *buf)
{
        struct smu_context *smu = handle;
        int ret = 0;

        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs->get_power_profile_mode)
                ret = smu->ppt_funcs->get_power_profile_mode(smu, buf);

        mutex_unlock(&smu->mutex);

        return ret;
}

int smu_set_power_profile_mode(void *handle, long *param, uint32_t param_size)
{
        struct smu_context *smu = handle;
        int ret = 0;

        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;

        mutex_lock(&smu->mutex);

        smu_bump_power_profile_mode(smu, param, param_size);

        mutex_unlock(&smu->mutex);

        return ret;
}

u32 smu_get_fan_control_mode(void *handle)
{
        struct smu_context *smu = handle;
        u32 ret = 0;

        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return AMD_FAN_CTRL_NONE;

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs->get_fan_control_mode)
                ret = smu->ppt_funcs->get_fan_control_mode(smu);

        mutex_unlock(&smu->mutex);

        return ret;
}
int smu_set_fan_control_mode(struct smu_context *smu, int value)
{
        int ret = 0;

        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs->set_fan_control_mode) {
                ret = smu->ppt_funcs->set_fan_control_mode(smu, value);
                if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE))
                        smu->user_dpm_profile.fan_mode = value;
        }

        mutex_unlock(&smu->mutex);

        /* reset user dpm fan speed */
        if (!ret && value != AMD_FAN_CTRL_MANUAL &&
            !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE))
                smu->user_dpm_profile.fan_speed_percent = 0;

        return ret;
}

void smu_pp_set_fan_control_mode(void *handle, u32 value)
{
        struct smu_context *smu = handle;

        smu_set_fan_control_mode(smu, value);
}
int smu_get_fan_speed_percent(void *handle, u32 *speed)
{
        struct smu_context *smu = handle;
        int ret = 0;
        uint32_t percent;

        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs->get_fan_speed_percent) {
                ret = smu->ppt_funcs->get_fan_speed_percent(smu, &percent);
                if (!ret)
                        *speed = percent > 100 ? 100 : percent;
        }

        mutex_unlock(&smu->mutex);

        return ret;
}

int smu_set_fan_speed_percent(void *handle, u32 speed)
{
        struct smu_context *smu = handle;
        int ret = 0;

        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs->set_fan_speed_percent) {
                if (speed > 100)
                        speed = 100;
                ret = smu->ppt_funcs->set_fan_speed_percent(smu, speed);
                if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE))
                        smu->user_dpm_profile.fan_speed_percent = speed;
        }

        mutex_unlock(&smu->mutex);

        return ret;
}
int smu_get_fan_speed_rpm(void *handle, uint32_t *speed)
{
	struct smu_context *smu = handle;
	int ret = 0;
	uint32_t percent;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_fan_speed_percent) {
		ret = smu->ppt_funcs->get_fan_speed_percent(smu, &percent);
		/* scale the reported percentage against the maximum fan speed */
		*speed = percent * smu->fan_max_rpm / 100;
	}

	mutex_unlock(&smu->mutex);

	return ret;
}

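/*
 * Worked example of the conversion above (illustrative numbers): with
 * smu->fan_max_rpm = 3200 (populated from the firmware tables) and the
 * SMU reporting percent = 45, the result is 45 * 3200 / 100 = 1440 RPM.
 */
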
int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	ret = smu_set_min_dcef_deep_sleep(smu, clk);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_get_clock_by_type_with_latency(struct smu_context *smu,
				       enum smu_clk_type clk_type,
				       struct pp_clock_levels_with_latency *clocks)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_clock_by_type_with_latency)
		ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_display_clock_voltage_request(struct smu_context *smu,
				      struct pp_display_clock_request *clock_req)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->display_clock_voltage_request)
		ret = smu->ppt_funcs->display_clock_voltage_request(smu, clock_req);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_display_disable_memory_clock_switch(struct smu_context *smu,
					    bool disable_memory_clock_switch)
{
	int ret = -EINVAL;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->display_disable_memory_clock_switch)
		ret = smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_set_xgmi_pstate(void *handle,
			uint32_t pstate)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_xgmi_pstate)
		ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate);

	mutex_unlock(&smu->mutex);

	if (ret)
		dev_err(smu->adev->dev, "Failed to set XGMI pstate!\n");

	return ret;
}

int smu_set_azalia_d3_pme(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_azalia_d3_pme)
		ret = smu->ppt_funcs->set_azalia_d3_pme(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}

/*
 * On system suspend or reset, the dpm_enabled flag will be cleared,
 * so that the SMU services which are no longer usable get gated.
 *
 * However, the BACO/mode1 reset requests should still be granted,
 * as they remain supported and necessary during those transitions.
 */
bool smu_baco_is_support(struct smu_context *smu)
{
	bool ret = false;

	if (!smu->pm_enabled)
		return false;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs && smu->ppt_funcs->baco_is_support)
		ret = smu->ppt_funcs->baco_is_support(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}

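/*
 * Illustrative sketch (hypothetical recovery path, not part of this file):
 * because smu_baco_is_support() only checks pm_enabled, a reset path can
 * still pick a reset method after dpm_enabled has been cleared:
 *
 *	if (smu_baco_is_support(smu))
 *		ret = smu_baco_enter(smu);
 *	else if (smu_mode1_reset_is_support(smu))
 *		ret = smu_mode1_reset(smu);
 */
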
int smu_get_baco_capability(void *handle, bool *cap)
{
	struct smu_context *smu = handle;
	int ret = 0;

	*cap = false;

	if (!smu->pm_enabled)
		return 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs && smu->ppt_funcs->baco_is_support)
		*cap = smu->ppt_funcs->baco_is_support(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_baco_get_state(struct smu_context *smu, enum smu_baco_state *state)
{
	if (!smu->ppt_funcs->baco_get_state)
		return -EINVAL;

	mutex_lock(&smu->mutex);
	*state = smu->ppt_funcs->baco_get_state(smu);
	mutex_unlock(&smu->mutex);

	return 0;
}

int smu_baco_enter(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->baco_enter)
		ret = smu->ppt_funcs->baco_enter(smu);

	mutex_unlock(&smu->mutex);

	if (ret)
		dev_err(smu->adev->dev, "Failed to enter BACO state!\n");

	return ret;
}

int smu_baco_exit(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->baco_exit)
		ret = smu->ppt_funcs->baco_exit(smu);

	mutex_unlock(&smu->mutex);

	if (ret)
		dev_err(smu->adev->dev, "Failed to exit BACO state!\n");

	return ret;
}

int smu_baco_set_state(void *handle, int state)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled)
		return -EOPNOTSUPP;

	if (state == 0) {
		mutex_lock(&smu->mutex);

		if (smu->ppt_funcs->baco_exit)
			ret = smu->ppt_funcs->baco_exit(smu);

		mutex_unlock(&smu->mutex);
	} else if (state == 1) {
		mutex_lock(&smu->mutex);

		if (smu->ppt_funcs->baco_enter)
			ret = smu->ppt_funcs->baco_enter(smu);

		mutex_unlock(&smu->mutex);
	} else {
		return -EINVAL;
	}

	if (ret)
		dev_err(smu->adev->dev, "Failed to %s BACO state!\n",
			state ? "enter" : "exit");

	return ret;
}

bool smu_mode1_reset_is_support(struct smu_context *smu)
{
	bool ret = false;

	if (!smu->pm_enabled)
		return false;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs && smu->ppt_funcs->mode1_reset_is_support)
		ret = smu->ppt_funcs->mode1_reset_is_support(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}

bool smu_mode2_reset_is_support(struct smu_context *smu)
{
	bool ret = false;

	if (!smu->pm_enabled)
		return false;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs && smu->ppt_funcs->mode2_reset_is_support)
		ret = smu->ppt_funcs->mode2_reset_is_support(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_mode1_reset(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->mode1_reset)
		ret = smu->ppt_funcs->mode1_reset(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_mode2_reset(void *handle)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->mode2_reset)
		ret = smu->ppt_funcs->mode2_reset(smu);

	mutex_unlock(&smu->mutex);

	if (ret)
		dev_err(smu->adev->dev, "Mode2 reset failed!\n");

	return ret;
}

int smu_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
					 struct pp_smu_nv_clock_table *max_clocks)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
		ret = smu->ppt_funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_get_uclk_dpm_states(struct smu_context *smu,
			    unsigned int *clock_values_in_khz,
			    unsigned int *num_states)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_uclk_dpm_states)
		ret = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states);

	mutex_unlock(&smu->mutex);

	return ret;
}

enum amd_pm_state_type smu_get_current_power_state(void *handle)
{
	struct smu_context *smu = handle;
	enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return pm_state;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_current_power_state)
		pm_state = smu->ppt_funcs->get_current_power_state(smu);

	mutex_unlock(&smu->mutex);

	return pm_state;
}

int smu_get_dpm_clock_table(struct smu_context *smu,
			    struct dpm_clocks *clock_table)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_dpm_clock_table)
		ret = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table);

	mutex_unlock(&smu->mutex);

	return ret;
}

ssize_t smu_sys_get_gpu_metrics(void *handle, void **table)
{
	struct smu_context *smu = handle;
	ssize_t size;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->ppt_funcs->get_gpu_metrics)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	size = smu->ppt_funcs->get_gpu_metrics(smu, table);

	mutex_unlock(&smu->mutex);

	return size;
}

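/*
 * Illustrative sketch (user_buf and user_buf_len are hypothetical): the
 * get_gpu_metrics callback hands back a pointer to a driver-owned metrics
 * table through *table rather than copying it, so a consumer only borrows
 * the buffer:
 *
 *	void *table;
 *	ssize_t size = smu_sys_get_gpu_metrics(smu, &table);
 *
 *	if (size > 0)
 *		memcpy(user_buf, table, min_t(ssize_t, size, user_buf_len));
 */
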
int smu_enable_mgpu_fan_boost(void *handle)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->enable_mgpu_fan_boost)
		ret = smu->ppt_funcs->enable_mgpu_fan_boost(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_gfx_state_change_set(struct smu_context *smu, uint32_t state)
{
	int ret = 0;

	mutex_lock(&smu->mutex);
	if (smu->ppt_funcs->gfx_state_change_set)
		ret = smu->ppt_funcs->gfx_state_change_set(smu, state);
	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_set_light_sbr(struct smu_context *smu, bool enable)
{
	int ret = 0;

	mutex_lock(&smu->mutex);
	if (smu->ppt_funcs->set_light_sbr)
		ret = smu->ppt_funcs->set_light_sbr(smu, enable);
	mutex_unlock(&smu->mutex);

	return ret;
}

static const struct amd_pm_funcs swsmu_pm_funcs = {
	/* export for sysfs */
	.set_fan_control_mode    = smu_pp_set_fan_control_mode,
	.get_fan_control_mode    = smu_get_fan_control_mode,
	.set_fan_speed_percent   = smu_set_fan_speed_percent,
	.get_fan_speed_percent   = smu_get_fan_speed_percent,
	.force_performance_level = smu_force_performance_level,
	.read_sensor             = smu_read_sensor,
	.get_performance_level   = smu_get_performance_level,
	.get_current_power_state = smu_get_current_power_state,
	.get_fan_speed_rpm       = smu_get_fan_speed_rpm,
	.set_fan_speed_rpm       = smu_set_fan_speed_rpm,
	.get_pp_num_states       = smu_get_power_num_states,
	.get_pp_table            = smu_sys_get_pp_table,
	.set_pp_table            = smu_sys_set_pp_table,
	.switch_power_profile    = smu_switch_power_profile,
	/* export to amdgpu */
	.dispatch_tasks          = smu_handle_dpm_task,
	.set_powergating_by_smu  = smu_dpm_set_power_gate,
	.set_power_limit         = smu_set_power_limit,
	.odn_edit_dpm_table      = smu_od_edit_dpm_table,
	.set_mp1_state           = smu_set_mp1_state,
	/* export to DC */
	.get_sclk                = smu_get_sclk,
	.get_mclk                = smu_get_mclk,
	.enable_mgpu_fan_boost   = smu_enable_mgpu_fan_boost,
	.get_asic_baco_capability = smu_get_baco_capability,
	.set_asic_baco_state     = smu_baco_set_state,
	.get_ppfeature_status    = smu_sys_get_pp_feature_mask,
	.set_ppfeature_status    = smu_sys_set_pp_feature_mask,
	.asic_reset_mode_2       = smu_mode2_reset,
	.set_df_cstate           = smu_set_df_cstate,
	.set_xgmi_pstate         = smu_set_xgmi_pstate,
	.get_gpu_metrics         = smu_sys_get_gpu_metrics,
	.set_power_profile_mode  = smu_set_power_profile_mode,
	.get_power_profile_mode  = smu_get_power_profile_mode,
	.force_clock_level       = smu_force_ppclk_levels,
	.print_clock_levels      = smu_print_ppclk_levels,