2 * Copyright 2019 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
24 #include <linux/firmware.h>
27 #include "amdgpu_smu.h"
28 #include "soc15_common.h"
29 #include "smu_v11_0.h"
33 int smu_get_smc_version(struct smu_context
*smu
, uint32_t *if_version
, uint32_t *smu_version
)
37 if (!if_version
&& !smu_version
)
41 ret
= smu_send_smc_msg(smu
, SMU_MSG_GetDriverIfVersion
);
45 ret
= smu_read_smc_arg(smu
, if_version
);
51 ret
= smu_send_smc_msg(smu
, SMU_MSG_GetSmuVersion
);
55 ret
= smu_read_smc_arg(smu
, smu_version
);
63 int smu_dpm_set_power_gate(struct smu_context
*smu
, uint32_t block_type
,
69 case AMD_IP_BLOCK_TYPE_UVD
:
70 ret
= smu_dpm_set_uvd_enable(smu
, gate
);
72 case AMD_IP_BLOCK_TYPE_VCE
:
73 ret
= smu_dpm_set_vce_enable(smu
, gate
);
82 enum amd_pm_state_type
smu_get_current_power_state(struct smu_context
*smu
)
84 /* not support power state */
85 return POWER_STATE_TYPE_DEFAULT
;
88 int smu_get_power_num_states(struct smu_context
*smu
,
89 struct pp_states_info
*state_info
)
94 /* not support power state */
95 memset(state_info
, 0, sizeof(struct pp_states_info
));
101 int smu_common_read_sensor(struct smu_context
*smu
, enum amd_pp_sensors sensor
,
102 void *data
, uint32_t *size
)
107 case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK
:
108 *((uint32_t *)data
) = smu
->pstate_sclk
;
111 case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK
:
112 *((uint32_t *)data
) = smu
->pstate_mclk
;
115 case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK
:
116 ret
= smu_feature_get_enabled_mask(smu
, (uint32_t *)data
, 2);
130 int smu_update_table_with_arg(struct smu_context
*smu
, uint16_t table_id
, uint16_t exarg
,
131 void *table_data
, bool drv2smu
)
133 struct smu_table_context
*smu_table
= &smu
->smu_table
;
134 struct smu_table
*table
= NULL
;
136 uint32_t table_index
;
138 if (!table_data
|| table_id
>= smu_table
->table_count
)
141 table_index
= (exarg
<< 16) | table_id
;
143 table
= &smu_table
->tables
[table_id
];
146 memcpy(table
->cpu_addr
, table_data
, table
->size
);
148 ret
= smu_send_smc_msg_with_param(smu
, SMU_MSG_SetDriverDramAddrHigh
,
149 upper_32_bits(table
->mc_address
));
152 ret
= smu_send_smc_msg_with_param(smu
, SMU_MSG_SetDriverDramAddrLow
,
153 lower_32_bits(table
->mc_address
));
156 ret
= smu_send_smc_msg_with_param(smu
, drv2smu
?
157 SMU_MSG_TransferTableDram2Smu
:
158 SMU_MSG_TransferTableSmu2Dram
,
164 memcpy(table_data
, table
->cpu_addr
, table
->size
);
169 bool is_support_sw_smu(struct amdgpu_device
*adev
)
171 if (adev
->asic_type
== CHIP_VEGA20
)
172 return (amdgpu_dpm
== 2) ? true : false;
173 else if (adev
->asic_type
>= CHIP_NAVI10
)
179 int smu_sys_get_pp_table(struct smu_context
*smu
, void **table
)
181 struct smu_table_context
*smu_table
= &smu
->smu_table
;
183 if (!smu_table
->power_play_table
&& !smu_table
->hardcode_pptable
)
186 if (smu_table
->hardcode_pptable
)
187 *table
= smu_table
->hardcode_pptable
;
189 *table
= smu_table
->power_play_table
;
191 return smu_table
->power_play_table_size
;
194 int smu_sys_set_pp_table(struct smu_context
*smu
, void *buf
, size_t size
)
196 struct smu_table_context
*smu_table
= &smu
->smu_table
;
197 ATOM_COMMON_TABLE_HEADER
*header
= (ATOM_COMMON_TABLE_HEADER
*)buf
;
200 if (!smu
->pm_enabled
)
202 if (header
->usStructureSize
!= size
) {
203 pr_err("pp table size not matched !\n");
207 mutex_lock(&smu
->mutex
);
208 if (!smu_table
->hardcode_pptable
)
209 smu_table
->hardcode_pptable
= kzalloc(size
, GFP_KERNEL
);
210 if (!smu_table
->hardcode_pptable
) {
215 memcpy(smu_table
->hardcode_pptable
, buf
, size
);
216 smu_table
->power_play_table
= smu_table
->hardcode_pptable
;
217 smu_table
->power_play_table_size
= size
;
218 mutex_unlock(&smu
->mutex
);
220 ret
= smu_reset(smu
);
222 pr_info("smu reset failed, ret = %d\n", ret
);
227 mutex_unlock(&smu
->mutex
);
231 int smu_feature_init_dpm(struct smu_context
*smu
)
233 struct smu_feature
*feature
= &smu
->smu_feature
;
235 uint32_t allowed_feature_mask
[SMU_FEATURE_MAX
/32];
237 if (!smu
->pm_enabled
)
239 mutex_lock(&feature
->mutex
);
240 bitmap_zero(feature
->allowed
, SMU_FEATURE_MAX
);
241 mutex_unlock(&feature
->mutex
);
243 ret
= smu_get_allowed_feature_mask(smu
, allowed_feature_mask
,
248 mutex_lock(&feature
->mutex
);
249 bitmap_or(feature
->allowed
, feature
->allowed
,
250 (unsigned long *)allowed_feature_mask
,
251 feature
->feature_num
);
252 mutex_unlock(&feature
->mutex
);
257 int smu_feature_is_enabled(struct smu_context
*smu
, enum smu_feature_mask mask
)
259 struct smu_feature
*feature
= &smu
->smu_feature
;
263 feature_id
= smu_feature_get_index(smu
, mask
);
265 WARN_ON(feature_id
> feature
->feature_num
);
267 mutex_lock(&feature
->mutex
);
268 ret
= test_bit(feature_id
, feature
->enabled
);
269 mutex_unlock(&feature
->mutex
);
274 int smu_feature_set_enabled(struct smu_context
*smu
, enum smu_feature_mask mask
,
277 struct smu_feature
*feature
= &smu
->smu_feature
;
281 feature_id
= smu_feature_get_index(smu
, mask
);
283 WARN_ON(feature_id
> feature
->feature_num
);
285 mutex_lock(&feature
->mutex
);
286 ret
= smu_feature_update_enable_state(smu
, feature_id
, enable
);
291 test_and_set_bit(feature_id
, feature
->enabled
);
293 test_and_clear_bit(feature_id
, feature
->enabled
);
296 mutex_unlock(&feature
->mutex
);
301 int smu_feature_is_supported(struct smu_context
*smu
, enum smu_feature_mask mask
)
303 struct smu_feature
*feature
= &smu
->smu_feature
;
307 feature_id
= smu_feature_get_index(smu
, mask
);
309 WARN_ON(feature_id
> feature
->feature_num
);
311 mutex_lock(&feature
->mutex
);
312 ret
= test_bit(feature_id
, feature
->supported
);
313 mutex_unlock(&feature
->mutex
);
318 int smu_feature_set_supported(struct smu_context
*smu
,
319 enum smu_feature_mask mask
,
322 struct smu_feature
*feature
= &smu
->smu_feature
;
326 feature_id
= smu_feature_get_index(smu
, mask
);
328 WARN_ON(feature_id
> feature
->feature_num
);
330 mutex_lock(&feature
->mutex
);
332 test_and_set_bit(feature_id
, feature
->supported
);
334 test_and_clear_bit(feature_id
, feature
->supported
);
335 mutex_unlock(&feature
->mutex
);
340 static int smu_set_funcs(struct amdgpu_device
*adev
)
342 struct smu_context
*smu
= &adev
->smu
;
344 switch (adev
->asic_type
) {
347 if (adev
->pm
.pp_feature
& PP_OVERDRIVE_MASK
)
348 smu
->od_enabled
= true;
349 smu_v11_0_set_smu_funcs(smu
);
358 static int smu_early_init(void *handle
)
360 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
361 struct smu_context
*smu
= &adev
->smu
;
364 smu
->pm_enabled
= !!amdgpu_dpm
;
365 mutex_init(&smu
->mutex
);
367 return smu_set_funcs(adev
);
370 static int smu_late_init(void *handle
)
372 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
373 struct smu_context
*smu
= &adev
->smu
;
375 if (!smu
->pm_enabled
)
377 mutex_lock(&smu
->mutex
);
378 smu_handle_task(&adev
->smu
,
379 smu
->smu_dpm
.dpm_level
,
380 AMD_PP_TASK_COMPLETE_INIT
);
381 mutex_unlock(&smu
->mutex
);
386 int smu_get_atom_data_table(struct smu_context
*smu
, uint32_t table
,
387 uint16_t *size
, uint8_t *frev
, uint8_t *crev
,
390 struct amdgpu_device
*adev
= smu
->adev
;
393 if (!amdgpu_atom_parse_data_header(adev
->mode_info
.atom_context
, table
,
394 size
, frev
, crev
, &data_start
))
397 *addr
= (uint8_t *)adev
->mode_info
.atom_context
->bios
+ data_start
;
402 static int smu_initialize_pptable(struct smu_context
*smu
)
/* Software-side setup of SMC table and power contexts. */
static int smu_smc_table_sw_init(struct smu_context *smu)
{
	int ret;

	ret = smu_initialize_pptable(smu);
	if (ret) {
		pr_err("Failed to init smu_initialize_pptable!\n");
		return ret;
	}

	/*
	 * Create smu_table structure, and init smc tables such as
	 * TABLE_PPTABLE, TABLE_WATERMARKS, TABLE_SMU_METRICS, and etc.
	 */
	ret = smu_init_smc_tables(smu);
	if (ret) {
		pr_err("Failed to init smc tables!\n");
		return ret;
	}

	/*
	 * Create smu_power_context structure, and allocate smu_dpm_context and
	 * context size to fill the smu_power_context data.
	 */
	ret = smu_init_power(smu);
	if (ret) {
		pr_err("Failed to init smu_init_power!\n");
		return ret;
	}

	return 0;
}
/* Software-side teardown of the SMC tables. */
static int smu_smc_table_sw_fini(struct smu_context *smu)
{
	int ret;

	ret = smu_fini_smc_tables(smu);
	if (ret) {
		pr_err("Failed to smu_fini_smc_tables!\n");
		return ret;
	}

	return 0;
}
454 static int smu_sw_init(void *handle
)
456 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
457 struct smu_context
*smu
= &adev
->smu
;
460 smu
->pool_size
= adev
->pm
.smu_prv_buffer_size
;
461 smu
->smu_feature
.feature_num
= SMU_FEATURE_MAX
;
462 mutex_init(&smu
->smu_feature
.mutex
);
463 bitmap_zero(smu
->smu_feature
.supported
, SMU_FEATURE_MAX
);
464 bitmap_zero(smu
->smu_feature
.enabled
, SMU_FEATURE_MAX
);
465 bitmap_zero(smu
->smu_feature
.allowed
, SMU_FEATURE_MAX
);
466 smu
->watermarks_bitmap
= 0;
467 smu
->power_profile_mode
= PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT
;
468 smu
->default_power_profile_mode
= PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT
;
470 smu
->workload_mask
= 1 << smu
->workload_prority
[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT
];
471 smu
->workload_prority
[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT
] = 0;
472 smu
->workload_prority
[PP_SMC_POWER_PROFILE_FULLSCREEN3D
] = 1;
473 smu
->workload_prority
[PP_SMC_POWER_PROFILE_POWERSAVING
] = 2;
474 smu
->workload_prority
[PP_SMC_POWER_PROFILE_VIDEO
] = 3;
475 smu
->workload_prority
[PP_SMC_POWER_PROFILE_VR
] = 4;
476 smu
->workload_prority
[PP_SMC_POWER_PROFILE_COMPUTE
] = 5;
477 smu
->workload_prority
[PP_SMC_POWER_PROFILE_CUSTOM
] = 6;
479 smu
->workload_setting
[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT
;
480 smu
->workload_setting
[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D
;
481 smu
->workload_setting
[2] = PP_SMC_POWER_PROFILE_POWERSAVING
;
482 smu
->workload_setting
[3] = PP_SMC_POWER_PROFILE_VIDEO
;
483 smu
->workload_setting
[4] = PP_SMC_POWER_PROFILE_VR
;
484 smu
->workload_setting
[5] = PP_SMC_POWER_PROFILE_COMPUTE
;
485 smu
->workload_setting
[6] = PP_SMC_POWER_PROFILE_CUSTOM
;
486 smu
->display_config
= &adev
->pm
.pm_display_cfg
;
488 smu
->smu_dpm
.dpm_level
= AMD_DPM_FORCED_LEVEL_AUTO
;
489 smu
->smu_dpm
.requested_dpm_level
= AMD_DPM_FORCED_LEVEL_AUTO
;
490 ret
= smu_init_microcode(smu
);
492 pr_err("Failed to load smu firmware!\n");
496 ret
= smu_smc_table_sw_init(smu
);
498 pr_err("Failed to sw init smc table!\n");
505 static int smu_sw_fini(void *handle
)
507 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
508 struct smu_context
*smu
= &adev
->smu
;
511 ret
= smu_smc_table_sw_fini(smu
);
513 pr_err("Failed to sw fini smc table!\n");
517 ret
= smu_fini_power(smu
);
519 pr_err("Failed to init smu_fini_power!\n");
526 static int smu_init_fb_allocations(struct smu_context
*smu
)
528 struct amdgpu_device
*adev
= smu
->adev
;
529 struct smu_table_context
*smu_table
= &smu
->smu_table
;
530 struct smu_table
*tables
= smu_table
->tables
;
531 uint32_t table_count
= smu_table
->table_count
;
535 if (table_count
<= 0)
538 for (i
= 0 ; i
< table_count
; i
++) {
539 if (tables
[i
].size
== 0)
541 ret
= amdgpu_bo_create_kernel(adev
,
546 &tables
[i
].mc_address
,
547 &tables
[i
].cpu_addr
);
555 if (tables
[i
].size
== 0)
557 amdgpu_bo_free_kernel(&tables
[i
].bo
,
558 &tables
[i
].mc_address
,
559 &tables
[i
].cpu_addr
);
565 static int smu_fini_fb_allocations(struct smu_context
*smu
)
567 struct smu_table_context
*smu_table
= &smu
->smu_table
;
568 struct smu_table
*tables
= smu_table
->tables
;
569 uint32_t table_count
= smu_table
->table_count
;
572 if (table_count
== 0 || tables
== NULL
)
575 for (i
= 0 ; i
< table_count
; i
++) {
576 if (tables
[i
].size
== 0)
578 amdgpu_bo_free_kernel(&tables
[i
].bo
,
579 &tables
[i
].mc_address
,
580 &tables
[i
].cpu_addr
);
586 static int smu_override_pcie_parameters(struct smu_context
*smu
)
588 struct amdgpu_device
*adev
= smu
->adev
;
589 uint32_t pcie_gen
= 0, pcie_width
= 0, smu_pcie_arg
;
592 if (adev
->pm
.pcie_gen_mask
& CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4
)
594 else if (adev
->pm
.pcie_gen_mask
& CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3
)
596 else if (adev
->pm
.pcie_gen_mask
& CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2
)
598 else if (adev
->pm
.pcie_gen_mask
& CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1
)
601 /* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
602 * Bit 15:8: PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
603 * Bit 7:0: PCIE lane width, 1 to 7 corresponds is x1 to x32
605 if (adev
->pm
.pcie_mlw_mask
& CAIL_PCIE_LINK_WIDTH_SUPPORT_X16
)
607 else if (adev
->pm
.pcie_mlw_mask
& CAIL_PCIE_LINK_WIDTH_SUPPORT_X12
)
609 else if (adev
->pm
.pcie_mlw_mask
& CAIL_PCIE_LINK_WIDTH_SUPPORT_X8
)
611 else if (adev
->pm
.pcie_mlw_mask
& CAIL_PCIE_LINK_WIDTH_SUPPORT_X4
)
613 else if (adev
->pm
.pcie_mlw_mask
& CAIL_PCIE_LINK_WIDTH_SUPPORT_X2
)
615 else if (adev
->pm
.pcie_mlw_mask
& CAIL_PCIE_LINK_WIDTH_SUPPORT_X1
)
618 smu_pcie_arg
= (1 << 16) | (pcie_gen
<< 8) | pcie_width
;
619 ret
= smu_send_smc_msg_with_param(smu
,
620 SMU_MSG_OverridePcieParameters
,
623 pr_err("[%s] Attempt to override pcie params failed!\n", __func__
);
627 static int smu_smc_table_hw_init(struct smu_context
*smu
,
630 struct amdgpu_device
*adev
= smu
->adev
;
633 if (smu_is_dpm_running(smu
) && adev
->in_suspend
) {
634 pr_info("dpm has been enabled\n");
638 ret
= smu_init_display(smu
);
643 /* get boot_values from vbios to set revision, gfxclk, and etc. */
644 ret
= smu_get_vbios_bootup_values(smu
);
648 ret
= smu_setup_pptable(smu
);
653 * check if the format_revision in vbios is up to pptable header
654 * version, and the structure size is not 0.
656 ret
= smu_check_pptable(smu
);
661 * allocate vram bos to store smc table contents.
663 ret
= smu_init_fb_allocations(smu
);
668 * Parse pptable format and fill PPTable_t smc_pptable to
669 * smu_table_context structure. And read the smc_dpm_table from vbios,
670 * then fill it into smc_pptable.
672 ret
= smu_parse_pptable(smu
);
677 * Send msg GetDriverIfVersion to check if the return value is equal
678 * with DRIVER_IF_VERSION of smc header.
680 ret
= smu_check_fw_version(smu
);
686 * Copy pptable bo in the vram to smc with SMU MSGs such as
687 * SetDriverDramAddr and TransferTableDram2Smu.
689 ret
= smu_write_pptable(smu
);
693 /* issue RunAfllBtc msg */
694 ret
= smu_run_afll_btc(smu
);
698 ret
= smu_feature_set_allowed_mask(smu
);
702 ret
= smu_system_features_control(smu
, true);
706 ret
= smu_override_pcie_parameters(smu
);
710 ret
= smu_notify_display_change(smu
);
715 * Set min deep sleep dce fclk with bootup value from vbios via
716 * SetMinDeepSleepDcefclk MSG.
718 ret
= smu_set_min_dcef_deep_sleep(smu
);
723 * Set initialized values (get from vbios) to dpm tables context such as
724 * gfxclk, memclk, dcefclk, and etc. And enable the DPM feature for each
728 ret
= smu_populate_smc_pptable(smu
);
732 ret
= smu_init_max_sustainable_clocks(smu
);
737 ret
= smu_set_od8_default_settings(smu
, initialize
);
742 ret
= smu_populate_umd_state_clk(smu
);
746 ret
= smu_get_power_limit(smu
, &smu
->default_power_limit
, false);
752 * Set PMSTATUSLOG table bo address with SetToolsDramAddr MSG for tools.
754 ret
= smu_set_tool_table_location(smu
);
756 if (!smu_is_dpm_running(smu
))
757 pr_info("dpm has been disabled\n");
763 * smu_alloc_memory_pool - allocate memory pool in the system memory
765 * @smu: amdgpu_device pointer
767 * This memory pool will be used for SMC use and msg SetSystemVirtualDramAddr
768 * and DramLogSetDramAddr can notify it changed.
770 * Returns 0 on success, error on failure.
772 static int smu_alloc_memory_pool(struct smu_context
*smu
)
774 struct amdgpu_device
*adev
= smu
->adev
;
775 struct smu_table_context
*smu_table
= &smu
->smu_table
;
776 struct smu_table
*memory_pool
= &smu_table
->memory_pool
;
777 uint64_t pool_size
= smu
->pool_size
;
780 if (pool_size
== SMU_MEMORY_POOL_SIZE_ZERO
)
783 memory_pool
->size
= pool_size
;
784 memory_pool
->align
= PAGE_SIZE
;
785 memory_pool
->domain
= AMDGPU_GEM_DOMAIN_GTT
;
788 case SMU_MEMORY_POOL_SIZE_256_MB
:
789 case SMU_MEMORY_POOL_SIZE_512_MB
:
790 case SMU_MEMORY_POOL_SIZE_1_GB
:
791 case SMU_MEMORY_POOL_SIZE_2_GB
:
792 ret
= amdgpu_bo_create_kernel(adev
,
797 &memory_pool
->mc_address
,
798 &memory_pool
->cpu_addr
);
807 static int smu_free_memory_pool(struct smu_context
*smu
)
809 struct smu_table_context
*smu_table
= &smu
->smu_table
;
810 struct smu_table
*memory_pool
= &smu_table
->memory_pool
;
813 if (memory_pool
->size
== SMU_MEMORY_POOL_SIZE_ZERO
)
816 amdgpu_bo_free_kernel(&memory_pool
->bo
,
817 &memory_pool
->mc_address
,
818 &memory_pool
->cpu_addr
);
820 memset(memory_pool
, 0, sizeof(struct smu_table
));
825 static int smu_hw_init(void *handle
)
828 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
829 struct smu_context
*smu
= &adev
->smu
;
831 if (adev
->firmware
.load_type
== AMDGPU_FW_LOAD_PSP
) {
832 ret
= smu_check_fw_status(smu
);
834 pr_err("SMC firmware status is not correct\n");
839 mutex_lock(&smu
->mutex
);
841 ret
= smu_feature_init_dpm(smu
);
845 ret
= smu_smc_table_hw_init(smu
, true);
849 ret
= smu_alloc_memory_pool(smu
);
854 * Use msg SetSystemVirtualDramAddr and DramLogSetDramAddr can notify
857 ret
= smu_notify_memory_pool_location(smu
);
861 ret
= smu_start_thermal_control(smu
);
865 mutex_unlock(&smu
->mutex
);
867 if (!smu
->pm_enabled
)
868 adev
->pm
.dpm_enabled
= false;
870 adev
->pm
.dpm_enabled
= true;
872 pr_info("SMU is initialized successfully!\n");
877 mutex_unlock(&smu
->mutex
);
881 static int smu_hw_fini(void *handle
)
883 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
884 struct smu_context
*smu
= &adev
->smu
;
885 struct smu_table_context
*table_context
= &smu
->smu_table
;
888 kfree(table_context
->driver_pptable
);
889 table_context
->driver_pptable
= NULL
;
891 kfree(table_context
->max_sustainable_clocks
);
892 table_context
->max_sustainable_clocks
= NULL
;
894 kfree(table_context
->od_feature_capabilities
);
895 table_context
->od_feature_capabilities
= NULL
;
897 kfree(table_context
->od_settings_max
);
898 table_context
->od_settings_max
= NULL
;
900 kfree(table_context
->od_settings_min
);
901 table_context
->od_settings_min
= NULL
;
903 kfree(table_context
->overdrive_table
);
904 table_context
->overdrive_table
= NULL
;
906 kfree(table_context
->od8_settings
);
907 table_context
->od8_settings
= NULL
;
909 ret
= smu_fini_fb_allocations(smu
);
913 ret
= smu_free_memory_pool(smu
);
920 int smu_reset(struct smu_context
*smu
)
922 struct amdgpu_device
*adev
= smu
->adev
;
925 ret
= smu_hw_fini(adev
);
929 ret
= smu_hw_init(adev
);
936 static int smu_suspend(void *handle
)
939 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
940 struct smu_context
*smu
= &adev
->smu
;
942 ret
= smu_system_features_control(smu
, false);
946 smu
->watermarks_bitmap
&= ~(WATERMARKS_LOADED
);
951 static int smu_resume(void *handle
)
954 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
955 struct smu_context
*smu
= &adev
->smu
;
957 pr_info("SMU is resuming...\n");
959 mutex_lock(&smu
->mutex
);
961 ret
= smu_smc_table_hw_init(smu
, false);
965 ret
= smu_start_thermal_control(smu
);
969 mutex_unlock(&smu
->mutex
);
971 pr_info("SMU is resumed successfully!\n");
975 mutex_unlock(&smu
->mutex
);
979 int smu_display_configuration_change(struct smu_context
*smu
,
980 const struct amd_pp_display_configuration
*display_config
)
983 int num_of_active_display
= 0;
985 if (!smu
->pm_enabled
|| !is_support_sw_smu(smu
->adev
))
991 mutex_lock(&smu
->mutex
);
993 smu_set_deep_sleep_dcefclk(smu
,
994 display_config
->min_dcef_deep_sleep_set_clk
/ 100);
996 for (index
= 0; index
< display_config
->num_path_including_non_display
; index
++) {
997 if (display_config
->displays
[index
].controller_id
!= 0)
998 num_of_active_display
++;
1001 smu_set_active_display_count(smu
, num_of_active_display
);
1003 smu_store_cc6_data(smu
, display_config
->cpu_pstate_separation_time
,
1004 display_config
->cpu_cc6_disable
,
1005 display_config
->cpu_pstate_disable
,
1006 display_config
->nb_pstate_switch_disable
);
1008 mutex_unlock(&smu
->mutex
);
1013 static int smu_get_clock_info(struct smu_context
*smu
,
1014 struct smu_clock_info
*clk_info
,
1015 enum smu_perf_level_designation designation
)
1018 struct smu_performance_level level
= {0};
1023 ret
= smu_get_perf_level(smu
, PERF_LEVEL_ACTIVITY
, &level
);
1027 clk_info
->min_mem_clk
= level
.memory_clock
;
1028 clk_info
->min_eng_clk
= level
.core_clock
;
1029 clk_info
->min_bus_bandwidth
= level
.non_local_mem_freq
* level
.non_local_mem_width
;
1031 ret
= smu_get_perf_level(smu
, designation
, &level
);
1035 clk_info
->min_mem_clk
= level
.memory_clock
;
1036 clk_info
->min_eng_clk
= level
.core_clock
;
1037 clk_info
->min_bus_bandwidth
= level
.non_local_mem_freq
* level
.non_local_mem_width
;
1042 int smu_get_current_clocks(struct smu_context
*smu
,
1043 struct amd_pp_clock_info
*clocks
)
1045 struct amd_pp_simple_clock_info simple_clocks
= {0};
1046 struct smu_clock_info hw_clocks
;
1049 if (!is_support_sw_smu(smu
->adev
))
1052 mutex_lock(&smu
->mutex
);
1054 smu_get_dal_power_level(smu
, &simple_clocks
);
1056 if (smu
->support_power_containment
)
1057 ret
= smu_get_clock_info(smu
, &hw_clocks
,
1058 PERF_LEVEL_POWER_CONTAINMENT
);
1060 ret
= smu_get_clock_info(smu
, &hw_clocks
, PERF_LEVEL_ACTIVITY
);
1063 pr_err("Error in smu_get_clock_info\n");
1067 clocks
->min_engine_clock
= hw_clocks
.min_eng_clk
;
1068 clocks
->max_engine_clock
= hw_clocks
.max_eng_clk
;
1069 clocks
->min_memory_clock
= hw_clocks
.min_mem_clk
;
1070 clocks
->max_memory_clock
= hw_clocks
.max_mem_clk
;
1071 clocks
->min_bus_bandwidth
= hw_clocks
.min_bus_bandwidth
;
1072 clocks
->max_bus_bandwidth
= hw_clocks
.max_bus_bandwidth
;
1073 clocks
->max_engine_clock_in_sr
= hw_clocks
.max_eng_clk
;
1074 clocks
->min_engine_clock_in_sr
= hw_clocks
.min_eng_clk
;
1076 if (simple_clocks
.level
== 0)
1077 clocks
->max_clocks_state
= PP_DAL_POWERLEVEL_7
;
1079 clocks
->max_clocks_state
= simple_clocks
.level
;
1081 if (!smu_get_current_shallow_sleep_clocks(smu
, &hw_clocks
)) {
1082 clocks
->max_engine_clock_in_sr
= hw_clocks
.max_eng_clk
;
1083 clocks
->min_engine_clock_in_sr
= hw_clocks
.min_eng_clk
;
1087 mutex_unlock(&smu
->mutex
);
1091 static int smu_set_clockgating_state(void *handle
,
1092 enum amd_clockgating_state state
)
1097 static int smu_set_powergating_state(void *handle
,
1098 enum amd_powergating_state state
)
1103 static int smu_enable_umd_pstate(void *handle
,
1104 enum amd_dpm_forced_level
*level
)
1106 uint32_t profile_mode_mask
= AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD
|
1107 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK
|
1108 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK
|
1109 AMD_DPM_FORCED_LEVEL_PROFILE_PEAK
;
1111 struct smu_context
*smu
= (struct smu_context
*)(handle
);
1112 struct smu_dpm_context
*smu_dpm_ctx
= &(smu
->smu_dpm
);
1113 if (!smu
->pm_enabled
|| !smu_dpm_ctx
->dpm_context
)
1116 if (!(smu_dpm_ctx
->dpm_level
& profile_mode_mask
)) {
1117 /* enter umd pstate, save current level, disable gfx cg*/
1118 if (*level
& profile_mode_mask
) {
1119 smu_dpm_ctx
->saved_dpm_level
= smu_dpm_ctx
->dpm_level
;
1120 smu_dpm_ctx
->enable_umd_pstate
= true;
1121 amdgpu_device_ip_set_clockgating_state(smu
->adev
,
1122 AMD_IP_BLOCK_TYPE_GFX
,
1123 AMD_CG_STATE_UNGATE
);
1124 amdgpu_device_ip_set_powergating_state(smu
->adev
,
1125 AMD_IP_BLOCK_TYPE_GFX
,
1126 AMD_PG_STATE_UNGATE
);
1129 /* exit umd pstate, restore level, enable gfx cg*/
1130 if (!(*level
& profile_mode_mask
)) {
1131 if (*level
== AMD_DPM_FORCED_LEVEL_PROFILE_EXIT
)
1132 *level
= smu_dpm_ctx
->saved_dpm_level
;
1133 smu_dpm_ctx
->enable_umd_pstate
= false;
1134 amdgpu_device_ip_set_clockgating_state(smu
->adev
,
1135 AMD_IP_BLOCK_TYPE_GFX
,
1137 amdgpu_device_ip_set_powergating_state(smu
->adev
,
1138 AMD_IP_BLOCK_TYPE_GFX
,
1146 int smu_adjust_power_state_dynamic(struct smu_context
*smu
,
1147 enum amd_dpm_forced_level level
,
1148 bool skip_display_settings
)
1152 uint32_t sclk_mask
, mclk_mask
, soc_mask
;
1154 struct smu_dpm_context
*smu_dpm_ctx
= &(smu
->smu_dpm
);
1156 if (!smu
->pm_enabled
)
1158 if (!skip_display_settings
) {
1159 ret
= smu_display_config_changed(smu
);
1161 pr_err("Failed to change display config!");
1166 if (!smu
->pm_enabled
)
1168 ret
= smu_apply_clocks_adjust_rules(smu
);
1170 pr_err("Failed to apply clocks adjust rules!");
1174 if (!skip_display_settings
) {
1175 ret
= smu_notify_smc_dispaly_config(smu
);
1177 pr_err("Failed to notify smc display config!");
1182 if (smu_dpm_ctx
->dpm_level
!= level
) {
1184 case AMD_DPM_FORCED_LEVEL_HIGH
:
1185 ret
= smu_force_dpm_limit_value(smu
, true);
1187 case AMD_DPM_FORCED_LEVEL_LOW
:
1188 ret
= smu_force_dpm_limit_value(smu
, false);
1191 case AMD_DPM_FORCED_LEVEL_AUTO
:
1192 ret
= smu_unforce_dpm_levels(smu
);
1195 case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD
:
1196 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK
:
1197 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK
:
1198 case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK
:
1199 ret
= smu_get_profiling_clk_mask(smu
, level
,
1205 smu_force_clk_levels(smu
, PP_SCLK
, 1 << sclk_mask
);
1206 smu_force_clk_levels(smu
, PP_MCLK
, 1 << mclk_mask
);
1209 case AMD_DPM_FORCED_LEVEL_MANUAL
:
1210 case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT
:
1216 smu_dpm_ctx
->dpm_level
= level
;
1219 if (smu_dpm_ctx
->dpm_level
!= AMD_DPM_FORCED_LEVEL_MANUAL
) {
1220 index
= fls(smu
->workload_mask
);
1221 index
= index
> 0 && index
<= WORKLOAD_POLICY_MAX
? index
- 1 : 0;
1222 workload
= smu
->workload_setting
[index
];
1224 if (smu
->power_profile_mode
!= workload
)
1225 smu_set_power_profile_mode(smu
, &workload
, 0);
1231 int smu_handle_task(struct smu_context
*smu
,
1232 enum amd_dpm_forced_level level
,
1233 enum amd_pp_task task_id
)
1238 case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE
:
1239 ret
= smu_pre_display_config_changed(smu
);
1242 ret
= smu_set_cpu_power_state(smu
);
1245 ret
= smu_adjust_power_state_dynamic(smu
, level
, false);
1247 case AMD_PP_TASK_COMPLETE_INIT
:
1248 case AMD_PP_TASK_READJUST_POWER_STATE
:
1249 ret
= smu_adjust_power_state_dynamic(smu
, level
, true);
1258 const struct amd_ip_funcs smu_ip_funcs
= {
1260 .early_init
= smu_early_init
,
1261 .late_init
= smu_late_init
,
1262 .sw_init
= smu_sw_init
,
1263 .sw_fini
= smu_sw_fini
,
1264 .hw_init
= smu_hw_init
,
1265 .hw_fini
= smu_hw_fini
,
1266 .suspend
= smu_suspend
,
1267 .resume
= smu_resume
,
1269 .check_soft_reset
= NULL
,
1270 .wait_for_idle
= NULL
,
1272 .set_clockgating_state
= smu_set_clockgating_state
,
1273 .set_powergating_state
= smu_set_powergating_state
,
1274 .enable_umd_pstate
= smu_enable_umd_pstate
,
1277 const struct amdgpu_ip_block_version smu_v11_0_ip_block
=
1279 .type
= AMD_IP_BLOCK_TYPE_SMC
,
1283 .funcs
= &smu_ip_funcs
,