2 * Copyright 2019 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
23 #include <linux/firmware.h>
24 #include <linux/module.h>
25 #include <linux/pci.h>
27 #define SMU_11_0_PARTIAL_PPTABLE
30 #include "amdgpu_smu.h"
31 #include "smu_internal.h"
32 #include "atomfirmware.h"
33 #include "amdgpu_atomfirmware.h"
34 #include "smu_v11_0.h"
35 #include "smu_v11_0_pptable.h"
36 #include "soc15_common.h"
39 #include "amdgpu_ras.h"
41 #include "asic_reg/thm/thm_11_0_2_offset.h"
42 #include "asic_reg/thm/thm_11_0_2_sh_mask.h"
43 #include "asic_reg/mp/mp_11_0_offset.h"
44 #include "asic_reg/mp/mp_11_0_sh_mask.h"
45 #include "asic_reg/smuio/smuio_11_0_0_offset.h"
46 #include "asic_reg/smuio/smuio_11_0_0_sh_mask.h"
48 MODULE_FIRMWARE("amdgpu/vega20_smc.bin");
49 MODULE_FIRMWARE("amdgpu/arcturus_smc.bin");
50 MODULE_FIRMWARE("amdgpu/navi10_smc.bin");
51 MODULE_FIRMWARE("amdgpu/navi14_smc.bin");
52 MODULE_FIRMWARE("amdgpu/navi12_smc.bin");
54 #define SMU11_VOLTAGE_SCALE 4
56 static int smu_v11_0_send_msg_without_waiting(struct smu_context
*smu
,
59 struct amdgpu_device
*adev
= smu
->adev
;
60 WREG32_SOC15_NO_KIQ(MP1
, 0, mmMP1_SMN_C2PMSG_66
, msg
);
64 static int smu_v11_0_read_arg(struct smu_context
*smu
, uint32_t *arg
)
66 struct amdgpu_device
*adev
= smu
->adev
;
68 *arg
= RREG32_SOC15_NO_KIQ(MP1
, 0, mmMP1_SMN_C2PMSG_82
);
72 static int smu_v11_0_wait_for_response(struct smu_context
*smu
)
74 struct amdgpu_device
*adev
= smu
->adev
;
75 uint32_t cur_value
, i
, timeout
= adev
->usec_timeout
* 10;
77 for (i
= 0; i
< timeout
; i
++) {
78 cur_value
= RREG32_SOC15_NO_KIQ(MP1
, 0, mmMP1_SMN_C2PMSG_90
);
79 if ((cur_value
& MP1_C2PMSG_90__CONTENT_MASK
) != 0)
80 return cur_value
== 0x1 ? 0 : -EIO
;
85 /* timeout means wrong logic */
89 return RREG32_SOC15_NO_KIQ(MP1
, 0, mmMP1_SMN_C2PMSG_90
) == 0x1 ? 0 : -EIO
;
93 smu_v11_0_send_msg_with_param(struct smu_context
*smu
,
94 enum smu_message_type msg
,
98 struct amdgpu_device
*adev
= smu
->adev
;
99 int ret
= 0, index
= 0;
101 index
= smu_msg_get_index(smu
, msg
);
105 mutex_lock(&smu
->message_lock
);
106 ret
= smu_v11_0_wait_for_response(smu
);
108 pr_err("Msg issuing pre-check failed and "
109 "SMU may be not in the right state!\n");
113 WREG32_SOC15_NO_KIQ(MP1
, 0, mmMP1_SMN_C2PMSG_90
, 0);
115 WREG32_SOC15_NO_KIQ(MP1
, 0, mmMP1_SMN_C2PMSG_82
, param
);
117 smu_v11_0_send_msg_without_waiting(smu
, (uint16_t)index
);
119 ret
= smu_v11_0_wait_for_response(smu
);
121 pr_err("failed send message: %10s (%d) \tparam: 0x%08x response %#x\n",
122 smu_get_message_name(smu
, msg
), index
, param
, ret
);
127 ret
= smu_v11_0_read_arg(smu
, read_arg
);
129 pr_err("failed to read message arg: %10s (%d) \tparam: 0x%08x response %#x\n",
130 smu_get_message_name(smu
, msg
), index
, param
, ret
);
135 mutex_unlock(&smu
->message_lock
);
139 int smu_v11_0_init_microcode(struct smu_context
*smu
)
141 struct amdgpu_device
*adev
= smu
->adev
;
142 const char *chip_name
;
145 const struct smc_firmware_header_v1_0
*hdr
;
146 const struct common_firmware_header
*header
;
147 struct amdgpu_firmware_info
*ucode
= NULL
;
149 switch (adev
->asic_type
) {
151 chip_name
= "vega20";
154 chip_name
= "arcturus";
157 chip_name
= "navi10";
160 chip_name
= "navi14";
163 chip_name
= "navi12";
169 snprintf(fw_name
, sizeof(fw_name
), "amdgpu/%s_smc.bin", chip_name
);
171 err
= request_firmware(&adev
->pm
.fw
, fw_name
, adev
->dev
);
174 err
= amdgpu_ucode_validate(adev
->pm
.fw
);
178 hdr
= (const struct smc_firmware_header_v1_0
*) adev
->pm
.fw
->data
;
179 amdgpu_ucode_print_smc_hdr(&hdr
->header
);
180 adev
->pm
.fw_version
= le32_to_cpu(hdr
->header
.ucode_version
);
182 if (adev
->firmware
.load_type
== AMDGPU_FW_LOAD_PSP
) {
183 ucode
= &adev
->firmware
.ucode
[AMDGPU_UCODE_ID_SMC
];
184 ucode
->ucode_id
= AMDGPU_UCODE_ID_SMC
;
185 ucode
->fw
= adev
->pm
.fw
;
186 header
= (const struct common_firmware_header
*)ucode
->fw
->data
;
187 adev
->firmware
.fw_size
+=
188 ALIGN(le32_to_cpu(header
->ucode_size_bytes
), PAGE_SIZE
);
193 DRM_ERROR("smu_v11_0: Failed to load firmware \"%s\"\n",
195 release_firmware(adev
->pm
.fw
);
201 int smu_v11_0_load_microcode(struct smu_context
*smu
)
203 struct amdgpu_device
*adev
= smu
->adev
;
205 const struct smc_firmware_header_v1_0
*hdr
;
206 uint32_t addr_start
= MP1_SRAM
;
208 uint32_t smc_fw_size
;
209 uint32_t mp1_fw_flags
;
211 hdr
= (const struct smc_firmware_header_v1_0
*) adev
->pm
.fw
->data
;
212 src
= (const uint32_t *)(adev
->pm
.fw
->data
+
213 le32_to_cpu(hdr
->header
.ucode_array_offset_bytes
));
214 smc_fw_size
= hdr
->header
.ucode_size_bytes
;
216 for (i
= 1; i
< smc_fw_size
/4 - 1; i
++) {
217 WREG32_PCIE(addr_start
, src
[i
]);
221 WREG32_PCIE(MP1_Public
| (smnMP1_PUB_CTRL
& 0xffffffff),
222 1 & MP1_SMN_PUB_CTRL__RESET_MASK
);
223 WREG32_PCIE(MP1_Public
| (smnMP1_PUB_CTRL
& 0xffffffff),
224 1 & ~MP1_SMN_PUB_CTRL__RESET_MASK
);
226 for (i
= 0; i
< adev
->usec_timeout
; i
++) {
227 mp1_fw_flags
= RREG32_PCIE(MP1_Public
|
228 (smnMP1_FIRMWARE_FLAGS
& 0xffffffff));
229 if ((mp1_fw_flags
& MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK
) >>
230 MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT
)
235 if (i
== adev
->usec_timeout
)
241 int smu_v11_0_check_fw_status(struct smu_context
*smu
)
243 struct amdgpu_device
*adev
= smu
->adev
;
244 uint32_t mp1_fw_flags
;
246 mp1_fw_flags
= RREG32_PCIE(MP1_Public
|
247 (smnMP1_FIRMWARE_FLAGS
& 0xffffffff));
249 if ((mp1_fw_flags
& MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK
) >>
250 MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT
)
256 int smu_v11_0_check_fw_version(struct smu_context
*smu
)
258 uint32_t if_version
= 0xff, smu_version
= 0xff;
260 uint8_t smu_minor
, smu_debug
;
263 ret
= smu_get_smc_version(smu
, &if_version
, &smu_version
);
267 smu_major
= (smu_version
>> 16) & 0xffff;
268 smu_minor
= (smu_version
>> 8) & 0xff;
269 smu_debug
= (smu_version
>> 0) & 0xff;
271 switch (smu
->adev
->asic_type
) {
273 smu
->smc_driver_if_version
= SMU11_DRIVER_IF_VERSION_VG20
;
276 smu
->smc_driver_if_version
= SMU11_DRIVER_IF_VERSION_ARCT
;
279 smu
->smc_driver_if_version
= SMU11_DRIVER_IF_VERSION_NV10
;
282 smu
->smc_driver_if_version
= SMU11_DRIVER_IF_VERSION_NV12
;
285 smu
->smc_driver_if_version
= SMU11_DRIVER_IF_VERSION_NV14
;
288 pr_err("smu unsupported asic type:%d.\n", smu
->adev
->asic_type
);
289 smu
->smc_driver_if_version
= SMU11_DRIVER_IF_VERSION_INV
;
294 * 1. if_version mismatch is not critical as our fw is designed
295 * to be backward compatible.
296 * 2. New fw usually brings some optimizations. But that's visible
297 * only on the paired driver.
298 * Considering above, we just leave user a warning message instead
299 * of halt driver loading.
301 if (if_version
!= smu
->smc_driver_if_version
) {
302 pr_info("smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
303 "smu fw version = 0x%08x (%d.%d.%d)\n",
304 smu
->smc_driver_if_version
, if_version
,
305 smu_version
, smu_major
, smu_minor
, smu_debug
);
306 pr_warn("SMU driver if version not matched\n");
312 static int smu_v11_0_set_pptable_v2_0(struct smu_context
*smu
, void **table
, uint32_t *size
)
314 struct amdgpu_device
*adev
= smu
->adev
;
315 uint32_t ppt_offset_bytes
;
316 const struct smc_firmware_header_v2_0
*v2
;
318 v2
= (const struct smc_firmware_header_v2_0
*) adev
->pm
.fw
->data
;
320 ppt_offset_bytes
= le32_to_cpu(v2
->ppt_offset_bytes
);
321 *size
= le32_to_cpu(v2
->ppt_size_bytes
);
322 *table
= (uint8_t *)v2
+ ppt_offset_bytes
;
327 static int smu_v11_0_set_pptable_v2_1(struct smu_context
*smu
, void **table
,
328 uint32_t *size
, uint32_t pptable_id
)
330 struct amdgpu_device
*adev
= smu
->adev
;
331 const struct smc_firmware_header_v2_1
*v2_1
;
332 struct smc_soft_pptable_entry
*entries
;
333 uint32_t pptable_count
= 0;
336 v2_1
= (const struct smc_firmware_header_v2_1
*) adev
->pm
.fw
->data
;
337 entries
= (struct smc_soft_pptable_entry
*)
338 ((uint8_t *)v2_1
+ le32_to_cpu(v2_1
->pptable_entry_offset
));
339 pptable_count
= le32_to_cpu(v2_1
->pptable_count
);
340 for (i
= 0; i
< pptable_count
; i
++) {
341 if (le32_to_cpu(entries
[i
].id
) == pptable_id
) {
342 *table
= ((uint8_t *)v2_1
+ le32_to_cpu(entries
[i
].ppt_offset_bytes
));
343 *size
= le32_to_cpu(entries
[i
].ppt_size_bytes
);
348 if (i
== pptable_count
)
354 int smu_v11_0_setup_pptable(struct smu_context
*smu
)
356 struct amdgpu_device
*adev
= smu
->adev
;
357 const struct smc_firmware_header_v1_0
*hdr
;
360 uint16_t atom_table_size
;
363 uint16_t version_major
, version_minor
;
365 hdr
= (const struct smc_firmware_header_v1_0
*) adev
->pm
.fw
->data
;
366 version_major
= le16_to_cpu(hdr
->header
.header_version_major
);
367 version_minor
= le16_to_cpu(hdr
->header
.header_version_minor
);
368 if (version_major
== 2 && smu
->smu_table
.boot_values
.pp_table_id
> 0) {
369 pr_info("use driver provided pptable %d\n", smu
->smu_table
.boot_values
.pp_table_id
);
370 switch (version_minor
) {
372 ret
= smu_v11_0_set_pptable_v2_0(smu
, &table
, &size
);
375 ret
= smu_v11_0_set_pptable_v2_1(smu
, &table
, &size
,
376 smu
->smu_table
.boot_values
.pp_table_id
);
386 pr_info("use vbios provided pptable\n");
387 index
= get_index_into_master_table(atom_master_list_of_data_tables_v2_1
,
390 ret
= smu_get_atom_data_table(smu
, index
, &atom_table_size
, &frev
, &crev
,
394 size
= atom_table_size
;
397 if (!smu
->smu_table
.power_play_table
)
398 smu
->smu_table
.power_play_table
= table
;
399 if (!smu
->smu_table
.power_play_table_size
)
400 smu
->smu_table
.power_play_table_size
= size
;
405 static int smu_v11_0_init_dpm_context(struct smu_context
*smu
)
407 struct smu_dpm_context
*smu_dpm
= &smu
->smu_dpm
;
409 if (smu_dpm
->dpm_context
|| smu_dpm
->dpm_context_size
!= 0)
412 return smu_alloc_dpm_context(smu
);
415 static int smu_v11_0_fini_dpm_context(struct smu_context
*smu
)
417 struct smu_dpm_context
*smu_dpm
= &smu
->smu_dpm
;
419 if (!smu_dpm
->dpm_context
|| smu_dpm
->dpm_context_size
== 0)
422 kfree(smu_dpm
->dpm_context
);
423 kfree(smu_dpm
->golden_dpm_context
);
424 kfree(smu_dpm
->dpm_current_power_state
);
425 kfree(smu_dpm
->dpm_request_power_state
);
426 smu_dpm
->dpm_context
= NULL
;
427 smu_dpm
->golden_dpm_context
= NULL
;
428 smu_dpm
->dpm_context_size
= 0;
429 smu_dpm
->dpm_current_power_state
= NULL
;
430 smu_dpm
->dpm_request_power_state
= NULL
;
435 int smu_v11_0_init_smc_tables(struct smu_context
*smu
)
437 struct smu_table_context
*smu_table
= &smu
->smu_table
;
438 struct smu_table
*tables
= NULL
;
441 if (smu_table
->tables
)
444 tables
= kcalloc(SMU_TABLE_COUNT
, sizeof(struct smu_table
),
449 smu_table
->tables
= tables
;
451 ret
= smu_tables_init(smu
, tables
);
455 ret
= smu_v11_0_init_dpm_context(smu
);
462 int smu_v11_0_fini_smc_tables(struct smu_context
*smu
)
464 struct smu_table_context
*smu_table
= &smu
->smu_table
;
467 if (!smu_table
->tables
)
470 kfree(smu_table
->tables
);
471 kfree(smu_table
->metrics_table
);
472 kfree(smu_table
->watermarks_table
);
473 smu_table
->tables
= NULL
;
474 smu_table
->metrics_table
= NULL
;
475 smu_table
->watermarks_table
= NULL
;
476 smu_table
->metrics_time
= 0;
478 ret
= smu_v11_0_fini_dpm_context(smu
);
484 int smu_v11_0_init_power(struct smu_context
*smu
)
486 struct smu_power_context
*smu_power
= &smu
->smu_power
;
488 if (smu_power
->power_context
|| smu_power
->power_context_size
!= 0)
491 smu_power
->power_context
= kzalloc(sizeof(struct smu_11_0_dpm_context
),
493 if (!smu_power
->power_context
)
495 smu_power
->power_context_size
= sizeof(struct smu_11_0_dpm_context
);
500 int smu_v11_0_fini_power(struct smu_context
*smu
)
502 struct smu_power_context
*smu_power
= &smu
->smu_power
;
504 if (!smu_power
->power_context
|| smu_power
->power_context_size
== 0)
507 kfree(smu_power
->power_context
);
508 smu_power
->power_context
= NULL
;
509 smu_power
->power_context_size
= 0;
514 int smu_v11_0_get_vbios_bootup_values(struct smu_context
*smu
)
519 struct atom_common_table_header
*header
;
520 struct atom_firmware_info_v3_3
*v_3_3
;
521 struct atom_firmware_info_v3_1
*v_3_1
;
523 index
= get_index_into_master_table(atom_master_list_of_data_tables_v2_1
,
526 ret
= smu_get_atom_data_table(smu
, index
, &size
, &frev
, &crev
,
527 (uint8_t **)&header
);
531 if (header
->format_revision
!= 3) {
532 pr_err("unknown atom_firmware_info version! for smu11\n");
536 switch (header
->content_revision
) {
540 v_3_1
= (struct atom_firmware_info_v3_1
*)header
;
541 smu
->smu_table
.boot_values
.revision
= v_3_1
->firmware_revision
;
542 smu
->smu_table
.boot_values
.gfxclk
= v_3_1
->bootup_sclk_in10khz
;
543 smu
->smu_table
.boot_values
.uclk
= v_3_1
->bootup_mclk_in10khz
;
544 smu
->smu_table
.boot_values
.socclk
= 0;
545 smu
->smu_table
.boot_values
.dcefclk
= 0;
546 smu
->smu_table
.boot_values
.vddc
= v_3_1
->bootup_vddc_mv
;
547 smu
->smu_table
.boot_values
.vddci
= v_3_1
->bootup_vddci_mv
;
548 smu
->smu_table
.boot_values
.mvddc
= v_3_1
->bootup_mvddc_mv
;
549 smu
->smu_table
.boot_values
.vdd_gfx
= v_3_1
->bootup_vddgfx_mv
;
550 smu
->smu_table
.boot_values
.cooling_id
= v_3_1
->coolingsolution_id
;
551 smu
->smu_table
.boot_values
.pp_table_id
= 0;
555 v_3_3
= (struct atom_firmware_info_v3_3
*)header
;
556 smu
->smu_table
.boot_values
.revision
= v_3_3
->firmware_revision
;
557 smu
->smu_table
.boot_values
.gfxclk
= v_3_3
->bootup_sclk_in10khz
;
558 smu
->smu_table
.boot_values
.uclk
= v_3_3
->bootup_mclk_in10khz
;
559 smu
->smu_table
.boot_values
.socclk
= 0;
560 smu
->smu_table
.boot_values
.dcefclk
= 0;
561 smu
->smu_table
.boot_values
.vddc
= v_3_3
->bootup_vddc_mv
;
562 smu
->smu_table
.boot_values
.vddci
= v_3_3
->bootup_vddci_mv
;
563 smu
->smu_table
.boot_values
.mvddc
= v_3_3
->bootup_mvddc_mv
;
564 smu
->smu_table
.boot_values
.vdd_gfx
= v_3_3
->bootup_vddgfx_mv
;
565 smu
->smu_table
.boot_values
.cooling_id
= v_3_3
->coolingsolution_id
;
566 smu
->smu_table
.boot_values
.pp_table_id
= v_3_3
->pplib_pptable_id
;
569 smu
->smu_table
.boot_values
.format_revision
= header
->format_revision
;
570 smu
->smu_table
.boot_values
.content_revision
= header
->content_revision
;
575 int smu_v11_0_get_clk_info_from_vbios(struct smu_context
*smu
)
578 struct amdgpu_device
*adev
= smu
->adev
;
579 struct atom_get_smu_clock_info_parameters_v3_1 input
= {0};
580 struct atom_get_smu_clock_info_output_parameters_v3_1
*output
;
582 input
.clk_id
= SMU11_SYSPLL0_SOCCLK_ID
;
583 input
.command
= GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ
;
584 index
= get_index_into_master_table(atom_master_list_of_command_functions_v2_1
,
587 ret
= amdgpu_atom_execute_table(adev
->mode_info
.atom_context
, index
,
592 output
= (struct atom_get_smu_clock_info_output_parameters_v3_1
*)&input
;
593 smu
->smu_table
.boot_values
.socclk
= le32_to_cpu(output
->atom_smu_outputclkfreq
.smu_clock_freq_hz
) / 10000;
595 memset(&input
, 0, sizeof(input
));
596 input
.clk_id
= SMU11_SYSPLL0_DCEFCLK_ID
;
597 input
.command
= GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ
;
598 index
= get_index_into_master_table(atom_master_list_of_command_functions_v2_1
,
601 ret
= amdgpu_atom_execute_table(adev
->mode_info
.atom_context
, index
,
606 output
= (struct atom_get_smu_clock_info_output_parameters_v3_1
*)&input
;
607 smu
->smu_table
.boot_values
.dcefclk
= le32_to_cpu(output
->atom_smu_outputclkfreq
.smu_clock_freq_hz
) / 10000;
609 memset(&input
, 0, sizeof(input
));
610 input
.clk_id
= SMU11_SYSPLL0_ECLK_ID
;
611 input
.command
= GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ
;
612 index
= get_index_into_master_table(atom_master_list_of_command_functions_v2_1
,
615 ret
= amdgpu_atom_execute_table(adev
->mode_info
.atom_context
, index
,
620 output
= (struct atom_get_smu_clock_info_output_parameters_v3_1
*)&input
;
621 smu
->smu_table
.boot_values
.eclk
= le32_to_cpu(output
->atom_smu_outputclkfreq
.smu_clock_freq_hz
) / 10000;
623 memset(&input
, 0, sizeof(input
));
624 input
.clk_id
= SMU11_SYSPLL0_VCLK_ID
;
625 input
.command
= GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ
;
626 index
= get_index_into_master_table(atom_master_list_of_command_functions_v2_1
,
629 ret
= amdgpu_atom_execute_table(adev
->mode_info
.atom_context
, index
,
634 output
= (struct atom_get_smu_clock_info_output_parameters_v3_1
*)&input
;
635 smu
->smu_table
.boot_values
.vclk
= le32_to_cpu(output
->atom_smu_outputclkfreq
.smu_clock_freq_hz
) / 10000;
637 memset(&input
, 0, sizeof(input
));
638 input
.clk_id
= SMU11_SYSPLL0_DCLK_ID
;
639 input
.command
= GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ
;
640 index
= get_index_into_master_table(atom_master_list_of_command_functions_v2_1
,
643 ret
= amdgpu_atom_execute_table(adev
->mode_info
.atom_context
, index
,
648 output
= (struct atom_get_smu_clock_info_output_parameters_v3_1
*)&input
;
649 smu
->smu_table
.boot_values
.dclk
= le32_to_cpu(output
->atom_smu_outputclkfreq
.smu_clock_freq_hz
) / 10000;
651 if ((smu
->smu_table
.boot_values
.format_revision
== 3) &&
652 (smu
->smu_table
.boot_values
.content_revision
>= 2)) {
653 memset(&input
, 0, sizeof(input
));
654 input
.clk_id
= SMU11_SYSPLL1_0_FCLK_ID
;
655 input
.syspll_id
= SMU11_SYSPLL1_2_ID
;
656 input
.command
= GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ
;
657 index
= get_index_into_master_table(atom_master_list_of_command_functions_v2_1
,
660 ret
= amdgpu_atom_execute_table(adev
->mode_info
.atom_context
, index
,
665 output
= (struct atom_get_smu_clock_info_output_parameters_v3_1
*)&input
;
666 smu
->smu_table
.boot_values
.fclk
= le32_to_cpu(output
->atom_smu_outputclkfreq
.smu_clock_freq_hz
) / 10000;
672 int smu_v11_0_notify_memory_pool_location(struct smu_context
*smu
)
674 struct smu_table_context
*smu_table
= &smu
->smu_table
;
675 struct smu_table
*memory_pool
= &smu_table
->memory_pool
;
678 uint32_t address_low
, address_high
;
680 if (memory_pool
->size
== 0 || memory_pool
->cpu_addr
== NULL
)
683 address
= (uintptr_t)memory_pool
->cpu_addr
;
684 address_high
= (uint32_t)upper_32_bits(address
);
685 address_low
= (uint32_t)lower_32_bits(address
);
687 ret
= smu_send_smc_msg_with_param(smu
,
688 SMU_MSG_SetSystemVirtualDramAddrHigh
,
693 ret
= smu_send_smc_msg_with_param(smu
,
694 SMU_MSG_SetSystemVirtualDramAddrLow
,
700 address
= memory_pool
->mc_address
;
701 address_high
= (uint32_t)upper_32_bits(address
);
702 address_low
= (uint32_t)lower_32_bits(address
);
704 ret
= smu_send_smc_msg_with_param(smu
, SMU_MSG_DramLogSetDramAddrHigh
,
708 ret
= smu_send_smc_msg_with_param(smu
, SMU_MSG_DramLogSetDramAddrLow
,
712 ret
= smu_send_smc_msg_with_param(smu
, SMU_MSG_DramLogSetDramSize
,
713 (uint32_t)memory_pool
->size
, NULL
);
/*
 * Thin wrapper: validate the selected powerplay table via the ASIC
 * backend's smu_check_powerplay_table() hook.
 */
int smu_v11_0_check_pptable(struct smu_context *smu)
{
	return smu_check_powerplay_table(smu);
}
728 int smu_v11_0_parse_pptable(struct smu_context
*smu
)
732 struct smu_table_context
*table_context
= &smu
->smu_table
;
733 struct smu_table
*table
= &table_context
->tables
[SMU_TABLE_PPTABLE
];
735 /* during TDR we need to free and alloc the pptable */
736 if (table_context
->driver_pptable
)
737 kfree(table_context
->driver_pptable
);
739 table_context
->driver_pptable
= kzalloc(table
->size
, GFP_KERNEL
);
741 if (!table_context
->driver_pptable
)
744 ret
= smu_store_powerplay_table(smu
);
748 ret
= smu_append_powerplay_table(smu
);
/*
 * Thin wrapper: build the default DPM table through the ASIC backend's
 * smu_set_default_dpm_table() hook.
 */
int smu_v11_0_populate_smc_pptable(struct smu_context *smu)
{
	return smu_set_default_dpm_table(smu);
}
762 int smu_v11_0_write_pptable(struct smu_context
*smu
)
764 struct smu_table_context
*table_context
= &smu
->smu_table
;
767 ret
= smu_update_table(smu
, SMU_TABLE_PPTABLE
, 0,
768 table_context
->driver_pptable
, true);
773 int smu_v11_0_set_deep_sleep_dcefclk(struct smu_context
*smu
, uint32_t clk
)
777 if (amdgpu_sriov_vf(smu
->adev
))
780 ret
= smu_send_smc_msg_with_param(smu
,
781 SMU_MSG_SetMinDeepSleepDcefclk
, clk
, NULL
);
783 pr_err("SMU11 attempt to set divider for DCEFCLK Failed!");
788 int smu_v11_0_set_min_dcef_deep_sleep(struct smu_context
*smu
)
790 struct smu_table_context
*table_context
= &smu
->smu_table
;
795 return smu_v11_0_set_deep_sleep_dcefclk(smu
, table_context
->boot_values
.dcefclk
/ 100);
798 int smu_v11_0_set_driver_table_location(struct smu_context
*smu
)
800 struct smu_table
*driver_table
= &smu
->smu_table
.driver_table
;
803 if (driver_table
->mc_address
) {
804 ret
= smu_send_smc_msg_with_param(smu
,
805 SMU_MSG_SetDriverDramAddrHigh
,
806 upper_32_bits(driver_table
->mc_address
),
809 ret
= smu_send_smc_msg_with_param(smu
,
810 SMU_MSG_SetDriverDramAddrLow
,
811 lower_32_bits(driver_table
->mc_address
),
818 int smu_v11_0_set_tool_table_location(struct smu_context
*smu
)
821 struct smu_table
*tool_table
= &smu
->smu_table
.tables
[SMU_TABLE_PMSTATUSLOG
];
823 if (amdgpu_sriov_vf(smu
->adev
))
826 if (tool_table
->mc_address
) {
827 ret
= smu_send_smc_msg_with_param(smu
,
828 SMU_MSG_SetToolsDramAddrHigh
,
829 upper_32_bits(tool_table
->mc_address
),
832 ret
= smu_send_smc_msg_with_param(smu
,
833 SMU_MSG_SetToolsDramAddrLow
,
834 lower_32_bits(tool_table
->mc_address
),
841 int smu_v11_0_init_display_count(struct smu_context
*smu
, uint32_t count
)
845 if (amdgpu_sriov_vf(smu
->adev
))
848 if (!smu
->pm_enabled
)
851 ret
= smu_send_smc_msg_with_param(smu
, SMU_MSG_NumOfDisplays
, count
, NULL
);
856 int smu_v11_0_set_allowed_mask(struct smu_context
*smu
)
858 struct smu_feature
*feature
= &smu
->smu_feature
;
860 uint32_t feature_mask
[2];
862 if (amdgpu_sriov_vf(smu
->adev
))
865 mutex_lock(&feature
->mutex
);
866 if (bitmap_empty(feature
->allowed
, SMU_FEATURE_MAX
) || feature
->feature_num
< 64)
869 bitmap_copy((unsigned long *)feature_mask
, feature
->allowed
, 64);
871 ret
= smu_send_smc_msg_with_param(smu
, SMU_MSG_SetAllowedFeaturesMaskHigh
,
872 feature_mask
[1], NULL
);
876 ret
= smu_send_smc_msg_with_param(smu
, SMU_MSG_SetAllowedFeaturesMaskLow
,
877 feature_mask
[0], NULL
);
882 mutex_unlock(&feature
->mutex
);
886 int smu_v11_0_get_enabled_mask(struct smu_context
*smu
,
887 uint32_t *feature_mask
, uint32_t num
)
889 uint32_t feature_mask_high
= 0, feature_mask_low
= 0;
890 struct smu_feature
*feature
= &smu
->smu_feature
;
893 if (amdgpu_sriov_vf(smu
->adev
) && !amdgpu_sriov_is_pp_one_vf(smu
->adev
))
896 if (!feature_mask
|| num
< 2)
899 if (bitmap_empty(feature
->enabled
, feature
->feature_num
)) {
900 ret
= smu_send_smc_msg(smu
, SMU_MSG_GetEnabledSmuFeaturesHigh
, &feature_mask_high
);
904 ret
= smu_send_smc_msg(smu
, SMU_MSG_GetEnabledSmuFeaturesLow
, &feature_mask_low
);
908 feature_mask
[0] = feature_mask_low
;
909 feature_mask
[1] = feature_mask_high
;
911 bitmap_copy((unsigned long *)feature_mask
, feature
->enabled
,
912 feature
->feature_num
);
918 int smu_v11_0_system_features_control(struct smu_context
*smu
,
921 struct smu_feature
*feature
= &smu
->smu_feature
;
922 uint32_t feature_mask
[2];
925 ret
= smu_send_smc_msg(smu
, (en
? SMU_MSG_EnableAllSmuFeatures
:
926 SMU_MSG_DisableAllSmuFeatures
), NULL
);
930 bitmap_zero(feature
->enabled
, feature
->feature_num
);
931 bitmap_zero(feature
->supported
, feature
->feature_num
);
934 ret
= smu_feature_get_enabled_mask(smu
, feature_mask
, 2);
938 bitmap_copy(feature
->enabled
, (unsigned long *)&feature_mask
,
939 feature
->feature_num
);
940 bitmap_copy(feature
->supported
, (unsigned long *)&feature_mask
,
941 feature
->feature_num
);
947 int smu_v11_0_notify_display_change(struct smu_context
*smu
)
951 if (amdgpu_sriov_vf(smu
->adev
))
954 if (!smu
->pm_enabled
)
957 if (smu_feature_is_enabled(smu
, SMU_FEATURE_DPM_UCLK_BIT
) &&
958 smu
->adev
->gmc
.vram_type
== AMDGPU_VRAM_TYPE_HBM
)
959 ret
= smu_send_smc_msg_with_param(smu
, SMU_MSG_SetUclkFastSwitch
, 1, NULL
);
965 smu_v11_0_get_max_sustainable_clock(struct smu_context
*smu
, uint32_t *clock
,
966 enum smu_clk_type clock_select
)
971 if ((smu_msg_get_index(smu
, SMU_MSG_GetDcModeMaxDpmFreq
) < 0) ||
972 (smu_msg_get_index(smu
, SMU_MSG_GetMaxDpmFreq
) < 0))
975 clk_id
= smu_clk_get_index(smu
, clock_select
);
979 ret
= smu_send_smc_msg_with_param(smu
, SMU_MSG_GetDcModeMaxDpmFreq
,
980 clk_id
<< 16, clock
);
982 pr_err("[GetMaxSustainableClock] Failed to get max DC clock from SMC!");
989 /* if DC limit is zero, return AC limit */
990 ret
= smu_send_smc_msg_with_param(smu
, SMU_MSG_GetMaxDpmFreq
,
991 clk_id
<< 16, clock
);
993 pr_err("[GetMaxSustainableClock] failed to get max AC clock from SMC!");
1000 int smu_v11_0_init_max_sustainable_clocks(struct smu_context
*smu
)
1002 struct smu_11_0_max_sustainable_clocks
*max_sustainable_clocks
;
1005 if (!smu
->smu_table
.max_sustainable_clocks
)
1006 max_sustainable_clocks
= kzalloc(sizeof(struct smu_11_0_max_sustainable_clocks
),
1009 max_sustainable_clocks
= smu
->smu_table
.max_sustainable_clocks
;
1011 smu
->smu_table
.max_sustainable_clocks
= (void *)max_sustainable_clocks
;
1013 max_sustainable_clocks
->uclock
= smu
->smu_table
.boot_values
.uclk
/ 100;
1014 max_sustainable_clocks
->soc_clock
= smu
->smu_table
.boot_values
.socclk
/ 100;
1015 max_sustainable_clocks
->dcef_clock
= smu
->smu_table
.boot_values
.dcefclk
/ 100;
1016 max_sustainable_clocks
->display_clock
= 0xFFFFFFFF;
1017 max_sustainable_clocks
->phy_clock
= 0xFFFFFFFF;
1018 max_sustainable_clocks
->pixel_clock
= 0xFFFFFFFF;
1020 if (smu_feature_is_enabled(smu
, SMU_FEATURE_DPM_UCLK_BIT
)) {
1021 ret
= smu_v11_0_get_max_sustainable_clock(smu
,
1022 &(max_sustainable_clocks
->uclock
),
1025 pr_err("[%s] failed to get max UCLK from SMC!",
1031 if (smu_feature_is_enabled(smu
, SMU_FEATURE_DPM_SOCCLK_BIT
)) {
1032 ret
= smu_v11_0_get_max_sustainable_clock(smu
,
1033 &(max_sustainable_clocks
->soc_clock
),
1036 pr_err("[%s] failed to get max SOCCLK from SMC!",
1042 if (smu_feature_is_enabled(smu
, SMU_FEATURE_DPM_DCEFCLK_BIT
)) {
1043 ret
= smu_v11_0_get_max_sustainable_clock(smu
,
1044 &(max_sustainable_clocks
->dcef_clock
),
1047 pr_err("[%s] failed to get max DCEFCLK from SMC!",
1052 ret
= smu_v11_0_get_max_sustainable_clock(smu
,
1053 &(max_sustainable_clocks
->display_clock
),
1056 pr_err("[%s] failed to get max DISPCLK from SMC!",
1060 ret
= smu_v11_0_get_max_sustainable_clock(smu
,
1061 &(max_sustainable_clocks
->phy_clock
),
1064 pr_err("[%s] failed to get max PHYCLK from SMC!",
1068 ret
= smu_v11_0_get_max_sustainable_clock(smu
,
1069 &(max_sustainable_clocks
->pixel_clock
),
1072 pr_err("[%s] failed to get max PIXCLK from SMC!",
1078 if (max_sustainable_clocks
->soc_clock
< max_sustainable_clocks
->uclock
)
1079 max_sustainable_clocks
->uclock
= max_sustainable_clocks
->soc_clock
;
1084 uint32_t smu_v11_0_get_max_power_limit(struct smu_context
*smu
) {
1085 uint32_t od_limit
, max_power_limit
;
1086 struct smu_11_0_powerplay_table
*powerplay_table
= NULL
;
1087 struct smu_table_context
*table_context
= &smu
->smu_table
;
1088 powerplay_table
= table_context
->power_play_table
;
1090 max_power_limit
= smu_get_pptable_power_limit(smu
);
1092 if (!max_power_limit
) {
1093 // If we couldn't get the table limit, fall back on first-read value
1094 if (!smu
->default_power_limit
)
1095 smu
->default_power_limit
= smu
->power_limit
;
1096 max_power_limit
= smu
->default_power_limit
;
1099 if (smu
->od_enabled
) {
1100 od_limit
= le32_to_cpu(powerplay_table
->overdrive_table
.max
[SMU_11_0_ODSETTING_POWERPERCENTAGE
]);
1102 pr_debug("ODSETTING_POWERPERCENTAGE: %d (default: %d)\n", od_limit
, smu
->default_power_limit
);
1104 max_power_limit
*= (100 + od_limit
);
1105 max_power_limit
/= 100;
1108 return max_power_limit
;
1111 int smu_v11_0_set_power_limit(struct smu_context
*smu
, uint32_t n
)
1114 uint32_t max_power_limit
;
1116 if (amdgpu_sriov_vf(smu
->adev
))
1119 max_power_limit
= smu_v11_0_get_max_power_limit(smu
);
1121 if (n
> max_power_limit
) {
1122 pr_err("New power limit (%d) is over the max allowed %d\n",
1129 n
= smu
->default_power_limit
;
1131 if (!smu_feature_is_enabled(smu
, SMU_FEATURE_PPT_BIT
)) {
1132 pr_err("Setting new power limit is not supported!\n");
1136 ret
= smu_send_smc_msg_with_param(smu
, SMU_MSG_SetPptLimit
, n
, NULL
);
1138 pr_err("[%s] Set power limit Failed!\n", __func__
);
1141 smu
->power_limit
= n
;
1146 int smu_v11_0_get_current_clk_freq(struct smu_context
*smu
,
1147 enum smu_clk_type clk_id
,
1154 if (clk_id
>= SMU_CLK_COUNT
|| !value
)
1157 asic_clk_id
= smu_clk_get_index(smu
, clk_id
);
1158 if (asic_clk_id
< 0)
1161 /* if don't has GetDpmClockFreq Message, try get current clock by SmuMetrics_t */
1162 if (smu_msg_get_index(smu
, SMU_MSG_GetDpmClockFreq
) < 0)
1163 ret
= smu_get_current_clk_freq_by_table(smu
, clk_id
, &freq
);
1165 ret
= smu_send_smc_msg_with_param(smu
, SMU_MSG_GetDpmClockFreq
,
1166 (asic_clk_id
<< 16), &freq
);
1177 static int smu_v11_0_set_thermal_range(struct smu_context
*smu
,
1178 struct smu_temperature_range range
)
1180 struct amdgpu_device
*adev
= smu
->adev
;
1181 int low
= SMU_THERMAL_MINIMUM_ALERT_TEMP
;
1182 int high
= SMU_THERMAL_MAXIMUM_ALERT_TEMP
;
1184 struct smu_table_context
*table_context
= &smu
->smu_table
;
1185 struct smu_11_0_powerplay_table
*powerplay_table
= table_context
->power_play_table
;
1187 low
= max(SMU_THERMAL_MINIMUM_ALERT_TEMP
,
1188 range
.min
/ SMU_TEMPERATURE_UNITS_PER_CENTIGRADES
);
1189 high
= min((uint16_t)SMU_THERMAL_MAXIMUM_ALERT_TEMP
, powerplay_table
->software_shutdown_temp
);
1194 val
= RREG32_SOC15(THM
, 0, mmTHM_THERMAL_INT_CTRL
);
1195 val
= REG_SET_FIELD(val
, THM_THERMAL_INT_CTRL
, MAX_IH_CREDIT
, 5);
1196 val
= REG_SET_FIELD(val
, THM_THERMAL_INT_CTRL
, THERM_IH_HW_ENA
, 1);
1197 val
= REG_SET_FIELD(val
, THM_THERMAL_INT_CTRL
, THERM_INTH_MASK
, 0);
1198 val
= REG_SET_FIELD(val
, THM_THERMAL_INT_CTRL
, THERM_INTL_MASK
, 0);
1199 val
= REG_SET_FIELD(val
, THM_THERMAL_INT_CTRL
, DIG_THERM_INTH
, (high
& 0xff));
1200 val
= REG_SET_FIELD(val
, THM_THERMAL_INT_CTRL
, DIG_THERM_INTL
, (low
& 0xff));
1201 val
= val
& (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK
);
1203 WREG32_SOC15(THM
, 0, mmTHM_THERMAL_INT_CTRL
, val
);
1208 static int smu_v11_0_enable_thermal_alert(struct smu_context
*smu
)
1210 struct amdgpu_device
*adev
= smu
->adev
;
1213 val
|= (1 << THM_THERMAL_INT_ENA__THERM_INTH_CLR__SHIFT
);
1214 val
|= (1 << THM_THERMAL_INT_ENA__THERM_INTL_CLR__SHIFT
);
1215 val
|= (1 << THM_THERMAL_INT_ENA__THERM_TRIGGER_CLR__SHIFT
);
1217 WREG32_SOC15(THM
, 0, mmTHM_THERMAL_INT_ENA
, val
);
1222 int smu_v11_0_start_thermal_control(struct smu_context
*smu
)
1225 struct smu_temperature_range range
;
1226 struct amdgpu_device
*adev
= smu
->adev
;
1228 memcpy(&range
, &smu11_thermal_policy
[0], sizeof(struct smu_temperature_range
));
1230 ret
= smu_get_thermal_temperature_range(smu
, &range
);
1234 if (smu
->smu_table
.thermal_controller_type
) {
1235 ret
= smu_v11_0_set_thermal_range(smu
, range
);
1239 ret
= smu_v11_0_enable_thermal_alert(smu
);
1243 ret
= smu_set_thermal_fan_table(smu
);
1248 adev
->pm
.dpm
.thermal
.min_temp
= range
.min
;
1249 adev
->pm
.dpm
.thermal
.max_temp
= range
.max
;
1250 adev
->pm
.dpm
.thermal
.max_edge_emergency_temp
= range
.edge_emergency_max
;
1251 adev
->pm
.dpm
.thermal
.min_hotspot_temp
= range
.hotspot_min
;
1252 adev
->pm
.dpm
.thermal
.max_hotspot_crit_temp
= range
.hotspot_crit_max
;
1253 adev
->pm
.dpm
.thermal
.max_hotspot_emergency_temp
= range
.hotspot_emergency_max
;
1254 adev
->pm
.dpm
.thermal
.min_mem_temp
= range
.mem_min
;
1255 adev
->pm
.dpm
.thermal
.max_mem_crit_temp
= range
.mem_crit_max
;
1256 adev
->pm
.dpm
.thermal
.max_mem_emergency_temp
= range
.mem_emergency_max
;
1261 int smu_v11_0_stop_thermal_control(struct smu_context
*smu
)
1263 struct amdgpu_device
*adev
= smu
->adev
;
1265 WREG32_SOC15(THM
, 0, mmTHM_THERMAL_INT_ENA
, 0);
1270 static uint16_t convert_to_vddc(uint8_t vid
)
1272 return (uint16_t) ((6200 - (vid
* 25)) / SMU11_VOLTAGE_SCALE
);
1275 static int smu_v11_0_get_gfx_vdd(struct smu_context
*smu
, uint32_t *value
)
1277 struct amdgpu_device
*adev
= smu
->adev
;
1278 uint32_t vdd
= 0, val_vid
= 0;
1282 val_vid
= (RREG32_SOC15(SMUIO
, 0, mmSMUSVI0_TEL_PLANE0
) &
1283 SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR_MASK
) >>
1284 SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR__SHIFT
;
1286 vdd
= (uint32_t)convert_to_vddc((uint8_t)val_vid
);
1294 int smu_v11_0_read_sensor(struct smu_context
*smu
,
1295 enum amd_pp_sensors sensor
,
1296 void *data
, uint32_t *size
)
1304 case AMDGPU_PP_SENSOR_GFX_MCLK
:
1305 ret
= smu_get_current_clk_freq(smu
, SMU_UCLK
, (uint32_t *)data
);
1308 case AMDGPU_PP_SENSOR_GFX_SCLK
:
1309 ret
= smu_get_current_clk_freq(smu
, SMU_GFXCLK
, (uint32_t *)data
);
1312 case AMDGPU_PP_SENSOR_VDDGFX
:
1313 ret
= smu_v11_0_get_gfx_vdd(smu
, (uint32_t *)data
);
1316 case AMDGPU_PP_SENSOR_MIN_FAN_RPM
:
1317 *(uint32_t *)data
= 0;
1321 ret
= smu_common_read_sensor(smu
, sensor
, data
, size
);
1332 smu_v11_0_display_clock_voltage_request(struct smu_context
*smu
,
1333 struct pp_display_clock_request
1336 enum amd_pp_clock_type clk_type
= clock_req
->clock_type
;
1338 enum smu_clk_type clk_select
= 0;
1339 uint32_t clk_freq
= clock_req
->clock_freq_in_khz
/ 1000;
1341 if (smu_feature_is_enabled(smu
, SMU_FEATURE_DPM_DCEFCLK_BIT
) ||
1342 smu_feature_is_enabled(smu
, SMU_FEATURE_DPM_UCLK_BIT
)) {
1344 case amd_pp_dcef_clock
:
1345 clk_select
= SMU_DCEFCLK
;
1347 case amd_pp_disp_clock
:
1348 clk_select
= SMU_DISPCLK
;
1350 case amd_pp_pixel_clock
:
1351 clk_select
= SMU_PIXCLK
;
1353 case amd_pp_phy_clock
:
1354 clk_select
= SMU_PHYCLK
;
1356 case amd_pp_mem_clock
:
1357 clk_select
= SMU_UCLK
;
1360 pr_info("[%s] Invalid Clock Type!", __func__
);
1368 if (clk_select
== SMU_UCLK
&& smu
->disable_uclk_switch
)
1371 ret
= smu_set_hard_freq_range(smu
, clk_select
, clk_freq
, 0);
1373 if(clk_select
== SMU_UCLK
)
1374 smu
->hard_min_uclk_req_from_dal
= clk_freq
;
1381 int smu_v11_0_gfx_off_control(struct smu_context
*smu
, bool enable
)
1384 struct amdgpu_device
*adev
= smu
->adev
;
1386 switch (adev
->asic_type
) {
1392 if (!(adev
->pm
.pp_feature
& PP_GFXOFF_MASK
))
1395 ret
= smu_send_smc_msg(smu
, SMU_MSG_AllowGfxOff
, NULL
);
1397 ret
= smu_send_smc_msg(smu
, SMU_MSG_DisallowGfxOff
, NULL
);
1407 smu_v11_0_get_fan_control_mode(struct smu_context
*smu
)
1409 if (!smu_feature_is_enabled(smu
, SMU_FEATURE_FAN_CONTROL_BIT
))
1410 return AMD_FAN_CTRL_MANUAL
;
1412 return AMD_FAN_CTRL_AUTO
;
1416 smu_v11_0_auto_fan_control(struct smu_context
*smu
, bool auto_fan_control
)
1420 if (!smu_feature_is_supported(smu
, SMU_FEATURE_FAN_CONTROL_BIT
))
1423 ret
= smu_feature_set_enabled(smu
, SMU_FEATURE_FAN_CONTROL_BIT
, auto_fan_control
);
1425 pr_err("[%s]%s smc FAN CONTROL feature failed!",
1426 __func__
, (auto_fan_control
? "Start" : "Stop"));
1432 smu_v11_0_set_fan_static_mode(struct smu_context
*smu
, uint32_t mode
)
1434 struct amdgpu_device
*adev
= smu
->adev
;
1436 WREG32_SOC15(THM
, 0, mmCG_FDO_CTRL2
,
1437 REG_SET_FIELD(RREG32_SOC15(THM
, 0, mmCG_FDO_CTRL2
),
1438 CG_FDO_CTRL2
, TMIN
, 0));
1439 WREG32_SOC15(THM
, 0, mmCG_FDO_CTRL2
,
1440 REG_SET_FIELD(RREG32_SOC15(THM
, 0, mmCG_FDO_CTRL2
),
1441 CG_FDO_CTRL2
, FDO_PWM_MODE
, mode
));
1447 smu_v11_0_set_fan_speed_percent(struct smu_context
*smu
, uint32_t speed
)
1449 struct amdgpu_device
*adev
= smu
->adev
;
1450 uint32_t duty100
, duty
;
1456 if (smu_v11_0_auto_fan_control(smu
, 0))
1459 duty100
= REG_GET_FIELD(RREG32_SOC15(THM
, 0, mmCG_FDO_CTRL1
),
1460 CG_FDO_CTRL1
, FMAX_DUTY100
);
1464 tmp64
= (uint64_t)speed
* duty100
;
1466 duty
= (uint32_t)tmp64
;
1468 WREG32_SOC15(THM
, 0, mmCG_FDO_CTRL0
,
1469 REG_SET_FIELD(RREG32_SOC15(THM
, 0, mmCG_FDO_CTRL0
),
1470 CG_FDO_CTRL0
, FDO_STATIC_DUTY
, duty
));
1472 return smu_v11_0_set_fan_static_mode(smu
, FDO_PWM_MODE_STATIC
);
1476 smu_v11_0_set_fan_control_mode(struct smu_context
*smu
,
1482 case AMD_FAN_CTRL_NONE
:
1483 ret
= smu_v11_0_set_fan_speed_percent(smu
, 100);
1485 case AMD_FAN_CTRL_MANUAL
:
1486 ret
= smu_v11_0_auto_fan_control(smu
, 0);
1488 case AMD_FAN_CTRL_AUTO
:
1489 ret
= smu_v11_0_auto_fan_control(smu
, 1);
1496 pr_err("[%s]Set fan control mode failed!", __func__
);
1503 int smu_v11_0_set_fan_speed_rpm(struct smu_context
*smu
,
1506 struct amdgpu_device
*adev
= smu
->adev
;
1508 uint32_t tach_period
, crystal_clock_freq
;
1513 ret
= smu_v11_0_auto_fan_control(smu
, 0);
1517 crystal_clock_freq
= amdgpu_asic_get_xclk(adev
);
1518 tach_period
= 60 * crystal_clock_freq
* 10000 / (8 * speed
);
1519 WREG32_SOC15(THM
, 0, mmCG_TACH_CTRL
,
1520 REG_SET_FIELD(RREG32_SOC15(THM
, 0, mmCG_TACH_CTRL
),
1521 CG_TACH_CTRL
, TARGET_PERIOD
,
1524 ret
= smu_v11_0_set_fan_static_mode(smu
, FDO_PWM_MODE_STATIC_RPM
);
1529 int smu_v11_0_set_xgmi_pstate(struct smu_context
*smu
,
1533 ret
= smu_send_smc_msg_with_param(smu
,
1534 SMU_MSG_SetXgmiMode
,
1535 pstate
? XGMI_MODE_PSTATE_D0
: XGMI_MODE_PSTATE_D3
,
1540 static int smu_v11_0_ack_ac_dc_interrupt(struct smu_context
*smu
)
1542 return smu_send_smc_msg(smu
,
1543 SMU_MSG_ReenableAcDcInterrupt
,
1547 #define THM_11_0__SRCID__THM_DIG_THERM_L2H 0 /* ASIC_TEMP > CG_THERMAL_INT.DIG_THERM_INTH */
1548 #define THM_11_0__SRCID__THM_DIG_THERM_H2L 1 /* ASIC_TEMP < CG_THERMAL_INT.DIG_THERM_INTL */
1550 static int smu_v11_0_irq_process(struct amdgpu_device
*adev
,
1551 struct amdgpu_irq_src
*source
,
1552 struct amdgpu_iv_entry
*entry
)
1554 uint32_t client_id
= entry
->client_id
;
1555 uint32_t src_id
= entry
->src_id
;
1557 if (client_id
== SOC15_IH_CLIENTID_THM
) {
1559 case THM_11_0__SRCID__THM_DIG_THERM_L2H
:
1560 pr_warn("GPU over temperature range detected on PCIe %d:%d.%d!\n",
1561 PCI_BUS_NUM(adev
->pdev
->devfn
),
1562 PCI_SLOT(adev
->pdev
->devfn
),
1563 PCI_FUNC(adev
->pdev
->devfn
));
1565 case THM_11_0__SRCID__THM_DIG_THERM_H2L
:
1566 pr_warn("GPU under temperature range detected on PCIe %d:%d.%d!\n",
1567 PCI_BUS_NUM(adev
->pdev
->devfn
),
1568 PCI_SLOT(adev
->pdev
->devfn
),
1569 PCI_FUNC(adev
->pdev
->devfn
));
1572 pr_warn("GPU under temperature range unknown src id (%d), detected on PCIe %d:%d.%d!\n",
1574 PCI_BUS_NUM(adev
->pdev
->devfn
),
1575 PCI_SLOT(adev
->pdev
->devfn
),
1576 PCI_FUNC(adev
->pdev
->devfn
));
1580 } else if (client_id
== SOC15_IH_CLIENTID_MP1
) {
1582 smu_v11_0_ack_ac_dc_interrupt(&adev
->smu
);
1588 static const struct amdgpu_irq_src_funcs smu_v11_0_irq_funcs
=
1590 .process
= smu_v11_0_irq_process
,
1593 int smu_v11_0_register_irq_handler(struct smu_context
*smu
)
1595 struct amdgpu_device
*adev
= smu
->adev
;
1596 struct amdgpu_irq_src
*irq_src
= smu
->irq_source
;
1599 /* already register */
1603 irq_src
= kzalloc(sizeof(struct amdgpu_irq_src
), GFP_KERNEL
);
1606 smu
->irq_source
= irq_src
;
1608 irq_src
->funcs
= &smu_v11_0_irq_funcs
;
1610 ret
= amdgpu_irq_add_id(adev
, SOC15_IH_CLIENTID_THM
,
1611 THM_11_0__SRCID__THM_DIG_THERM_L2H
,
1616 ret
= amdgpu_irq_add_id(adev
, SOC15_IH_CLIENTID_THM
,
1617 THM_11_0__SRCID__THM_DIG_THERM_H2L
,
1622 ret
= amdgpu_irq_add_id(adev
, SOC15_IH_CLIENTID_MP1
,
1631 int smu_v11_0_get_max_sustainable_clocks_by_dc(struct smu_context
*smu
,
1632 struct pp_smu_nv_clock_table
*max_clocks
)
1634 struct smu_table_context
*table_context
= &smu
->smu_table
;
1635 struct smu_11_0_max_sustainable_clocks
*sustainable_clocks
= NULL
;
1637 if (!max_clocks
|| !table_context
->max_sustainable_clocks
)
1640 sustainable_clocks
= table_context
->max_sustainable_clocks
;
1642 max_clocks
->dcfClockInKhz
=
1643 (unsigned int) sustainable_clocks
->dcef_clock
* 1000;
1644 max_clocks
->displayClockInKhz
=
1645 (unsigned int) sustainable_clocks
->display_clock
* 1000;
1646 max_clocks
->phyClockInKhz
=
1647 (unsigned int) sustainable_clocks
->phy_clock
* 1000;
1648 max_clocks
->pixelClockInKhz
=
1649 (unsigned int) sustainable_clocks
->pixel_clock
* 1000;
1650 max_clocks
->uClockInKhz
=
1651 (unsigned int) sustainable_clocks
->uclock
* 1000;
1652 max_clocks
->socClockInKhz
=
1653 (unsigned int) sustainable_clocks
->soc_clock
* 1000;
1654 max_clocks
->dscClockInKhz
= 0;
1655 max_clocks
->dppClockInKhz
= 0;
1656 max_clocks
->fabricClockInKhz
= 0;
1661 int smu_v11_0_set_azalia_d3_pme(struct smu_context
*smu
)
1665 ret
= smu_send_smc_msg(smu
, SMU_MSG_BacoAudioD3PME
, NULL
);
1670 static int smu_v11_0_baco_set_armd3_sequence(struct smu_context
*smu
, enum smu_v11_0_baco_seq baco_seq
)
1672 return smu_send_smc_msg_with_param(smu
, SMU_MSG_ArmD3
, baco_seq
, NULL
);
1675 bool smu_v11_0_baco_is_support(struct smu_context
*smu
)
1677 struct smu_baco_context
*smu_baco
= &smu
->smu_baco
;
1680 mutex_lock(&smu_baco
->mutex
);
1681 baco_support
= smu_baco
->platform_support
;
1682 mutex_unlock(&smu_baco
->mutex
);
1687 /* Arcturus does not support this bit mask */
1688 if (smu_feature_is_supported(smu
, SMU_FEATURE_BACO_BIT
) &&
1689 !smu_feature_is_enabled(smu
, SMU_FEATURE_BACO_BIT
))
1695 enum smu_baco_state
smu_v11_0_baco_get_state(struct smu_context
*smu
)
1697 struct smu_baco_context
*smu_baco
= &smu
->smu_baco
;
1698 enum smu_baco_state baco_state
;
1700 mutex_lock(&smu_baco
->mutex
);
1701 baco_state
= smu_baco
->state
;
1702 mutex_unlock(&smu_baco
->mutex
);
1707 int smu_v11_0_baco_set_state(struct smu_context
*smu
, enum smu_baco_state state
)
1709 struct smu_baco_context
*smu_baco
= &smu
->smu_baco
;
1710 struct amdgpu_device
*adev
= smu
->adev
;
1711 struct amdgpu_ras
*ras
= amdgpu_ras_get_context(adev
);
1715 if (smu_v11_0_baco_get_state(smu
) == state
)
1718 mutex_lock(&smu_baco
->mutex
);
1720 if (state
== SMU_BACO_STATE_ENTER
) {
1721 if (!ras
|| !ras
->supported
) {
1722 data
= RREG32_SOC15(THM
, 0, mmTHM_BACO_CNTL
);
1724 WREG32_SOC15(THM
, 0, mmTHM_BACO_CNTL
, data
);
1726 ret
= smu_send_smc_msg_with_param(smu
, SMU_MSG_EnterBaco
, 0, NULL
);
1728 ret
= smu_send_smc_msg_with_param(smu
, SMU_MSG_EnterBaco
, 1, NULL
);
1731 ret
= smu_send_smc_msg(smu
, SMU_MSG_ExitBaco
, NULL
);
1735 if (ras
&& ras
->supported
) {
1736 ret
= smu_send_smc_msg(smu
, SMU_MSG_PrepareMp1ForUnload
, NULL
);
1741 /* clear vbios scratch 6 and 7 for coming asic reinit */
1742 WREG32(adev
->bios_scratch_reg_offset
+ 6, 0);
1743 WREG32(adev
->bios_scratch_reg_offset
+ 7, 0);
1748 smu_baco
->state
= state
;
1750 mutex_unlock(&smu_baco
->mutex
);
1754 int smu_v11_0_baco_enter(struct smu_context
*smu
)
1756 struct amdgpu_device
*adev
= smu
->adev
;
1759 /* Arcturus does not need this audio workaround */
1760 if (adev
->asic_type
!= CHIP_ARCTURUS
) {
1761 ret
= smu_v11_0_baco_set_armd3_sequence(smu
, BACO_SEQ_BACO
);
1766 ret
= smu_v11_0_baco_set_state(smu
, SMU_BACO_STATE_ENTER
);
1775 int smu_v11_0_baco_exit(struct smu_context
*smu
)
1779 ret
= smu_v11_0_baco_set_state(smu
, SMU_BACO_STATE_EXIT
);
1786 int smu_v11_0_get_dpm_ultimate_freq(struct smu_context
*smu
, enum smu_clk_type clk_type
,
1787 uint32_t *min
, uint32_t *max
)
1789 int ret
= 0, clk_id
= 0;
1792 clk_id
= smu_clk_get_index(smu
, clk_type
);
1797 param
= (clk_id
& 0xffff) << 16;
1800 ret
= smu_send_smc_msg_with_param(smu
, SMU_MSG_GetMaxDpmFreq
, param
, max
);
1806 ret
= smu_send_smc_msg_with_param(smu
, SMU_MSG_GetMinDpmFreq
, param
, min
);
1815 int smu_v11_0_set_soft_freq_limited_range(struct smu_context
*smu
, enum smu_clk_type clk_type
,
1816 uint32_t min
, uint32_t max
)
1818 int ret
= 0, clk_id
= 0;
1821 clk_id
= smu_clk_get_index(smu
, clk_type
);
1826 param
= (uint32_t)((clk_id
<< 16) | (max
& 0xffff));
1827 ret
= smu_send_smc_msg_with_param(smu
, SMU_MSG_SetSoftMaxByFreq
,
1834 param
= (uint32_t)((clk_id
<< 16) | (min
& 0xffff));
1835 ret
= smu_send_smc_msg_with_param(smu
, SMU_MSG_SetSoftMinByFreq
,
1844 int smu_v11_0_override_pcie_parameters(struct smu_context
*smu
)
1846 struct amdgpu_device
*adev
= smu
->adev
;
1847 uint32_t pcie_gen
= 0, pcie_width
= 0;
1850 if (amdgpu_sriov_vf(smu
->adev
))
1853 if (adev
->pm
.pcie_gen_mask
& CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4
)
1855 else if (adev
->pm
.pcie_gen_mask
& CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3
)
1857 else if (adev
->pm
.pcie_gen_mask
& CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2
)
1859 else if (adev
->pm
.pcie_gen_mask
& CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1
)
1862 /* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
1863 * Bit 15:8: PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
1864 * Bit 7:0: PCIE lane width, 1 to 7 corresponds is x1 to x32
1866 if (adev
->pm
.pcie_mlw_mask
& CAIL_PCIE_LINK_WIDTH_SUPPORT_X16
)
1868 else if (adev
->pm
.pcie_mlw_mask
& CAIL_PCIE_LINK_WIDTH_SUPPORT_X12
)
1870 else if (adev
->pm
.pcie_mlw_mask
& CAIL_PCIE_LINK_WIDTH_SUPPORT_X8
)
1872 else if (adev
->pm
.pcie_mlw_mask
& CAIL_PCIE_LINK_WIDTH_SUPPORT_X4
)
1874 else if (adev
->pm
.pcie_mlw_mask
& CAIL_PCIE_LINK_WIDTH_SUPPORT_X2
)
1876 else if (adev
->pm
.pcie_mlw_mask
& CAIL_PCIE_LINK_WIDTH_SUPPORT_X1
)
1879 ret
= smu_update_pcie_parameters(smu
, pcie_gen
, pcie_width
);
1882 pr_err("[%s] Attempt to override pcie params failed!\n", __func__
);
1888 int smu_v11_0_set_default_od_settings(struct smu_context
*smu
, bool initialize
, size_t overdrive_table_size
)
1890 struct smu_table_context
*table_context
= &smu
->smu_table
;
1894 if (table_context
->overdrive_table
) {
1897 table_context
->overdrive_table
= kzalloc(overdrive_table_size
, GFP_KERNEL
);
1898 if (!table_context
->overdrive_table
) {
1901 ret
= smu_update_table(smu
, SMU_TABLE_OVERDRIVE
, 0, table_context
->overdrive_table
, false);
1903 pr_err("Failed to export overdrive table!\n");
1906 if (!table_context
->boot_overdrive_table
) {
1907 table_context
->boot_overdrive_table
= kmemdup(table_context
->overdrive_table
, overdrive_table_size
, GFP_KERNEL
);
1908 if (!table_context
->boot_overdrive_table
) {
1913 ret
= smu_update_table(smu
, SMU_TABLE_OVERDRIVE
, 0, table_context
->overdrive_table
, true);
1915 pr_err("Failed to import overdrive table!\n");
1921 int smu_v11_0_set_performance_level(struct smu_context
*smu
,
1922 enum amd_dpm_forced_level level
)
1925 uint32_t sclk_mask
, mclk_mask
, soc_mask
;
1928 case AMD_DPM_FORCED_LEVEL_HIGH
:
1929 ret
= smu_force_dpm_limit_value(smu
, true);
1931 case AMD_DPM_FORCED_LEVEL_LOW
:
1932 ret
= smu_force_dpm_limit_value(smu
, false);
1934 case AMD_DPM_FORCED_LEVEL_AUTO
:
1935 case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD
:
1936 ret
= smu_unforce_dpm_levels(smu
);
1938 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK
:
1939 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK
:
1940 case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK
:
1941 ret
= smu_get_profiling_clk_mask(smu
, level
,
1947 smu_force_clk_levels(smu
, SMU_SCLK
, 1 << sclk_mask
, false);
1948 smu_force_clk_levels(smu
, SMU_MCLK
, 1 << mclk_mask
, false);
1949 smu_force_clk_levels(smu
, SMU_SOCCLK
, 1 << soc_mask
, false);
1951 case AMD_DPM_FORCED_LEVEL_MANUAL
:
1952 case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT
:
1959 int smu_v11_0_set_power_source(struct smu_context
*smu
,
1960 enum smu_power_src_type power_src
)
1964 pwr_source
= smu_power_get_index(smu
, (uint32_t)power_src
);
1968 return smu_send_smc_msg_with_param(smu
,
1969 SMU_MSG_NotifyPowerSource
,