/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "pp_debug.h"
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "soc15_common.h"
#include "smu_v11_0.h"
#include "atom.h"
#include "amd_pcie.h"

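/*
 * smu_get_smc_version - query the SMC for the driver interface version
 * and/or the SMU firmware version. Either output pointer may be NULL,
 * but at least one of them must be provided.
 */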
int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t *smu_version)
{
	int ret = 0;

	if (!if_version && !smu_version)
		return -EINVAL;

	if (if_version) {
		ret = smu_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion);
		if (ret)
			return ret;

		ret = smu_read_smc_arg(smu, if_version);
		if (ret)
			return ret;
	}

	if (smu_version) {
		ret = smu_send_smc_msg(smu, SMU_MSG_GetSmuVersion);
		if (ret)
			return ret;

		ret = smu_read_smc_arg(smu, smu_version);
		if (ret)
			return ret;
	}

	return ret;
}

int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
			   bool gate)
{
	int ret = 0;

	switch (block_type) {
	case AMD_IP_BLOCK_TYPE_UVD:
		ret = smu_dpm_set_uvd_enable(smu, gate);
		break;
	case AMD_IP_BLOCK_TYPE_VCE:
		ret = smu_dpm_set_vce_enable(smu, gate);
		break;
	default:
		break;
	}

	return ret;
}

enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu)
{
	/* power states are not supported */
	return POWER_STATE_TYPE_DEFAULT;
}

int smu_get_power_num_states(struct smu_context *smu,
			     struct pp_states_info *state_info)
{
	if (!state_info)
		return -EINVAL;

	/* power states are not supported, report zero states */
	memset(state_info, 0, sizeof(struct pp_states_info));
	state_info->nums = 0;

	return 0;
}

int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor,
			   void *data, uint32_t *size)
{
	int ret = 0;

	switch (sensor) {
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
		*((uint32_t *)data) = smu->pstate_sclk;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
		*((uint32_t *)data) = smu->pstate_mclk;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
		ret = smu_feature_get_enabled_mask(smu, (uint32_t *)data, 2);
		*size = 8;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (ret)
		*size = 0;

	return ret;
}

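/*
 * smu_update_table_with_arg - transfer a driver table between its vram
 * buffer and the SMU. The table id goes in the low 16 bits of the message
 * parameter and exarg in the high 16 bits; the table's MC address is sent
 * first via SetDriverDramAddrHigh/Low, then the copy direction is selected
 * with TransferTableDram2Smu or TransferTableSmu2Dram.
 */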
int smu_update_table_with_arg(struct smu_context *smu, uint16_t table_id, uint16_t exarg,
			      void *table_data, bool drv2smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *table = NULL;
	int ret = 0;
	uint32_t table_index;

	if (!table_data || table_id >= smu_table->table_count)
		return -EINVAL;

	table_index = (exarg << 16) | table_id;

	table = &smu_table->tables[table_id];

	if (drv2smu)
		memcpy(table->cpu_addr, table_data, table->size);

	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrHigh,
					  upper_32_bits(table->mc_address));
	if (ret)
		return ret;
	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrLow,
					  lower_32_bits(table->mc_address));
	if (ret)
		return ret;
	ret = smu_send_smc_msg_with_param(smu, drv2smu ?
					  SMU_MSG_TransferTableDram2Smu :
					  SMU_MSG_TransferTableSmu2Dram,
					  table_index);
	if (ret)
		return ret;

	if (!drv2smu)
		memcpy(table_data, table->cpu_addr, table->size);

	return ret;
}

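/*
 * The software SMU path is used on Vega20 only when the amdgpu_dpm module
 * parameter is set to 2, and unconditionally on Navi10 and later ASICs.
 */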
bool is_support_sw_smu(struct amdgpu_device *adev)
{
	if (adev->asic_type == CHIP_VEGA20)
		return (amdgpu_dpm == 2) ? true : false;
	else if (adev->asic_type >= CHIP_NAVI10)
		return true;
	else
		return false;
}

int smu_sys_get_pp_table(struct smu_context *smu, void **table)
{
	struct smu_table_context *smu_table = &smu->smu_table;

	if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
		return -EINVAL;

	if (smu_table->hardcode_pptable)
		*table = smu_table->hardcode_pptable;
	else
		*table = smu_table->power_play_table;

	return smu_table->power_play_table_size;
}

int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
	int ret = 0;

	if (!smu->pm_enabled)
		return -EINVAL;
	if (header->usStructureSize != size) {
		pr_err("pp table size does not match!\n");
		return -EIO;
	}

	mutex_lock(&smu->mutex);
	if (!smu_table->hardcode_pptable)
		smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
	if (!smu_table->hardcode_pptable) {
		ret = -ENOMEM;
		goto failed;
	}

	memcpy(smu_table->hardcode_pptable, buf, size);
	smu_table->power_play_table = smu_table->hardcode_pptable;
	smu_table->power_play_table_size = size;
	mutex_unlock(&smu->mutex);

	ret = smu_reset(smu);
	if (ret)
		pr_info("smu reset failed, ret = %d\n", ret);

	return ret;

failed:
	mutex_unlock(&smu->mutex);
	return ret;
}

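/*
 * smu_feature_init_dpm - rebuild the bitmap of DPM features the driver is
 * allowed to enable by querying the ASIC-specific allowed feature mask.
 */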
int smu_feature_init_dpm(struct smu_context *smu)
{
	struct smu_feature *feature = &smu->smu_feature;
	int ret = 0;
	uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32];

	if (!smu->pm_enabled)
		return ret;
	mutex_lock(&feature->mutex);
	bitmap_zero(feature->allowed, SMU_FEATURE_MAX);
	mutex_unlock(&feature->mutex);

	ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask,
					   SMU_FEATURE_MAX/32);
	if (ret)
		return ret;

	mutex_lock(&feature->mutex);
	bitmap_or(feature->allowed, feature->allowed,
		  (unsigned long *)allowed_feature_mask,
		  feature->feature_num);
	mutex_unlock(&feature->mutex);

	return ret;
}

int smu_feature_is_enabled(struct smu_context *smu, enum smu_feature_mask mask)
{
	struct smu_feature *feature = &smu->smu_feature;
	uint32_t feature_id;
	int ret = 0;

	feature_id = smu_feature_get_index(smu, mask);

	WARN_ON(feature_id > feature->feature_num);

	mutex_lock(&feature->mutex);
	ret = test_bit(feature_id, feature->enabled);
	mutex_unlock(&feature->mutex);

	return ret;
}

int smu_feature_set_enabled(struct smu_context *smu, enum smu_feature_mask mask,
			    bool enable)
{
	struct smu_feature *feature = &smu->smu_feature;
	uint32_t feature_id;
	int ret = 0;

	feature_id = smu_feature_get_index(smu, mask);

	WARN_ON(feature_id > feature->feature_num);

	mutex_lock(&feature->mutex);
	ret = smu_feature_update_enable_state(smu, feature_id, enable);
	if (ret)
		goto failed;

	if (enable)
		test_and_set_bit(feature_id, feature->enabled);
	else
		test_and_clear_bit(feature_id, feature->enabled);

failed:
	mutex_unlock(&feature->mutex);

	return ret;
}

int smu_feature_is_supported(struct smu_context *smu, enum smu_feature_mask mask)
{
	struct smu_feature *feature = &smu->smu_feature;
	uint32_t feature_id;
	int ret = 0;

	feature_id = smu_feature_get_index(smu, mask);

	WARN_ON(feature_id > feature->feature_num);

	mutex_lock(&feature->mutex);
	ret = test_bit(feature_id, feature->supported);
	mutex_unlock(&feature->mutex);

	return ret;
}

int smu_feature_set_supported(struct smu_context *smu,
			      enum smu_feature_mask mask,
			      bool enable)
{
	struct smu_feature *feature = &smu->smu_feature;
	uint32_t feature_id;
	int ret = 0;

	feature_id = smu_feature_get_index(smu, mask);

	WARN_ON(feature_id > feature->feature_num);

	mutex_lock(&feature->mutex);
	if (enable)
		test_and_set_bit(feature_id, feature->supported);
	else
		test_and_clear_bit(feature_id, feature->supported);
	mutex_unlock(&feature->mutex);

	return ret;
}

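/*
 * Bind the version-specific SMU callbacks for the detected ASIC; overdrive
 * is enabled only when PP_OVERDRIVE_MASK is set in pp_feature.
 */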
static int smu_set_funcs(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;

	switch (adev->asic_type) {
	case CHIP_VEGA20:
	case CHIP_NAVI10:
		if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
			smu->od_enabled = true;
		smu_v11_0_set_smu_funcs(smu);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int smu_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	smu->adev = adev;
	smu->pm_enabled = !!amdgpu_dpm;
	mutex_init(&smu->mutex);

	return smu_set_funcs(adev);
}

static int smu_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	if (!smu->pm_enabled)
		return 0;
	mutex_lock(&smu->mutex);
	smu_handle_task(&adev->smu,
			smu->smu_dpm.dpm_level,
			AMD_PP_TASK_COMPLETE_INIT);
	mutex_unlock(&smu->mutex);

	return 0;
}

int smu_get_atom_data_table(struct smu_context *smu, uint32_t table,
			    uint16_t *size, uint8_t *frev, uint8_t *crev,
			    uint8_t **addr)
{
	struct amdgpu_device *adev = smu->adev;
	uint16_t data_start;

	if (!amdgpu_atom_parse_data_header(adev->mode_info.atom_context, table,
					   size, frev, crev, &data_start))
		return -EINVAL;

	*addr = (uint8_t *)adev->mode_info.atom_context->bios + data_start;

	return 0;
}

static int smu_initialize_pptable(struct smu_context *smu)
{
	/* TODO */
	return 0;
}

static int smu_smc_table_sw_init(struct smu_context *smu)
{
	int ret;

	ret = smu_initialize_pptable(smu);
	if (ret) {
		pr_err("Failed to initialize the pptable!\n");
		return ret;
	}

	/*
	 * Create the smu_table structure and initialize the smc tables such
	 * as TABLE_PPTABLE, TABLE_WATERMARKS and TABLE_SMU_METRICS.
	 */
	ret = smu_init_smc_tables(smu);
	if (ret) {
		pr_err("Failed to init smc tables!\n");
		return ret;
	}

	/*
	 * Create the smu_power_context structure and allocate the
	 * smu_dpm_context and other context data it carries.
	 */
	ret = smu_init_power(smu);
	if (ret) {
		pr_err("Failed to init smu power!\n");
		return ret;
	}

	return 0;
}

static int smu_smc_table_sw_fini(struct smu_context *smu)
{
	int ret;

	ret = smu_fini_smc_tables(smu);
	if (ret) {
		pr_err("Failed to fini smc tables!\n");
		return ret;
	}

	return 0;
}

static int smu_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	int ret;

	smu->pool_size = adev->pm.smu_prv_buffer_size;
	smu->smu_feature.feature_num = SMU_FEATURE_MAX;
	mutex_init(&smu->smu_feature.mutex);
	bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
	bitmap_zero(smu->smu_feature.enabled, SMU_FEATURE_MAX);
	bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);
	smu->watermarks_bitmap = 0;
	smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
	smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;

	smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
	smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
	smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
	smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
	smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
	smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
	smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;
	smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];

	smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
	smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
	smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
	smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
	smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
	smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
	smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM;
	smu->display_config = &adev->pm.pm_display_cfg;

	smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
	smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
	ret = smu_init_microcode(smu);
	if (ret) {
		pr_err("Failed to load smu firmware!\n");
		return ret;
	}

	ret = smu_smc_table_sw_init(smu);
	if (ret) {
		pr_err("Failed to sw init smc table!\n");
		return ret;
	}

	return 0;
}

static int smu_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	int ret;

	ret = smu_smc_table_sw_fini(smu);
	if (ret) {
		pr_err("Failed to sw fini smc table!\n");
		return ret;
	}

	ret = smu_fini_power(smu);
	if (ret) {
		pr_err("Failed to fini smu power!\n");
		return ret;
	}

	return 0;
}

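/*
 * Allocate a buffer object in the table's preferred domain for every SMC
 * table with a non-zero size; on failure, release whatever was allocated.
 */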
static int smu_init_fb_allocations(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	uint32_t table_count = smu_table->table_count;
	uint32_t i = 0;
	int32_t ret = 0;

	if (table_count <= 0)
		return -EINVAL;

	for (i = 0; i < table_count; i++) {
		if (tables[i].size == 0)
			continue;
		ret = amdgpu_bo_create_kernel(adev,
					      tables[i].size,
					      tables[i].align,
					      tables[i].domain,
					      &tables[i].bo,
					      &tables[i].mc_address,
					      &tables[i].cpu_addr);
		if (ret)
			goto failed;
	}

	return 0;
failed:
	/* free every table allocated before the failing entry, including table 0 */
	for (; i > 0; i--) {
		if (tables[i - 1].size == 0)
			continue;
		amdgpu_bo_free_kernel(&tables[i - 1].bo,
				      &tables[i - 1].mc_address,
				      &tables[i - 1].cpu_addr);
	}
	return ret;
}

static int smu_fini_fb_allocations(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	uint32_t table_count = smu_table->table_count;
	uint32_t i = 0;

	if (table_count == 0 || tables == NULL)
		return 0;

	for (i = 0; i < table_count; i++) {
		if (tables[i].size == 0)
			continue;
		amdgpu_bo_free_kernel(&tables[i].bo,
				      &tables[i].mc_address,
				      &tables[i].cpu_addr);
	}

	return 0;
}

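/*
 * Tell the SMU the highest PCIe generation and link width supported by the
 * platform via the OverridePcieParameters message.
 */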
static int smu_override_pcie_parameters(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t pcie_gen = 0, pcie_width = 0, smu_pcie_arg;
	int ret;

	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
		pcie_gen = 3;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
		pcie_gen = 2;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
		pcie_gen = 1;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
		pcie_gen = 0;

	/* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
	 * Bit 15:8: PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
	 * Bit 7:0: PCIE lane width, 1 to 7 corresponds to x1 to x32
	 */
	if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
		pcie_width = 6;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
		pcie_width = 5;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
		pcie_width = 4;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
		pcie_width = 3;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
		pcie_width = 2;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
		pcie_width = 1;

	smu_pcie_arg = (1 << 16) | (pcie_gen << 8) | pcie_width;
	ret = smu_send_smc_msg_with_param(smu,
					  SMU_MSG_OverridePcieParameters,
					  smu_pcie_arg);
	if (ret)
		pr_err("[%s] Attempt to override pcie params failed!\n", __func__);
	return ret;
}

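/*
 * Bring the SMC tables up on the hardware. With @initialize set (first init),
 * boot values and the pptable are read from the vbios, vram buffers are
 * allocated and the firmware version is checked; on resume only the runtime
 * steps (write pptable, enable features, PCIe/display setup, etc.) are redone.
 */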
static int smu_smc_table_hw_init(struct smu_context *smu,
				 bool initialize)
{
	struct amdgpu_device *adev = smu->adev;
	int ret;

	if (smu_is_dpm_running(smu) && adev->in_suspend) {
		pr_info("dpm has been enabled\n");
		return 0;
	}

	ret = smu_init_display(smu);
	if (ret)
		return ret;

	if (initialize) {
		/* get boot values from the vbios to set revision, gfxclk, etc. */
		ret = smu_get_vbios_bootup_values(smu);
		if (ret)
			return ret;

		ret = smu_setup_pptable(smu);
		if (ret)
			return ret;

		/*
		 * Check that the format_revision in the vbios matches the
		 * pptable header version and that the structure size is not 0.
		 */
		ret = smu_check_pptable(smu);
		if (ret)
			return ret;

		/*
		 * Allocate vram bos to store smc table contents.
		 */
		ret = smu_init_fb_allocations(smu);
		if (ret)
			return ret;

		/*
		 * Parse the pptable format and fill the PPTable_t smc_pptable
		 * in the smu_table_context structure. Then read the
		 * smc_dpm_table from the vbios and fill it into smc_pptable.
		 */
		ret = smu_parse_pptable(smu);
		if (ret)
			return ret;

		/*
		 * Send the GetDriverIfVersion message to check that the return
		 * value matches the DRIVER_IF_VERSION in the smc header.
		 */
		ret = smu_check_fw_version(smu);
		if (ret)
			return ret;
	}

	/*
	 * Copy the pptable bo in vram to the smc with SMU messages such as
	 * SetDriverDramAddr and TransferTableDram2Smu.
	 */
	ret = smu_write_pptable(smu);
	if (ret)
		return ret;

	/* issue the RunAfllBtc message */
	ret = smu_run_afll_btc(smu);
	if (ret)
		return ret;

	ret = smu_feature_set_allowed_mask(smu);
	if (ret)
		return ret;

	ret = smu_system_features_control(smu, true);
	if (ret)
		return ret;

	ret = smu_override_pcie_parameters(smu);
	if (ret)
		return ret;

	ret = smu_notify_display_change(smu);
	if (ret)
		return ret;

	/*
	 * Set the min deep sleep dcef clk to the bootup value from the vbios
	 * via the SetMinDeepSleepDcefclk message.
	 */
	ret = smu_set_min_dcef_deep_sleep(smu);
	if (ret)
		return ret;

	/*
	 * Set the initial values (read from the vbios) in the dpm tables
	 * context, such as gfxclk, memclk and dcefclk, and enable the DPM
	 * feature for each type of clock.
	 */
	if (initialize) {
		ret = smu_populate_smc_pptable(smu);
		if (ret)
			return ret;

		ret = smu_init_max_sustainable_clocks(smu);
		if (ret)
			return ret;
	}

	ret = smu_set_od8_default_settings(smu, initialize);
	if (ret)
		return ret;

	if (initialize) {
		ret = smu_populate_umd_state_clk(smu);
		if (ret)
			return ret;

		ret = smu_get_power_limit(smu, &smu->default_power_limit, false);
		if (ret)
			return ret;
	}

	/*
	 * Set the PMSTATUSLOG table bo address with the SetToolsDramAddr
	 * message for tools.
	 */
	ret = smu_set_tool_table_location(smu);

	if (!smu_is_dpm_running(smu))
		pr_info("dpm has been disabled\n");

	return ret;
}

/**
 * smu_alloc_memory_pool - allocate a memory pool in system memory
 *
 * @smu: smu_context pointer
 *
 * The pool is reserved for SMC use; its location is reported to the SMC
 * with the SetSystemVirtualDramAddr and DramLogSetDramAddr messages.
 *
 * Returns 0 on success, error on failure.
 */
static int smu_alloc_memory_pool(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;
	uint64_t pool_size = smu->pool_size;
	int ret = 0;

	if (pool_size == SMU_MEMORY_POOL_SIZE_ZERO)
		return ret;

	memory_pool->size = pool_size;
	memory_pool->align = PAGE_SIZE;
	memory_pool->domain = AMDGPU_GEM_DOMAIN_GTT;

	switch (pool_size) {
	case SMU_MEMORY_POOL_SIZE_256_MB:
	case SMU_MEMORY_POOL_SIZE_512_MB:
	case SMU_MEMORY_POOL_SIZE_1_GB:
	case SMU_MEMORY_POOL_SIZE_2_GB:
		ret = amdgpu_bo_create_kernel(adev,
					      memory_pool->size,
					      memory_pool->align,
					      memory_pool->domain,
					      &memory_pool->bo,
					      &memory_pool->mc_address,
					      &memory_pool->cpu_addr);
		break;
	default:
		break;
	}

	return ret;
}

static int smu_free_memory_pool(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;
	int ret = 0;

	if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO)
		return ret;

	amdgpu_bo_free_kernel(&memory_pool->bo,
			      &memory_pool->mc_address,
			      &memory_pool->cpu_addr);

	memset(memory_pool, 0, sizeof(struct smu_table));

	return ret;
}

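/*
 * hw_init path: verify the SMC firmware status (when loaded via PSP),
 * initialize the allowed DPM features, bring up the SMC tables, allocate
 * and advertise the SMC memory pool, and start thermal control.
 */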
static int smu_hw_init(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		ret = smu_check_fw_status(smu);
		if (ret) {
			pr_err("SMC firmware status is not correct\n");
			return ret;
		}
	}

	mutex_lock(&smu->mutex);

	ret = smu_feature_init_dpm(smu);
	if (ret)
		goto failed;

	ret = smu_smc_table_hw_init(smu, true);
	if (ret)
		goto failed;

	ret = smu_alloc_memory_pool(smu);
	if (ret)
		goto failed;

	/*
	 * Use the SetSystemVirtualDramAddr and DramLogSetDramAddr messages
	 * to notify the SMC of the pool location.
	 */
	ret = smu_notify_memory_pool_location(smu);
	if (ret)
		goto failed;

	ret = smu_start_thermal_control(smu);
	if (ret)
		goto failed;

	mutex_unlock(&smu->mutex);

	if (!smu->pm_enabled)
		adev->pm.dpm_enabled = false;
	else
		adev->pm.dpm_enabled = true;

	pr_info("SMU is initialized successfully!\n");

	return 0;

failed:
	mutex_unlock(&smu->mutex);
	return ret;
}

static int smu_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	struct smu_table_context *table_context = &smu->smu_table;
	int ret = 0;

	kfree(table_context->driver_pptable);
	table_context->driver_pptable = NULL;

	kfree(table_context->max_sustainable_clocks);
	table_context->max_sustainable_clocks = NULL;

	kfree(table_context->od_feature_capabilities);
	table_context->od_feature_capabilities = NULL;

	kfree(table_context->od_settings_max);
	table_context->od_settings_max = NULL;

	kfree(table_context->od_settings_min);
	table_context->od_settings_min = NULL;

	kfree(table_context->overdrive_table);
	table_context->overdrive_table = NULL;

	kfree(table_context->od8_settings);
	table_context->od8_settings = NULL;

	ret = smu_fini_fb_allocations(smu);
	if (ret)
		return ret;

	ret = smu_free_memory_pool(smu);
	if (ret)
		return ret;

	return 0;
}

int smu_reset(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	ret = smu_hw_fini(adev);
	if (ret)
		return ret;

	ret = smu_hw_init(adev);
	if (ret)
		return ret;

	return ret;
}

static int smu_suspend(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	ret = smu_system_features_control(smu, false);
	if (ret)
		return ret;

	smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);

	return 0;
}

static int smu_resume(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	pr_info("SMU is resuming...\n");

	mutex_lock(&smu->mutex);

	ret = smu_smc_table_hw_init(smu, false);
	if (ret)
		goto failed;

	ret = smu_start_thermal_control(smu);
	if (ret)
		goto failed;

	mutex_unlock(&smu->mutex);

	pr_info("SMU is resumed successfully!\n");

	return 0;
failed:
	mutex_unlock(&smu->mutex);
	return ret;
}

int smu_display_configuration_change(struct smu_context *smu,
				     const struct amd_pp_display_configuration *display_config)
{
	int index = 0;
	int num_of_active_display = 0;

	if (!smu->pm_enabled || !is_support_sw_smu(smu->adev))
		return -EINVAL;

	if (!display_config)
		return -EINVAL;

	mutex_lock(&smu->mutex);

	smu_set_deep_sleep_dcefclk(smu,
				   display_config->min_dcef_deep_sleep_set_clk / 100);

	for (index = 0; index < display_config->num_path_including_non_display; index++) {
		if (display_config->displays[index].controller_id != 0)
			num_of_active_display++;
	}

	smu_set_active_display_count(smu, num_of_active_display);

	smu_store_cc6_data(smu, display_config->cpu_pstate_separation_time,
			   display_config->cpu_cc6_disable,
			   display_config->cpu_pstate_disable,
			   display_config->nb_pstate_switch_disable);

	mutex_unlock(&smu->mutex);

	return 0;
}

static int smu_get_clock_info(struct smu_context *smu,
			      struct smu_clock_info *clk_info,
			      enum smu_perf_level_designation designation)
{
	int ret;
	struct smu_performance_level level = {0};

	if (!clk_info)
		return -EINVAL;

	ret = smu_get_perf_level(smu, PERF_LEVEL_ACTIVITY, &level);
	if (ret)
		return -EINVAL;

	clk_info->min_mem_clk = level.memory_clock;
	clk_info->min_eng_clk = level.core_clock;
	clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;

	ret = smu_get_perf_level(smu, designation, &level);
	if (ret)
		return -EINVAL;

	clk_info->min_mem_clk = level.memory_clock;
	clk_info->min_eng_clk = level.core_clock;
	clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;

	return 0;
}

int smu_get_current_clocks(struct smu_context *smu,
			   struct amd_pp_clock_info *clocks)
{
	struct amd_pp_simple_clock_info simple_clocks = {0};
	struct smu_clock_info hw_clocks;
	int ret = 0;

	if (!is_support_sw_smu(smu->adev))
		return -EINVAL;

	mutex_lock(&smu->mutex);

	smu_get_dal_power_level(smu, &simple_clocks);

	if (smu->support_power_containment)
		ret = smu_get_clock_info(smu, &hw_clocks,
					 PERF_LEVEL_POWER_CONTAINMENT);
	else
		ret = smu_get_clock_info(smu, &hw_clocks, PERF_LEVEL_ACTIVITY);

	if (ret) {
		pr_err("Error in smu_get_clock_info\n");
		goto failed;
	}

	clocks->min_engine_clock = hw_clocks.min_eng_clk;
	clocks->max_engine_clock = hw_clocks.max_eng_clk;
	clocks->min_memory_clock = hw_clocks.min_mem_clk;
	clocks->max_memory_clock = hw_clocks.max_mem_clk;
	clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
	clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;
	clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
	clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;

	if (simple_clocks.level == 0)
		clocks->max_clocks_state = PP_DAL_POWERLEVEL_7;
	else
		clocks->max_clocks_state = simple_clocks.level;

	if (!smu_get_current_shallow_sleep_clocks(smu, &hw_clocks)) {
		clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
		clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
	}

failed:
	mutex_unlock(&smu->mutex);
	return ret;
}

static int smu_set_clockgating_state(void *handle,
				     enum amd_clockgating_state state)
{
	return 0;
}

static int smu_set_powergating_state(void *handle,
				     enum amd_powergating_state state)
{
	return 0;
}

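/*
 * Entering a profiling (UMD pstate) level saves the current DPM level and
 * ungates GFX clock/power gating so measurements are stable; leaving it
 * restores the saved level and re-enables gating.
 */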
static int smu_enable_umd_pstate(void *handle,
				 enum amd_dpm_forced_level *level)
{
	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
				     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
				     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
				     AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

	struct smu_context *smu = (struct smu_context *)(handle);
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);

	if (!smu->pm_enabled || !smu_dpm_ctx->dpm_context)
		return -EINVAL;

	if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
		/* enter umd pstate, save current level, disable gfx cg */
		if (*level & profile_mode_mask) {
			smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
			smu_dpm_ctx->enable_umd_pstate = true;
			amdgpu_device_ip_set_clockgating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
							       AMD_CG_STATE_UNGATE);
			amdgpu_device_ip_set_powergating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
							       AMD_PG_STATE_UNGATE);
		}
	} else {
		/* exit umd pstate, restore level, enable gfx cg */
		if (!(*level & profile_mode_mask)) {
			if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
				*level = smu_dpm_ctx->saved_dpm_level;
			smu_dpm_ctx->enable_umd_pstate = false;
			amdgpu_device_ip_set_clockgating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
							       AMD_CG_STATE_GATE);
			amdgpu_device_ip_set_powergating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
							       AMD_PG_STATE_GATE);
		}
	}

	return 0;
}

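/*
 * Re-evaluate the power state: optionally reapply the display configuration,
 * apply the clock adjustment rules, force/unforce DPM levels to match the
 * requested level, and reselect the active workload profile.
 */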
int smu_adjust_power_state_dynamic(struct smu_context *smu,
				   enum amd_dpm_forced_level level,
				   bool skip_display_settings)
{
	int ret = 0;
	int index = 0;
	uint32_t sclk_mask, mclk_mask, soc_mask;
	long workload;
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);

	if (!smu->pm_enabled)
		return -EINVAL;

	if (!skip_display_settings) {
		ret = smu_display_config_changed(smu);
		if (ret) {
			pr_err("Failed to change display config!\n");
			return ret;
		}
	}

	ret = smu_apply_clocks_adjust_rules(smu);
	if (ret) {
		pr_err("Failed to apply clocks adjust rules!\n");
		return ret;
	}

	if (!skip_display_settings) {
		ret = smu_notify_smc_dispaly_config(smu);
		if (ret) {
			pr_err("Failed to notify smc display config!\n");
			return ret;
		}
	}

	if (smu_dpm_ctx->dpm_level != level) {
		switch (level) {
		case AMD_DPM_FORCED_LEVEL_HIGH:
			ret = smu_force_dpm_limit_value(smu, true);
			break;
		case AMD_DPM_FORCED_LEVEL_LOW:
			ret = smu_force_dpm_limit_value(smu, false);
			break;

		case AMD_DPM_FORCED_LEVEL_AUTO:
			ret = smu_unforce_dpm_levels(smu);
			break;

		case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
		case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
		case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
		case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
			ret = smu_get_profiling_clk_mask(smu, level,
							 &sclk_mask,
							 &mclk_mask,
							 &soc_mask);
			if (ret)
				return ret;
			smu_force_clk_levels(smu, PP_SCLK, 1 << sclk_mask);
			smu_force_clk_levels(smu, PP_MCLK, 1 << mclk_mask);
			break;

		case AMD_DPM_FORCED_LEVEL_MANUAL:
		case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
		default:
			break;
		}

		if (!ret)
			smu_dpm_ctx->dpm_level = level;
	}

	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
		index = fls(smu->workload_mask);
		index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
		workload = smu->workload_setting[index];

		if (smu->power_profile_mode != workload)
			smu_set_power_profile_mode(smu, &workload, 0);
	}

	return ret;
}

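/*
 * Dispatch a powerplay task: display-config changes run the pre-display hooks
 * before adjusting the power state, while init-complete and readjust requests
 * only re-run the dynamic power state adjustment.
 */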
int smu_handle_task(struct smu_context *smu,
		    enum amd_dpm_forced_level level,
		    enum amd_pp_task task_id)
{
	int ret = 0;

	switch (task_id) {
	case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
		ret = smu_pre_display_config_changed(smu);
		if (ret)
			return ret;
		ret = smu_set_cpu_power_state(smu);
		if (ret)
			return ret;
		ret = smu_adjust_power_state_dynamic(smu, level, false);
		break;
	case AMD_PP_TASK_COMPLETE_INIT:
	case AMD_PP_TASK_READJUST_POWER_STATE:
		ret = smu_adjust_power_state_dynamic(smu, level, true);
		break;
	default:
		break;
	}

	return ret;
}

const struct amd_ip_funcs smu_ip_funcs = {
	.name = "smu",
	.early_init = smu_early_init,
	.late_init = smu_late_init,
	.sw_init = smu_sw_init,
	.sw_fini = smu_sw_fini,
	.hw_init = smu_hw_init,
	.hw_fini = smu_hw_fini,
	.suspend = smu_suspend,
	.resume = smu_resume,
	.is_idle = NULL,
	.check_soft_reset = NULL,
	.wait_for_idle = NULL,
	.soft_reset = NULL,
	.set_clockgating_state = smu_set_clockgating_state,
	.set_powergating_state = smu_set_powergating_state,
	.enable_umd_pstate = smu_enable_umd_pstate,
};

const struct amdgpu_ip_block_version smu_v11_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.major = 11,
	.minor = 0,
	.rev = 0,
	.funcs = &smu_ip_funcs,
};