/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/fb.h>
#include <linux/delay.h>
#include <linux/types.h>

#include "smumgr.h"
#include "pp_debug.h"
#include "ci_smumgr.h"
#include "ppsmc.h"
#include "smu7_hwmgr.h"
#include "hardwaremanager.h"
#include "ppatomctrl.h"
#include "cgs_common.h"
#include "atombios.h"
#include "pppcielanes.h"

#include "smu/smu_7_0_1_d.h"
#include "smu/smu_7_0_1_sh_mask.h"

#include "dce/dce_8_0_d.h"
#include "dce/dce_8_0_sh_mask.h"

#include "bif/bif_4_1_d.h"
#include "bif/bif_4_1_sh_mask.h"

#include "gca/gfx_7_2_d.h"
#include "gca/gfx_7_2_sh_mask.h"

#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"

#include "processpptables.h"

#define MC_CG_ARB_FREQ_F0 0x0a
#define MC_CG_ARB_FREQ_F1 0x0b
#define MC_CG_ARB_FREQ_F2 0x0c
#define MC_CG_ARB_FREQ_F3 0x0d

#define SMC_RAM_END 0x40000

#define VOLTAGE_SCALE 4
#define VOLTAGE_VID_OFFSET_SCALE1 625
#define VOLTAGE_VID_OFFSET_SCALE2 100
#define CISLAND_MINIMUM_ENGINE_CLOCK 800
#define CISLAND_MAX_DEEPSLEEP_DIVIDER_ID 5

static const struct ci_pt_defaults defaults_hawaii_xt = {
    1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000,
    { 0x2E, 0x00, 0x00, 0x88, 0x00, 0x00, 0x72, 0x60, 0x51, 0xA7, 0x79, 0x6B, 0x90, 0xBD, 0x79 },
    { 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
};

static const struct ci_pt_defaults defaults_hawaii_pro = {
    1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0x65062,
    { 0x2E, 0x00, 0x00, 0x88, 0x00, 0x00, 0x72, 0x60, 0x51, 0xA7, 0x79, 0x6B, 0x90, 0xBD, 0x79 },
    { 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
};

static const struct ci_pt_defaults defaults_bonaire_xt = {
    1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
    { 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61 },
    { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
};

static const struct ci_pt_defaults defaults_saturn_xt = {
    1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x70000,
    { 0x8C, 0x247, 0x249, 0xA6, 0x80, 0x81, 0x8B, 0x89, 0x86, 0xC9, 0xCA, 0xC9, 0x4D, 0x4D, 0x4D },
    { 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 }
};

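/* Program the SMC indirect-access index register with a dword-aligned
 * SRAM address and disable auto-increment, so that the next access to
 * mmSMC_IND_DATA_0 hits exactly that location.
 */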
static int ci_set_smc_sram_address(struct pp_hwmgr *hwmgr,
        uint32_t smc_addr, uint32_t limit)
{
    if ((0 != (3 & smc_addr))
        || ((smc_addr + 3) >= limit)) {
        pr_err("smc_addr invalid\n");
        return -EINVAL;
    }

    cgs_write_register(hwmgr->device, mmSMC_IND_INDEX_0, smc_addr);
    PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
    return 0;
}

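/* Copy a byte buffer into SMC SRAM. Whole dwords are written MSB-first;
 * a trailing partial dword is merged with the existing SRAM contents
 * via a read-modify-write so bytes beyond the buffer are preserved.
 */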
static int ci_copy_bytes_to_smc(struct pp_hwmgr *hwmgr, uint32_t smc_start_address,
        const uint8_t *src, uint32_t byte_count, uint32_t limit)
{
    int result;
    uint32_t data = 0;
    uint32_t original_data;
    uint32_t addr = 0;
    uint32_t extra_shift;

    if ((3 & smc_start_address)
        || ((smc_start_address + byte_count) >= limit)) {
        pr_err("smc_start_address invalid\n");
        return -EINVAL;
    }

    addr = smc_start_address;

    while (byte_count >= 4) {
        /* Bytes are written into the SMC address space with the MSB first. */
        data = src[0] * 0x1000000 + src[1] * 0x10000 + src[2] * 0x100 + src[3];

        result = ci_set_smc_sram_address(hwmgr, addr, limit);

        if (0 != result)
            return result;

        cgs_write_register(hwmgr->device, mmSMC_IND_DATA_0, data);

        src += 4;
        byte_count -= 4;
        addr += 4;
    }

    if (0 != byte_count) {
        data = 0;

        result = ci_set_smc_sram_address(hwmgr, addr, limit);

        if (0 != result)
            return result;

        original_data = cgs_read_register(hwmgr->device, mmSMC_IND_DATA_0);

        extra_shift = 8 * (4 - byte_count);

        while (byte_count > 0) {
            /* Bytes are written into the SMC address space with the MSB first. */
            data = (0x100 * data) + *src++;
            byte_count--;
        }

        data <<= extra_shift;

        data |= (original_data & ~((~0UL) << extra_shift));

        result = ci_set_smc_sram_address(hwmgr, addr, limit);

        if (0 != result)
            return result;

        cgs_write_register(hwmgr->device, mmSMC_IND_DATA_0, data);
    }

    return 0;
}

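/* Write a bootstrap word at SMC address 0 so the microcontroller jumps
 * to the firmware entry point when it starts running.
 */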
static int ci_program_jump_on_start(struct pp_hwmgr *hwmgr)
{
    static const unsigned char data[4] = { 0xE0, 0x00, 0x80, 0x40 };

    ci_copy_bytes_to_smc(hwmgr, 0x0, data, 4, sizeof(data)+1);

    return 0;
}

bool ci_is_smc_ram_running(struct pp_hwmgr *hwmgr)
{
    return ((0 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
            CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable))
        && (0x20100 <= cgs_read_ind_register(hwmgr->device,
            CGS_IND_REG__SMC, ixSMC_PC_C)));
}

static int ci_read_smc_sram_dword(struct pp_hwmgr *hwmgr, uint32_t smc_addr,
        uint32_t *value, uint32_t limit)
{
    int result;

    result = ci_set_smc_sram_address(hwmgr, smc_addr, limit);

    if (result)
        return result;

    *value = cgs_read_register(hwmgr->device, mmSMC_IND_DATA_0);
    return 0;
}

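/* Post a message to the SMC mailbox and busy-wait for the response
 * register to change; a response other than 1 is logged but not
 * treated as a failure.
 */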
static int ci_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
{
    int ret;

    if (!ci_is_smc_ram_running(hwmgr))
        return -EINVAL;

    cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, msg);

    PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0);

    ret = PHM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP);

    if (ret != 1)
        pr_info("failed to send message %x, ret is %d\n", msg, ret);

    return 0;
}

static int ci_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
        uint16_t msg, uint32_t parameter)
{
    cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, parameter);
    return ci_send_msg_to_smc(hwmgr, msg);
}

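/* Select the per-ASIC power-tune defaults by PCI device ID: Hawaii PRO
 * and Hawaii XT parts get dedicated tables, Saturn XT parts get theirs,
 * and everything else falls back to the Bonaire XT defaults.
 */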
static void ci_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
{
    struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
    struct cgs_system_info sys_info = {0};
    uint32_t dev_id;

    sys_info.size = sizeof(struct cgs_system_info);
    sys_info.info_id = CGS_SYSTEM_INFO_PCIE_DEV;
    cgs_query_system_info(hwmgr->device, &sys_info);
    dev_id = (uint32_t)sys_info.value;

    switch (dev_id) {
    case 0x67BA:
    case 0x66B1:
        smu_data->power_tune_defaults = &defaults_hawaii_pro;
        break;
    case 0x67B8:
    case 0x66B0:
        smu_data->power_tune_defaults = &defaults_hawaii_xt;
        break;
    case 0x6640:
    case 0x6641:
    case 0x6646:
    case 0x6647:
        smu_data->power_tune_defaults = &defaults_saturn_xt;
        break;
    case 0x6649:
    case 0x6650:
    case 0x6651:
    case 0x6658:
    case 0x665C:
    case 0x665D:
    case 0x67A0:
    case 0x67A1:
    case 0x67A2:
    case 0x67A8:
    case 0x67A9:
    case 0x67AA:
    case 0x67B9:
    case 0x67BE:
    default:
        smu_data->power_tune_defaults = &defaults_bonaire_xt;
        break;
    }
}

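/* Return the voltage for the lowest entry in a clock/voltage dependency
 * table whose clock is at or above the requested clock; if the request
 * exceeds every entry, fall back to the highest (last) entry's voltage.
 */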
static int ci_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
        struct phm_clock_voltage_dependency_table *allowed_clock_voltage_table,
        uint32_t clock, uint32_t *vol)
{
    uint32_t i = 0;

    if (allowed_clock_voltage_table->count == 0)
        return -EINVAL;

    for (i = 0; i < allowed_clock_voltage_table->count; i++) {
        if (allowed_clock_voltage_table->entries[i].clk >= clock) {
            *vol = allowed_clock_voltage_table->entries[i].v;
            return 0;
        }
    }

    *vol = allowed_clock_voltage_table->entries[i - 1].v;
    return 0;
}

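/* Derive the SPLL register values (reference/post dividers, fractional
 * feedback divider, and optional spread spectrum) for a target engine
 * clock, using the dividers reported by the VBIOS.
 */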
static int ci_calculate_sclk_params(struct pp_hwmgr *hwmgr,
        uint32_t clock, struct SMU7_Discrete_GraphicsLevel *sclk)
{
    const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
    struct pp_atomctrl_clock_dividers_vi dividers;
    uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
    uint32_t spll_func_cntl_3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
    uint32_t spll_func_cntl_4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
    uint32_t cg_spll_spread_spectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
    uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
    uint32_t ref_clock;
    uint32_t ref_divider;
    uint32_t fbdiv;
    int result;

    /* get the engine clock dividers for this clock value */
    result = atomctrl_get_engine_pll_dividers_vi(hwmgr, clock, &dividers);

    PP_ASSERT_WITH_CODE(result == 0,
            "Error retrieving Engine Clock dividers from VBIOS.",
            return result);

    /* To get FBDIV we need to multiply this by 16384 and divide it by Fref. */
    ref_clock = atomctrl_get_reference_clock(hwmgr);
    ref_divider = 1 + dividers.uc_pll_ref_div;

    /* low 14 bits is fraction and high 12 bits is divider */
    fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF;

    /* SPLL_FUNC_CNTL setup */
    spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
            SPLL_REF_DIV, dividers.uc_pll_ref_div);
    spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
            SPLL_PDIV_A, dividers.uc_pll_post_div);

    /* SPLL_FUNC_CNTL_3 setup */
    spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3,
            SPLL_FB_DIV, fbdiv);

    /* set to use fractional accumulation */
    spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3,
            SPLL_DITHEN, 1);

    if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
            PHM_PlatformCaps_EngineSpreadSpectrumSupport)) {
        struct pp_atomctrl_internal_ss_info ss_info;
        uint32_t vco_freq = clock * dividers.uc_pll_post_div;

        if (!atomctrl_get_engine_clock_spread_spectrum(hwmgr,
                vco_freq, &ss_info)) {
            uint32_t clk_s = ref_clock * 5 /
                    (ref_divider * ss_info.speed_spectrum_rate);
            uint32_t clk_v = 4 * ss_info.speed_spectrum_percentage *
                    fbdiv / (clk_s * 10000);

            cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum,
                    CG_SPLL_SPREAD_SPECTRUM, CLKS, clk_s);
            cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum,
                    CG_SPLL_SPREAD_SPECTRUM, SSEN, 1);
            cg_spll_spread_spectrum_2 = PHM_SET_FIELD(cg_spll_spread_spectrum_2,
                    CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clk_v);
        }
    }

    sclk->SclkFrequency = clock;
    sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
    sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
    sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
    sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
    sclk->SclkDid = (uint8_t)dividers.pll_post_divider;

    return 0;
}

static void ci_populate_phase_value_based_on_sclk(struct pp_hwmgr *hwmgr,
        const struct phm_phase_shedding_limits_table *pl,
        uint32_t sclk, uint32_t *p_shed)
{
    unsigned int i;

    /* use the minimum phase shedding */
    *p_shed = 1;

    for (i = 0; i < pl->count; i++) {
        if (sclk < pl->entries[i].Sclk) {
            *p_shed = i;
            break;
        }
    }
}

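/* Pick the deepest sleep divider (engine clock right-shift) that still
 * keeps the divided clock at or above the stutter-mode minimum.
 */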
static uint8_t ci_get_sleep_divider_id_from_clock(uint32_t clock,
        uint32_t clock_insr)
{
    uint8_t i;
    uint32_t temp;
    uint32_t min = min_t(uint32_t, clock_insr, CISLAND_MINIMUM_ENGINE_CLOCK);

    if (clock < min) {
        pr_info("Engine clock can't satisfy stutter requirement!\n");
        return 0;
    }
    for (i = CISLAND_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) {
        temp = clock >> i;

        if (temp >= min || i == 0)
            break;
    }
    return i;
}

static int ci_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
        uint32_t clock, uint16_t sclk_al_threshold,
        struct SMU7_Discrete_GraphicsLevel *level)
{
    int result;
    struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

    result = ci_calculate_sclk_params(hwmgr, clock, level);

    /* populate graphics levels */
    result = ci_get_dependency_volt_by_clk(hwmgr,
            hwmgr->dyn_state.vddc_dependency_on_sclk, clock,
            (uint32_t *)(&level->MinVddc));
    if (result) {
        pr_err("vdd_dep_on_sclk table is NULL\n");
        return result;
    }

    level->SclkFrequency = clock;
    level->MinVddcPhases = 1;

    if (data->vddc_phase_shed_control)
        ci_populate_phase_value_based_on_sclk(hwmgr,
                hwmgr->dyn_state.vddc_phase_shed_limits_table,
                clock,
                &level->MinVddcPhases);

    level->ActivityLevel = sclk_al_threshold;
    level->CcPwrDynRm = 0;
    level->CcPwrDynRm1 = 0;
    level->EnabledForActivity = 0;
    /* this level can be used for throttling. */
    level->EnabledForThrottle = 1;
    level->UpH = 0;
    level->DownH = 0;
    level->VoltageDownH = 0;
    level->PowerThrottle = 0;

    if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
            PHM_PlatformCaps_SclkDeepSleep))
        level->DeepSleepDivId =
            ci_get_sleep_divider_id_from_clock(clock,
                    CISLAND_MINIMUM_ENGINE_CLOCK);

    /* Default to slow, highest DPM level will be set to PPSMC_DISPLAY_WATERMARK_LOW later. */
    level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;

    if (0 == result) {
        level->MinVddc = PP_HOST_TO_SMC_UL(level->MinVddc * VOLTAGE_SCALE);
        CONVERT_FROM_HOST_TO_SMC_UL(level->MinVddcPhases);
        CONVERT_FROM_HOST_TO_SMC_UL(level->SclkFrequency);
        CONVERT_FROM_HOST_TO_SMC_US(level->ActivityLevel);
        CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl3);
        CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl4);
        CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum);
        CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum2);
        CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm);
        CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm1);
    }

    return result;
}

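/* Build every graphics DPM level from the SCLK DPM table and upload the
 * whole level array into SMC SRAM; only the lowest level is enabled for
 * activity, and the top level gets the high display watermark.
 */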
static int ci_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
{
    struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
    struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
    struct smu7_dpm_table *dpm_table = &data->dpm_table;
    int result = 0;
    uint32_t array = smu_data->dpm_table_start +
            offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
    uint32_t array_size = sizeof(struct SMU7_Discrete_GraphicsLevel) *
            SMU7_MAX_LEVELS_GRAPHICS;
    struct SMU7_Discrete_GraphicsLevel *levels =
            smu_data->smc_state_table.GraphicsLevel;
    uint32_t i;

    for (i = 0; i < dpm_table->sclk_table.count; i++) {
        result = ci_populate_single_graphic_level(hwmgr,
                dpm_table->sclk_table.dpm_levels[i].value,
                (uint16_t)smu_data->activity_target[i],
                &levels[i]);
        if (result)
            return result;
        if (i > 1)
            smu_data->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
        if (i == (dpm_table->sclk_table.count - 1))
            smu_data->smc_state_table.GraphicsLevel[i].DisplayWatermark =
                PPSMC_DISPLAY_WATERMARK_HIGH;
    }

    smu_data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;

    smu_data->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count;
    data->dpm_level_enable_mask.sclk_dpm_enable_mask =
        phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);

    result = ci_copy_bytes_to_smc(hwmgr, array,
            (u8 *)levels, array_size,
            SMC_RAM_END);

    return result;
}

static int ci_populate_svi_load_line(struct pp_hwmgr *hwmgr)
{
    struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
    const struct ci_pt_defaults *defaults = smu_data->power_tune_defaults;

    smu_data->power_tune_table.SviLoadLineEn = defaults->svi_load_line_en;
    smu_data->power_tune_table.SviLoadLineVddC = defaults->svi_load_line_vddc;
    smu_data->power_tune_table.SviLoadLineTrimVddC = 3;
    smu_data->power_tune_table.SviLoadLineOffsetVddC = 0;

    return 0;
}

static int ci_populate_tdc_limit(struct pp_hwmgr *hwmgr)
{
    uint16_t tdc_limit;
    struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
    const struct ci_pt_defaults *defaults = smu_data->power_tune_defaults;

    tdc_limit = (uint16_t)(hwmgr->dyn_state.cac_dtp_table->usTDC * 256);
    smu_data->power_tune_table.TDC_VDDC_PkgLimit =
            CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
    smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
            defaults->tdc_vddc_throttle_release_limit_perc;
    smu_data->power_tune_table.TDC_MAWt = defaults->tdc_mawt;

    return 0;
}

static int ci_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
{
    struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
    const struct ci_pt_defaults *defaults = smu_data->power_tune_defaults;
    uint32_t temp;

    if (ci_read_smc_sram_dword(hwmgr,
            fuse_table_offset +
            offsetof(SMU7_Discrete_PmFuses, TdcWaterfallCtl),
            (uint32_t *)&temp, SMC_RAM_END))
        PP_ASSERT_WITH_CODE(false,
                "Attempt to read PmFuses.DW8 (TdcWaterfallCtl) from SMC Failed!",
                return -EINVAL);
    else
        smu_data->power_tune_table.TdcWaterfallCtl = defaults->tdc_waterfall_ctl;

    return 0;
}

static int ci_populate_fuzzy_fan(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
{
    uint16_t tmp;
    struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);

    if ((hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity & (1 << 15))
        || 0 == hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity)
        tmp = hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity;
    else
        tmp = hwmgr->thermal_controller.advanceFanControlParameters.usDefaultFanOutputSensitivity;

    smu_data->power_tune_table.FuzzyFan_PwmSetDelta = CONVERT_FROM_HOST_TO_SMC_US(tmp);

    return 0;
}

static int ci_populate_bapm_vddc_vid_sidd(struct pp_hwmgr *hwmgr)
{
    int i;
    struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
    uint8_t *hi_vid = smu_data->power_tune_table.BapmVddCVidHiSidd;
    uint8_t *lo_vid = smu_data->power_tune_table.BapmVddCVidLoSidd;
    uint8_t *hi2_vid = smu_data->power_tune_table.BapmVddCVidHiSidd2;

    PP_ASSERT_WITH_CODE(NULL != hwmgr->dyn_state.cac_leakage_table,
            "The CAC Leakage table does not exist!", return -EINVAL);
    PP_ASSERT_WITH_CODE(hwmgr->dyn_state.cac_leakage_table->count <= 8,
            "There should never be more than 8 entries for BapmVddcVid!!!", return -EINVAL);
    PP_ASSERT_WITH_CODE(hwmgr->dyn_state.cac_leakage_table->count == hwmgr->dyn_state.vddc_dependency_on_sclk->count,
            "CACLeakageTable->count and VddcDependencyOnSCLk->count not equal", return -EINVAL);

    for (i = 0; (uint32_t) i < hwmgr->dyn_state.cac_leakage_table->count; i++) {
        if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_EVV)) {
            lo_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc1);
            hi_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc2);
            hi2_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc3);
        } else {
            lo_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc);
            hi_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Leakage);
        }
    }

    return 0;
}

static int ci_populate_vddc_vid(struct pp_hwmgr *hwmgr)
{
    int i;
    struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
    uint8_t *vid = smu_data->power_tune_table.VddCVid;
    struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

    PP_ASSERT_WITH_CODE(data->vddc_voltage_table.count <= 8,
            "There should never be more than 8 entries for VddcVid!!!",
            return -EINVAL);

    for (i = 0; i < (int)data->vddc_voltage_table.count; i++)
        vid[i] = convert_to_vid(data->vddc_voltage_table.entries[i].value);

    return 0;
}

static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct pp_hwmgr *hwmgr)
{
    struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
    u8 *hi_vid = smu_data->power_tune_table.BapmVddCVidHiSidd;
    u8 *lo_vid = smu_data->power_tune_table.BapmVddCVidLoSidd;
    int i, min, max;

    min = max = hi_vid[0];
    for (i = 0; i < 8; i++) {
        if (0 != hi_vid[i]) {
            if (min > hi_vid[i])
                min = hi_vid[i];
            if (max < hi_vid[i])
                max = hi_vid[i];
        }

        if (0 != lo_vid[i]) {
            if (min > lo_vid[i])
                min = lo_vid[i];
            if (max < lo_vid[i])
                max = lo_vid[i];
        }
    }

    if ((min == 0) || (max == 0))
        return -EINVAL;
    smu_data->power_tune_table.GnbLPMLMaxVid = (u8)max;
    smu_data->power_tune_table.GnbLPMLMinVid = (u8)min;

    return 0;
}

static int ci_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
{
    struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
    uint16_t HiSidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd;
    uint16_t LoSidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd;
    struct phm_cac_tdp_table *cac_table = hwmgr->dyn_state.cac_dtp_table;

    HiSidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
    LoSidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);

    smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd =
            CONVERT_FROM_HOST_TO_SMC_US(HiSidd);
    smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd =
            CONVERT_FROM_HOST_TO_SMC_US(LoSidd);

    return 0;
}

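/* Fill in each section of the PmFuses structure (DW0-DW8 plus the fan
 * and leakage fields) and, if power containment is enabled, write the
 * whole table to the SMC SRAM offset taken from the firmware header.
 */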
static int ci_populate_pm_fuses(struct pp_hwmgr *hwmgr)
{
    struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
    uint32_t pm_fuse_table_offset;
    int ret = 0;

    if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
            PHM_PlatformCaps_PowerContainment)) {
        if (ci_read_smc_sram_dword(hwmgr,
                SMU7_FIRMWARE_HEADER_LOCATION +
                offsetof(SMU7_Firmware_Header, PmFuseTable),
                &pm_fuse_table_offset, SMC_RAM_END)) {
            pr_err("Attempt to get pm_fuse_table_offset Failed!\n");
            return -EINVAL;
        }

        /* DW0 - DW3 */
        ret = ci_populate_bapm_vddc_vid_sidd(hwmgr);
        /* DW4 - DW5 */
        ret |= ci_populate_vddc_vid(hwmgr);
        /* DW6 */
        ret |= ci_populate_svi_load_line(hwmgr);
        /* DW7 */
        ret |= ci_populate_tdc_limit(hwmgr);
        /* DW8 */
        ret |= ci_populate_dw8(hwmgr, pm_fuse_table_offset);

        ret |= ci_populate_fuzzy_fan(hwmgr, pm_fuse_table_offset);

        ret |= ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(hwmgr);

        ret |= ci_populate_bapm_vddc_base_leakage_sidd(hwmgr);
        if (ret)
            return ret;

        ret = ci_copy_bytes_to_smc(hwmgr, pm_fuse_table_offset,
                (uint8_t *)&smu_data->power_tune_table,
                sizeof(struct SMU7_Discrete_PmFuses), SMC_RAM_END);
    }
    return ret;
}

static int ci_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
{
    struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
    struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
    const struct ci_pt_defaults *defaults = smu_data->power_tune_defaults;
    SMU7_Discrete_DpmTable *dpm_table = &(smu_data->smc_state_table);
    struct phm_cac_tdp_table *cac_dtp_table = hwmgr->dyn_state.cac_dtp_table;
    struct phm_ppm_table *ppm = hwmgr->dyn_state.ppm_parameter_table;
    const uint16_t *def1, *def2;
    int i, j, k;

    dpm_table->DefaultTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 256));
    dpm_table->TargetTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usConfigurableTDP * 256));

    dpm_table->DTETjOffset = 0;
    dpm_table->GpuTjMax = (uint8_t)(data->thermal_temp_setting.temperature_high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES);
    dpm_table->GpuTjHyst = 8;

    dpm_table->DTEAmbientTempBase = defaults->dte_ambient_temp_base;

    if (ppm) {
        dpm_table->PPM_PkgPwrLimit = (uint16_t)ppm->dgpu_tdp * 256 / 1000;
        dpm_table->PPM_TemperatureLimit = (uint16_t)ppm->tj_max * 256;
    } else {
        dpm_table->PPM_PkgPwrLimit = 0;
        dpm_table->PPM_TemperatureLimit = 0;
    }

    CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_PkgPwrLimit);
    CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_TemperatureLimit);

    dpm_table->BAPM_TEMP_GRADIENT = PP_HOST_TO_SMC_UL(defaults->bapm_temp_gradient);
    def1 = defaults->bapmti_r;
    def2 = defaults->bapmti_rc;

    for (i = 0; i < SMU7_DTE_ITERATIONS; i++) {
        for (j = 0; j < SMU7_DTE_SOURCES; j++) {
            for (k = 0; k < SMU7_DTE_SINKS; k++) {
                dpm_table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*def1);
                dpm_table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*def2);
                def1++;
                def2++;
            }
        }
    }

    return 0;
}

static int ci_get_std_voltage_value_sidd(struct pp_hwmgr *hwmgr,
        pp_atomctrl_voltage_table_entry *tab, uint16_t *hi,
        uint16_t *lo)
{
    uint16_t v_index;
    bool vol_found = false;
    *hi = tab->value * VOLTAGE_SCALE;
    *lo = tab->value * VOLTAGE_SCALE;

    PP_ASSERT_WITH_CODE(NULL != hwmgr->dyn_state.vddc_dependency_on_sclk,
            "The SCLK/VDDC Dependency Table does not exist.\n",
            return -EINVAL);

    if (NULL == hwmgr->dyn_state.cac_leakage_table) {
        pr_warn("CAC Leakage Table does not exist, using vddc.\n");
        return 0;
    }

    for (v_index = 0; (uint32_t)v_index < hwmgr->dyn_state.vddc_dependency_on_sclk->count; v_index++) {
        if (tab->value == hwmgr->dyn_state.vddc_dependency_on_sclk->entries[v_index].v) {
            vol_found = true;
            if ((uint32_t)v_index < hwmgr->dyn_state.cac_leakage_table->count) {
                *lo = hwmgr->dyn_state.cac_leakage_table->entries[v_index].Vddc * VOLTAGE_SCALE;
                *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[v_index].Leakage * VOLTAGE_SCALE);
            } else {
                pr_warn("Index from SCLK/VDDC Dependency Table exceeds the CAC Leakage Table index, using maximum index from CAC table.\n");
                *lo = hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Vddc * VOLTAGE_SCALE;
                *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Leakage * VOLTAGE_SCALE);
            }
            break;
        }
    }

    if (!vol_found) {
        for (v_index = 0; (uint32_t)v_index < hwmgr->dyn_state.vddc_dependency_on_sclk->count; v_index++) {
            if (tab->value <= hwmgr->dyn_state.vddc_dependency_on_sclk->entries[v_index].v) {
                vol_found = true;
                if ((uint32_t)v_index < hwmgr->dyn_state.cac_leakage_table->count) {
                    *lo = hwmgr->dyn_state.cac_leakage_table->entries[v_index].Vddc * VOLTAGE_SCALE;
                    *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[v_index].Leakage) * VOLTAGE_SCALE;
                } else {
                    pr_warn("Index from SCLK/VDDC Dependency Table exceeds the CAC Leakage Table index in second look up, using maximum index from CAC table.\n");
                    *lo = hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Vddc * VOLTAGE_SCALE;
                    *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Leakage * VOLTAGE_SCALE);
                }
                break;
            }
        }

        if (!vol_found)
            pr_warn("Unable to get std_vddc from SCLK/VDDC Dependency Table, using vddc.\n");
    }

    return 0;
}

static int ci_populate_smc_voltage_table(struct pp_hwmgr *hwmgr,
        pp_atomctrl_voltage_table_entry *tab,
        SMU7_Discrete_VoltageLevel *smc_voltage_tab)
{
    int result;

    result = ci_get_std_voltage_value_sidd(hwmgr, tab,
            &smc_voltage_tab->StdVoltageHiSidd,
            &smc_voltage_tab->StdVoltageLoSidd);
    if (result) {
        smc_voltage_tab->StdVoltageHiSidd = tab->value * VOLTAGE_SCALE;
        smc_voltage_tab->StdVoltageLoSidd = tab->value * VOLTAGE_SCALE;
    }

    smc_voltage_tab->Voltage = PP_HOST_TO_SMC_US(tab->value * VOLTAGE_SCALE);
    CONVERT_FROM_HOST_TO_SMC_US(smc_voltage_tab->StdVoltageHiSidd);
    CONVERT_FROM_HOST_TO_SMC_US(smc_voltage_tab->StdVoltageLoSidd);

    return 0;
}

static int ci_populate_smc_vddc_table(struct pp_hwmgr *hwmgr,
        SMU7_Discrete_DpmTable *table)
{
    unsigned int count;
    int result;
    struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

    table->VddcLevelCount = data->vddc_voltage_table.count;
    for (count = 0; count < table->VddcLevelCount; count++) {
        result = ci_populate_smc_voltage_table(hwmgr,
                &(data->vddc_voltage_table.entries[count]),
                &(table->VddcLevel[count]));
        PP_ASSERT_WITH_CODE(0 == result, "failed to populate SMC VDDC voltage table", return -EINVAL);

        /* GPIO voltage control */
        if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->voltage_control)
            table->VddcLevel[count].Smio |= data->vddc_voltage_table.entries[count].smio_low;
        else
            table->VddcLevel[count].Smio = 0;
    }

    CONVERT_FROM_HOST_TO_SMC_UL(table->VddcLevelCount);

    return 0;
}

static int ci_populate_smc_vdd_ci_table(struct pp_hwmgr *hwmgr,
        SMU7_Discrete_DpmTable *table)
{
    struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
    uint32_t count;
    int result;

    table->VddciLevelCount = data->vddci_voltage_table.count;

    for (count = 0; count < table->VddciLevelCount; count++) {
        result = ci_populate_smc_voltage_table(hwmgr,
                &(data->vddci_voltage_table.entries[count]),
                &(table->VddciLevel[count]));
        PP_ASSERT_WITH_CODE(result == 0, "failed to populate SMC VDDCI voltage table", return -EINVAL);
        if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
            table->VddciLevel[count].Smio |= data->vddci_voltage_table.entries[count].smio_low;
        else
            table->VddciLevel[count].Smio |= 0;
    }

    CONVERT_FROM_HOST_TO_SMC_UL(table->VddciLevelCount);

    return 0;
}

static int ci_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
        SMU7_Discrete_DpmTable *table)
{
    struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
    uint32_t count;
    int result;

    table->MvddLevelCount = data->mvdd_voltage_table.count;

    for (count = 0; count < table->MvddLevelCount; count++) {
        result = ci_populate_smc_voltage_table(hwmgr,
                &(data->mvdd_voltage_table.entries[count]),
                &table->MvddLevel[count]);
        PP_ASSERT_WITH_CODE(result == 0, "failed to populate SMC mvdd voltage table", return -EINVAL);
        if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control)
            table->MvddLevel[count].Smio |= data->mvdd_voltage_table.entries[count].smio_low;
        else
            table->MvddLevel[count].Smio |= 0;
    }

    CONVERT_FROM_HOST_TO_SMC_UL(table->MvddLevelCount);

    return 0;
}

static int ci_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
        SMU7_Discrete_DpmTable *table)
{
    int result;

    result = ci_populate_smc_vddc_table(hwmgr, table);
    PP_ASSERT_WITH_CODE(0 == result,
            "cannot populate VDDC voltage table to SMC", return -EINVAL);

    result = ci_populate_smc_vdd_ci_table(hwmgr, table);
    PP_ASSERT_WITH_CODE(0 == result,
            "cannot populate VDDCI voltage table to SMC", return -EINVAL);

    result = ci_populate_smc_mvdd_table(hwmgr, table);
    PP_ASSERT_WITH_CODE(0 == result,
            "cannot populate MVDD voltage table to SMC", return -EINVAL);

    return 0;
}

static int ci_populate_ulv_level(struct pp_hwmgr *hwmgr,
        struct SMU7_Discrete_Ulv *state)
{
    uint32_t voltage_response_time, ulv_voltage;
    int result;
    struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

    state->CcPwrDynRm = 0;
    state->CcPwrDynRm1 = 0;

    result = pp_tables_get_response_times(hwmgr, &voltage_response_time, &ulv_voltage);
    PP_ASSERT_WITH_CODE((0 == result), "cannot get ULV voltage value", return result);

    if (ulv_voltage == 0) {
        data->ulv_supported = false;
        return 0;
    }

    if (data->voltage_control != SMU7_VOLTAGE_CONTROL_BY_SVID2) {
        /* use minimum voltage if ulv voltage in pptable is bigger than minimum voltage */
        if (ulv_voltage > hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v)
            state->VddcOffset = 0;
        else
            /* used in SMIO Mode. not implemented for now. this is backup only for CI. */
            state->VddcOffset = (uint16_t)(hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v - ulv_voltage);
    } else {
        /* use minimum voltage if ulv voltage in pptable is bigger than minimum voltage */
        if (ulv_voltage > hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v)
            state->VddcOffsetVid = 0;
        else /* used in SVI2 Mode */
            state->VddcOffsetVid = (uint8_t)(
                (hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v - ulv_voltage)
                    * VOLTAGE_VID_OFFSET_SCALE2
                    / VOLTAGE_VID_OFFSET_SCALE1);
    }
    state->VddcPhase = 1;

    CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
    CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
    CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);

    return 0;
}

static int ci_populate_ulv_state(struct pp_hwmgr *hwmgr,
        SMU7_Discrete_Ulv *ulv_level)
{
    return ci_populate_ulv_level(hwmgr, ulv_level);
}

static int ci_populate_smc_link_level(struct pp_hwmgr *hwmgr, SMU7_Discrete_DpmTable *table)
{
    struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
    struct smu7_dpm_table *dpm_table = &data->dpm_table;
    struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
    uint32_t i;

    /* Index dpm_table->pcie_speed_table.count is reserved for PCIE boot level. */
    for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
        table->LinkLevel[i].PcieGenSpeed =
            (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
        table->LinkLevel[i].PcieLaneCount =
            (uint8_t)encode_pcie_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
        table->LinkLevel[i].EnabledForActivity = 1;
        table->LinkLevel[i].DownT = PP_HOST_TO_SMC_UL(5);
        table->LinkLevel[i].UpT = PP_HOST_TO_SMC_UL(30);
    }

    smu_data->smc_state_table.LinkLevelCount =
        (uint8_t)dpm_table->pcie_speed_table.count;
    data->dpm_level_enable_mask.pcie_dpm_enable_mask =
        phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);

    return 0;
}

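/* Derive the MPLL register values (feedback/post dividers, optional
 * spread spectrum, DLL power-down state) for a target memory clock,
 * using the dividers reported by the VBIOS.
 */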
static int ci_calculate_mclk_params(
        struct pp_hwmgr *hwmgr,
        uint32_t memory_clock,
        SMU7_Discrete_MemoryLevel *mclk,
        bool strobe_mode,
        bool dllStateOn
        )
{
    struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
    uint32_t dll_cntl = data->clock_registers.vDLL_CNTL;
    uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;
    uint32_t mpll_ad_func_cntl = data->clock_registers.vMPLL_AD_FUNC_CNTL;
    uint32_t mpll_dq_func_cntl = data->clock_registers.vMPLL_DQ_FUNC_CNTL;
    uint32_t mpll_func_cntl = data->clock_registers.vMPLL_FUNC_CNTL;
    uint32_t mpll_func_cntl_1 = data->clock_registers.vMPLL_FUNC_CNTL_1;
    uint32_t mpll_func_cntl_2 = data->clock_registers.vMPLL_FUNC_CNTL_2;
    uint32_t mpll_ss1 = data->clock_registers.vMPLL_SS1;
    uint32_t mpll_ss2 = data->clock_registers.vMPLL_SS2;

    pp_atomctrl_memory_clock_param mpll_param;
    int result;

    result = atomctrl_get_memory_pll_dividers_si(hwmgr,
                memory_clock, &mpll_param, strobe_mode);
    PP_ASSERT_WITH_CODE(0 == result,
        "Error retrieving Memory Clock Parameters from VBIOS.", return result);

    mpll_func_cntl = PHM_SET_FIELD(mpll_func_cntl, MPLL_FUNC_CNTL, BWCTRL, mpll_param.bw_ctrl);

    mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
            MPLL_FUNC_CNTL_1, CLKF, mpll_param.mpll_fb_divider.cl_kf);
    mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
            MPLL_FUNC_CNTL_1, CLKFRAC, mpll_param.mpll_fb_divider.clk_frac);
    mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
            MPLL_FUNC_CNTL_1, VCO_MODE, mpll_param.vco_mode);

    mpll_ad_func_cntl = PHM_SET_FIELD(mpll_ad_func_cntl,
            MPLL_AD_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider);

    if (data->is_memory_gddr5) {
        mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl,
                MPLL_DQ_FUNC_CNTL, YCLK_SEL, mpll_param.yclk_sel);
        mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl,
                MPLL_DQ_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider);
    }

    if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
            PHM_PlatformCaps_MemorySpreadSpectrumSupport)) {
        pp_atomctrl_internal_ss_info ss_info;
        uint32_t freq_nom;
        uint32_t tmp;
        uint32_t reference_clock = atomctrl_get_mpll_reference_clock(hwmgr);

        /* for GDDR5 for all modes and DDR3 */
        if (1 == mpll_param.qdr)
            freq_nom = memory_clock * 4 * (1 << mpll_param.mpll_post_divider);
        else
            freq_nom = memory_clock * 2 * (1 << mpll_param.mpll_post_divider);

        /* tmp = (freq_nom / reference_clock * reference_divider) ^ 2  Note: S.I. reference_divider = 1 */
        tmp = (freq_nom / reference_clock);
        tmp = tmp * tmp;

        if (0 == atomctrl_get_memory_clock_spread_spectrum(hwmgr, freq_nom, &ss_info)) {
            uint32_t clks = reference_clock * 5 / ss_info.speed_spectrum_rate;
            uint32_t clkv =
                (uint32_t)((((131 * ss_info.speed_spectrum_percentage *
                    ss_info.speed_spectrum_rate) / 100) * tmp) / freq_nom);

            mpll_ss1 = PHM_SET_FIELD(mpll_ss1, MPLL_SS1, CLKV, clkv);
            mpll_ss2 = PHM_SET_FIELD(mpll_ss2, MPLL_SS2, CLKS, clks);
        }
    }

    mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
        MCLK_PWRMGT_CNTL, DLL_SPEED, mpll_param.dll_speed);
    mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
        MCLK_PWRMGT_CNTL, MRDCK0_PDNB, dllStateOn);
    mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
        MCLK_PWRMGT_CNTL, MRDCK1_PDNB, dllStateOn);

    mclk->MclkFrequency = memory_clock;
    mclk->MpllFuncCntl = mpll_func_cntl;
    mclk->MpllFuncCntl_1 = mpll_func_cntl_1;
    mclk->MpllFuncCntl_2 = mpll_func_cntl_2;
    mclk->MpllAdFuncCntl = mpll_ad_func_cntl;
    mclk->MpllDqFuncCntl = mpll_dq_func_cntl;
    mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl;
    mclk->DllCntl = dll_cntl;
    mclk->MpllSs1 = mpll_ss1;
    mclk->MpllSs2 = mpll_ss2;

    return 0;
}

static uint8_t ci_get_mclk_frequency_ratio(uint32_t memory_clock,
        bool strobe_mode)
{
    uint8_t mc_para_index;

    if (strobe_mode) {
        if (memory_clock < 12500)
            mc_para_index = 0x00;
        else if (memory_clock > 47500)
            mc_para_index = 0x0f;
        else
            mc_para_index = (uint8_t)((memory_clock - 10000) / 2500);
    } else {
        if (memory_clock < 65000)
            mc_para_index = 0x00;
        else if (memory_clock > 135000)
            mc_para_index = 0x0f;
        else
            mc_para_index = (uint8_t)((memory_clock - 60000) / 5000);
    }

    return mc_para_index;
}

static uint8_t ci_get_ddr3_mclk_frequency_ratio(uint32_t memory_clock)
{
    uint8_t mc_para_index;

    if (memory_clock < 10000)
        mc_para_index = 0;
    else if (memory_clock >= 80000)
        mc_para_index = 0x0f;
    else
        mc_para_index = (uint8_t)((memory_clock - 10000) / 5000 + 1);

    return mc_para_index;
}

static int ci_populate_phase_value_based_on_mclk(struct pp_hwmgr *hwmgr, const struct phm_phase_shedding_limits_table *pl,
        uint32_t memory_clock, uint32_t *p_shed)
{
    unsigned int i;

    *p_shed = 1;

    for (i = 0; i < pl->count; i++) {
        if (memory_clock < pl->entries[i].Mclk) {
            *p_shed = i;
            break;
        }
    }

    return 0;
}

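/* Build one memory DPM level: look up the minimum VDDC/VDDCI/MVDD for
 * the clock, pick strobe/EDC modes and the DLL state from the memory
 * type and thresholds, then compute the MPLL registers and byte-swap
 * everything for the SMC.
 */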
static int ci_populate_single_memory_level(
        struct pp_hwmgr *hwmgr,
        uint32_t memory_clock,
        SMU7_Discrete_MemoryLevel *memory_level
        )
{
    struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
    int result = 0;
    bool dll_state_on;
    struct cgs_display_info info = {0};
    uint32_t mclk_edc_wr_enable_threshold = 40000;
    uint32_t mclk_edc_enable_threshold = 40000;
    uint32_t mclk_strobe_mode_threshold = 40000;

    if (hwmgr->dyn_state.vddc_dependency_on_mclk != NULL) {
        result = ci_get_dependency_volt_by_clk(hwmgr,
            hwmgr->dyn_state.vddc_dependency_on_mclk, memory_clock, &memory_level->MinVddc);
        PP_ASSERT_WITH_CODE((0 == result),
            "cannot find MinVddc voltage value from memory VDDC voltage dependency table", return result);
    }

    if (NULL != hwmgr->dyn_state.vddci_dependency_on_mclk) {
        result = ci_get_dependency_volt_by_clk(hwmgr,
                hwmgr->dyn_state.vddci_dependency_on_mclk,
                memory_clock,
                &memory_level->MinVddci);
        PP_ASSERT_WITH_CODE((0 == result),
            "cannot find MinVddci voltage value from memory VDDCI voltage dependency table", return result);
    }

    if (NULL != hwmgr->dyn_state.mvdd_dependency_on_mclk) {
        result = ci_get_dependency_volt_by_clk(hwmgr,
                hwmgr->dyn_state.mvdd_dependency_on_mclk,
                memory_clock,
                &memory_level->MinMvdd);
        PP_ASSERT_WITH_CODE((0 == result),
            "cannot find MinMvdd voltage value from memory MVDD voltage dependency table", return result);
    }

    memory_level->MinVddcPhases = 1;

    if (data->vddc_phase_shed_control) {
        ci_populate_phase_value_based_on_mclk(hwmgr, hwmgr->dyn_state.vddc_phase_shed_limits_table,
                memory_clock, &memory_level->MinVddcPhases);
    }

    memory_level->EnabledForThrottle = 1;
    memory_level->EnabledForActivity = 1;
    memory_level->UpH = 0;
    memory_level->DownH = 100;
    memory_level->VoltageDownH = 0;

    /* Indicates maximum activity level for this performance level. */
    memory_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
    memory_level->StutterEnable = 0;
    memory_level->StrobeEnable = 0;
    memory_level->EdcReadEnable = 0;
    memory_level->EdcWriteEnable = 0;
    memory_level->RttEnable = 0;

    /* default set to low watermark. Highest level will be set to high later. */
    memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;

    cgs_get_active_displays_info(hwmgr->device, &info);
    data->display_timing.num_existing_displays = info.display_count;

    /* stutter mode not supported on ci */

    /* decide strobe mode */
    memory_level->StrobeEnable = (mclk_strobe_mode_threshold != 0) &&
        (memory_clock <= mclk_strobe_mode_threshold);

    /* decide EDC mode and memory clock ratio */
    if (data->is_memory_gddr5) {
        memory_level->StrobeRatio = ci_get_mclk_frequency_ratio(memory_clock,
                    memory_level->StrobeEnable);

        if ((mclk_edc_enable_threshold != 0) &&
                (memory_clock > mclk_edc_enable_threshold)) {
            memory_level->EdcReadEnable = 1;
        }

        if ((mclk_edc_wr_enable_threshold != 0) &&
                (memory_clock > mclk_edc_wr_enable_threshold)) {
            memory_level->EdcWriteEnable = 1;
        }

        if (memory_level->StrobeEnable) {
            if (ci_get_mclk_frequency_ratio(memory_clock, 1) >=
                    ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC7) >> 16) & 0xf))
                dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
            else
                dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC6) >> 1) & 0x1) ? 1 : 0;
        } else
            dll_state_on = data->dll_default_on;
    } else {
        memory_level->StrobeRatio =
            ci_get_ddr3_mclk_frequency_ratio(memory_clock);
        dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
    }

    result = ci_calculate_mclk_params(hwmgr,
        memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);

    if (0 == result) {
        memory_level->MinVddc = PP_HOST_TO_SMC_UL(memory_level->MinVddc * VOLTAGE_SCALE);
        CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MinVddcPhases);
        memory_level->MinVddci = PP_HOST_TO_SMC_UL(memory_level->MinVddci * VOLTAGE_SCALE);
        memory_level->MinMvdd = PP_HOST_TO_SMC_UL(memory_level->MinMvdd * VOLTAGE_SCALE);
        /* MCLK frequency in units of 10KHz */
        CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkFrequency);
        /* Indicates maximum activity level for this performance level. */
        CONVERT_FROM_HOST_TO_SMC_US(memory_level->ActivityLevel);
        CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl);
        CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_1);
        CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_2);
        CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllAdFuncCntl);
        CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllDqFuncCntl);
        CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkPwrmgtCntl);
        CONVERT_FROM_HOST_TO_SMC_UL(memory_level->DllCntl);
        CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs1);
        CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs2);
    }

    return result;
}

static int ci_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
{
    struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
    struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
    struct smu7_dpm_table *dpm_table = &data->dpm_table;
    int result;
    struct cgs_system_info sys_info = {0};
    uint32_t dev_id;

    uint32_t level_array_address = smu_data->dpm_table_start + offsetof(SMU7_Discrete_DpmTable, MemoryLevel);
    uint32_t level_array_size = sizeof(SMU7_Discrete_MemoryLevel) * SMU7_MAX_LEVELS_MEMORY;
    SMU7_Discrete_MemoryLevel *levels = smu_data->smc_state_table.MemoryLevel;
    uint32_t i;

    memset(levels, 0x00, level_array_size);

    for (i = 0; i < dpm_table->mclk_table.count; i++) {
        PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
            "cannot populate memory level as memory clock is zero", return -EINVAL);
        result = ci_populate_single_memory_level(hwmgr, dpm_table->mclk_table.dpm_levels[i].value,
            &(smu_data->smc_state_table.MemoryLevel[i]));
        if (0 != result)
            return result;
    }

    smu_data->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;

    sys_info.size = sizeof(struct cgs_system_info);
    sys_info.info_id = CGS_SYSTEM_INFO_PCIE_DEV;
    cgs_query_system_info(hwmgr->device, &sys_info);
    dev_id = (uint32_t)sys_info.value;

    if ((dpm_table->mclk_table.count >= 2)
        && ((dev_id == 0x67B0) || (dev_id == 0x67B1))) {
        smu_data->smc_state_table.MemoryLevel[1].MinVddci =
                smu_data->smc_state_table.MemoryLevel[0].MinVddci;
        smu_data->smc_state_table.MemoryLevel[1].MinMvdd =
                smu_data->smc_state_table.MemoryLevel[0].MinMvdd;
    }
    smu_data->smc_state_table.MemoryLevel[0].ActivityLevel = 0x1F;
    CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.MemoryLevel[0].ActivityLevel);

    smu_data->smc_state_table.MemoryDpmLevelCount = (uint8_t)dpm_table->mclk_table.count;
    data->dpm_level_enable_mask.mclk_dpm_enable_mask = phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
    smu_data->smc_state_table.MemoryLevel[dpm_table->mclk_table.count-1].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH;

    result = ci_copy_bytes_to_smc(hwmgr,
        level_array_address, (uint8_t *)levels, (uint32_t)level_array_size,
        SMC_RAM_END);

    return result;
}

static int ci_populate_mvdd_value(struct pp_hwmgr *hwmgr, uint32_t mclk,
        SMU7_Discrete_VoltageLevel *voltage)
{
    const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

    uint32_t i = 0;

    if (SMU7_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
        /* find mvdd value which clock is more than request */
        for (i = 0; i < hwmgr->dyn_state.mvdd_dependency_on_mclk->count; i++) {
            if (mclk <= hwmgr->dyn_state.mvdd_dependency_on_mclk->entries[i].clk) {
                /* Always round to higher voltage. */
                voltage->Voltage = data->mvdd_voltage_table.entries[i].value;
                break;
            }
        }

        PP_ASSERT_WITH_CODE(i < hwmgr->dyn_state.mvdd_dependency_on_mclk->count,
            "MVDD Voltage is outside the supported range.", return -EINVAL);

    } else {
        return -EINVAL;
    }

    return 0;
}

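/* Program the ACPI (lowest-power) state: minimum VDDC/VDDCI/MVDD,
 * SCLK forced to the reference clock with the SPLL powered down and
 * held in reset, and the memory DLLs reset and disabled.
 */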
1379 static int ci_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
1380 SMU7_Discrete_DpmTable *table)
1381 {
1382 int result = 0;
1383 const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1384 struct pp_atomctrl_clock_dividers_vi dividers;
1385
1386 SMU7_Discrete_VoltageLevel voltage_level;
1387 uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
1388 uint32_t spll_func_cntl_2 = data->clock_registers.vCG_SPLL_FUNC_CNTL_2;
1389 uint32_t dll_cntl = data->clock_registers.vDLL_CNTL;
1390 uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;
1391
1392
1393 /* The ACPI state should not do DPM on DC (or ever).*/
1394 table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
1395
1396 if (data->acpi_vddc)
1397 table->ACPILevel.MinVddc = PP_HOST_TO_SMC_UL(data->acpi_vddc * VOLTAGE_SCALE);
1398 else
1399 table->ACPILevel.MinVddc = PP_HOST_TO_SMC_UL(data->min_vddc_in_pptable * VOLTAGE_SCALE);
1400
1401 table->ACPILevel.MinVddcPhases = data->vddc_phase_shed_control ? 0 : 1;
1402 /* assign zero for now*/
1403 table->ACPILevel.SclkFrequency = atomctrl_get_reference_clock(hwmgr);
1404
1405 /* get the engine clock dividers for this clock value*/
1406 result = atomctrl_get_engine_pll_dividers_vi(hwmgr,
1407 table->ACPILevel.SclkFrequency, &dividers);
1408
1409 PP_ASSERT_WITH_CODE(result == 0,
1410 "Error retrieving Engine Clock dividers from VBIOS.", return result);
1411
1412 /* divider ID for required SCLK*/
1413 table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider;
1414 table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
1415 table->ACPILevel.DeepSleepDivId = 0;
1416
1417 spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
1418 CG_SPLL_FUNC_CNTL, SPLL_PWRON, 0);
1419 spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
1420 CG_SPLL_FUNC_CNTL, SPLL_RESET, 1);
1421 spll_func_cntl_2 = PHM_SET_FIELD(spll_func_cntl_2,
1422 CG_SPLL_FUNC_CNTL_2, SCLK_MUX_SEL, 4);
1423
1424 table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
1425 table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
1426 table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
1427 table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
1428 table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
1429 table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
1430 table->ACPILevel.CcPwrDynRm = 0;
1431 table->ACPILevel.CcPwrDynRm1 = 0;
1432
1433 /* For various features to be enabled/disabled while this level is active.*/
1434 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
1435 /* SCLK frequency in units of 10KHz*/
1436 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency);
1437 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl);
1438 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2);
1439 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3);
1440 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4);
1441 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum);
1442 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2);
1443 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
1444 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
1445
1446
1447 /* table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;*/
1448 table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc;
1449 table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;
1450
1451 if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
1452 table->MemoryACPILevel.MinVddci = table->MemoryACPILevel.MinVddc;
1453 else {
1454 if (data->acpi_vddci != 0)
1455 table->MemoryACPILevel.MinVddci = PP_HOST_TO_SMC_UL(data->acpi_vddci * VOLTAGE_SCALE);
1456 else
1457 table->MemoryACPILevel.MinVddci = PP_HOST_TO_SMC_UL(data->min_vddci_in_pptable * VOLTAGE_SCALE);
1458 }
1459
1460 if (0 == ci_populate_mvdd_value(hwmgr, 0, &voltage_level))
1461 table->MemoryACPILevel.MinMvdd =
1462 PP_HOST_TO_SMC_UL(voltage_level.Voltage * VOLTAGE_SCALE);
1463 else
1464 table->MemoryACPILevel.MinMvdd = 0;
1465
1466 /* Force reset on DLL*/
1467 mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
1468 MCLK_PWRMGT_CNTL, MRDCK0_RESET, 0x1);
1469 mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
1470 MCLK_PWRMGT_CNTL, MRDCK1_RESET, 0x1);
1471
1472 /* Disable DLL in ACPIState*/
1473 mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
1474 MCLK_PWRMGT_CNTL, MRDCK0_PDNB, 0);
1475 mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
1476 MCLK_PWRMGT_CNTL, MRDCK1_PDNB, 0);
1477
1478 /* Enable DLL bypass signal*/
1479 dll_cntl = PHM_SET_FIELD(dll_cntl,
1480 DLL_CNTL, MRDCK0_BYPASS, 0);
1481 dll_cntl = PHM_SET_FIELD(dll_cntl,
1482 DLL_CNTL, MRDCK1_BYPASS, 0);
1483
1484 table->MemoryACPILevel.DllCntl =
1485 PP_HOST_TO_SMC_UL(dll_cntl);
1486 table->MemoryACPILevel.MclkPwrmgtCntl =
1487 PP_HOST_TO_SMC_UL(mclk_pwrmgt_cntl);
1488 table->MemoryACPILevel.MpllAdFuncCntl =
1489 PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_AD_FUNC_CNTL);
1490 table->MemoryACPILevel.MpllDqFuncCntl =
1491 PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_DQ_FUNC_CNTL);
1492 table->MemoryACPILevel.MpllFuncCntl =
1493 PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL);
1494 table->MemoryACPILevel.MpllFuncCntl_1 =
1495 PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_1);
1496 table->MemoryACPILevel.MpllFuncCntl_2 =
1497 PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_2);
1498 table->MemoryACPILevel.MpllSs1 =
1499 PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS1);
1500 table->MemoryACPILevel.MpllSs2 =
1501 PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS2);
1502
1503 table->MemoryACPILevel.EnabledForThrottle = 0;
1504 table->MemoryACPILevel.EnabledForActivity = 0;
1505 table->MemoryACPILevel.UpH = 0;
1506 table->MemoryACPILevel.DownH = 100;
1507 table->MemoryACPILevel.VoltageDownH = 0;
1508 /* Indicates maximum activity level for this performance level.*/
1509 table->MemoryACPILevel.ActivityLevel = PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);
1510
1511 table->MemoryACPILevel.StutterEnable = 0;
1512 table->MemoryACPILevel.StrobeEnable = 0;
1513 table->MemoryACPILevel.EdcReadEnable = 0;
1514 table->MemoryACPILevel.EdcWriteEnable = 0;
1515 table->MemoryACPILevel.RttEnable = 0;
1516
1517 return result;
1518 }
1519
1520 static int ci_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
1521 SMU7_Discrete_DpmTable *table)
1522 {
1523 int result = 0;
1524 uint8_t count;
1525 struct pp_atomctrl_clock_dividers_vi dividers;
1526 struct phm_uvd_clock_voltage_dependency_table *uvd_table =
1527 hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
1528
1529 table->UvdLevelCount = (uint8_t)(uvd_table->count);
1530
1531 for (count = 0; count < table->UvdLevelCount; count++) {
1532 table->UvdLevel[count].VclkFrequency =
1533 uvd_table->entries[count].vclk;
1534 table->UvdLevel[count].DclkFrequency =
1535 uvd_table->entries[count].dclk;
1536 table->UvdLevel[count].MinVddc =
1537 uvd_table->entries[count].v * VOLTAGE_SCALE;
1538 table->UvdLevel[count].MinVddcPhases = 1;
1539
1540 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1541 table->UvdLevel[count].VclkFrequency, &dividers);
1542 PP_ASSERT_WITH_CODE((0 == result),
1543 "can not find divide id for Vclk clock", return result);
1544
1545 table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
1546
1547 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1548 table->UvdLevel[count].DclkFrequency, &dividers);
1549 PP_ASSERT_WITH_CODE((0 == result),
1550 "can not find divide id for Dclk clock", return result);
1551
1552 table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider;
1553 CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
1554 CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
1555 CONVERT_FROM_HOST_TO_SMC_US(table->UvdLevel[count].MinVddc);
1556 }
1557
1558 return result;
1559 }
1560
1561 static int ci_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
1562 SMU7_Discrete_DpmTable *table)
1563 {
1564 int result = -EINVAL;
1565 uint8_t count;
1566 struct pp_atomctrl_clock_dividers_vi dividers;
1567 struct phm_vce_clock_voltage_dependency_table *vce_table =
1568 hwmgr->dyn_state.vce_clock_voltage_dependency_table;
1569
1570 table->VceLevelCount = (uint8_t)(vce_table->count);
1571 table->VceBootLevel = 0;
1572
1573 for (count = 0; count < table->VceLevelCount; count++) {
1574 table->VceLevel[count].Frequency = vce_table->entries[count].evclk;
1575 table->VceLevel[count].MinVoltage =
1576 vce_table->entries[count].v * VOLTAGE_SCALE;
1577 table->VceLevel[count].MinPhases = 1;
1578
1579 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1580 table->VceLevel[count].Frequency, &dividers);
1581 PP_ASSERT_WITH_CODE((0 == result),
1582 "can not find divide id for VCE engine clock",
1583 return result);
1584
1585 table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
1586
1587 CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
1588 CONVERT_FROM_HOST_TO_SMC_US(table->VceLevel[count].MinVoltage);
1589 }
1590 return result;
1591 }
1592
1593 static int ci_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
1594 SMU7_Discrete_DpmTable *table)
1595 {
1596 int result = -EINVAL;
1597 uint8_t count;
1598 struct pp_atomctrl_clock_dividers_vi dividers;
1599 struct phm_acp_clock_voltage_dependency_table *acp_table =
1600 hwmgr->dyn_state.acp_clock_voltage_dependency_table;
1601
1602 table->AcpLevelCount = (uint8_t)(acp_table->count);
1603 table->AcpBootLevel = 0;
1604
1605 for (count = 0; count < table->AcpLevelCount; count++) {
1606 table->AcpLevel[count].Frequency = acp_table->entries[count].acpclk;
1607 table->AcpLevel[count].MinVoltage = acp_table->entries[count].v;
1608 table->AcpLevel[count].MinPhases = 1;
1609
1610 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1611 table->AcpLevel[count].Frequency, &dividers);
1612 PP_ASSERT_WITH_CODE((0 == result),
1613 "can not find divide id for engine clock", return result);
1614
1615 table->AcpLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
1616
1617 CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].Frequency);
1618 CONVERT_FROM_HOST_TO_SMC_US(table->AcpLevel[count].MinVoltage);
1619 }
1620 return result;
1621 }
1622
1623 static int ci_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
1624 SMU7_Discrete_DpmTable *table)
1625 {
1626 int result = -EINVAL;
1627 uint8_t count;
1628 struct pp_atomctrl_clock_dividers_vi dividers;
1629 struct phm_samu_clock_voltage_dependency_table *samu_table =
1630 hwmgr->dyn_state.samu_clock_voltage_dependency_table;
1631
1632 table->SamuBootLevel = 0;
1633 table->SamuLevelCount = (uint8_t)(samu_table->count);
1634
1635 for (count = 0; count < table->SamuLevelCount; count++) {
1636 table->SamuLevel[count].Frequency = samu_table->entries[count].samclk;
1637 table->SamuLevel[count].MinVoltage = samu_table->entries[count].v * VOLTAGE_SCALE;
1638 table->SamuLevel[count].MinPhases = 1;
1639
1640 /* retrieve divider value from VBIOS */
1641 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1642 table->SamuLevel[count].Frequency, &dividers);
1643 PP_ASSERT_WITH_CODE((0 == result),
1644 "can not find divide id for samu clock", return result);
1645
1646 table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
1647
1648 CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
1649 CONVERT_FROM_HOST_TO_SMC_US(table->SamuLevel[count].MinVoltage);
1650 }
1651 return result;
1652 }
1653
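/*
 * Have the VBIOS program the DRAM timings for the given engine/memory
 * clock pair, then read the resulting MC_ARB registers back into the
 * SMC ARB timing table entry.
 */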
1654 static int ci_populate_memory_timing_parameters(
1655 struct pp_hwmgr *hwmgr,
1656 uint32_t engine_clock,
1657 uint32_t memory_clock,
1658 struct SMU7_Discrete_MCArbDramTimingTableEntry *arb_regs
1659 )
1660 {
1661 uint32_t dramTiming;
1662 uint32_t dramTiming2;
1663 uint32_t burstTime;
1664 int result;
1665
1666 result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
1667 engine_clock, memory_clock);
1668
1669 PP_ASSERT_WITH_CODE(result == 0,
1670 "Error calling VBIOS to set DRAM_TIMING.", return result);
1671
1672 dramTiming = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
1673 dramTiming2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
1674 burstTime = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
1675
1676 arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dramTiming);
1677 arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dramTiming2);
1678 arb_regs->McArbBurstTime = (uint8_t)burstTime;
1679
1680 return 0;
1681 }
1682
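/*
 * Build one ARB timing entry for every (sclk level, mclk level)
 * combination and upload the whole matrix to SMC RAM at arb_table_start.
 */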
1683 static int ci_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
1684 {
1685 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1686 struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
1687 int result = 0;
1688 SMU7_Discrete_MCArbDramTimingTable arb_regs;
1689 uint32_t i, j;
1690
1691 memset(&arb_regs, 0x00, sizeof(SMU7_Discrete_MCArbDramTimingTable));
1692
1693 for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
1694 for (j = 0; j < data->dpm_table.mclk_table.count; j++) {
1695 result = ci_populate_memory_timing_parameters
1696 (hwmgr, data->dpm_table.sclk_table.dpm_levels[i].value,
1697 data->dpm_table.mclk_table.dpm_levels[j].value,
1698 &arb_regs.entries[i][j]);
1699
1700 if (0 != result)
1701 break;
1702 }
if (0 != result)
break;
1703 }
1704
1705 if (0 == result) {
1706 result = ci_copy_bytes_to_smc(
1707 hwmgr,
1708 smu_data->arb_table_start,
1709 (uint8_t *)&arb_regs,
1710 sizeof(SMU7_Discrete_MCArbDramTimingTable),
1711 SMC_RAM_END
1712 );
1713 }
1714
1715 return result;
1716 }
1717
1718 static int ci_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
1719 SMU7_Discrete_DpmTable *table)
1720 {
1721 int result = 0;
1722 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1723 struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
1724
1725 table->GraphicsBootLevel = 0;
1726 table->MemoryBootLevel = 0;
1727
1728 /* find boot level from dpm table*/
1729 result = phm_find_boot_level(&(data->dpm_table.sclk_table),
1730 data->vbios_boot_state.sclk_bootup_value,
1731 (uint32_t *)&(smu_data->smc_state_table.GraphicsBootLevel));
1732
1733 if (0 != result) {
1734 smu_data->smc_state_table.GraphicsBootLevel = 0;
1735 pr_err("VBIOS did not find boot engine clock value \
1736 in dependency table. Using Graphics DPM level 0!");
1737 result = 0;
1738 }
1739
1740 result = phm_find_boot_level(&(data->dpm_table.mclk_table),
1741 data->vbios_boot_state.mclk_bootup_value,
1742 (uint32_t *)&(smu_data->smc_state_table.MemoryBootLevel));
1743
1744 if (0 != result) {
1745 smu_data->smc_state_table.MemoryBootLevel = 0;
1746 pr_err("VBIOS did not find boot engine clock value \
1747 in dependency table. Using Memory DPM level 0!");
1748 result = 0;
1749 }
1750
1751 table->BootVddc = data->vbios_boot_state.vddc_bootup_value;
1752 table->BootVddci = data->vbios_boot_state.vddci_bootup_value;
1753 table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value;
1754
1755 return result;
1756 }
1757
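/*
 * Copy only the MC register addresses whose validflag bit is set,
 * compacting them so the SMC sees a dense address array.
 */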
1758 static int ci_populate_mc_reg_address(struct pp_hwmgr *hwmgr,
1759 SMU7_Discrete_MCRegisters *mc_reg_table)
1760 {
1761 const struct ci_smumgr *smu_data = (struct ci_smumgr *)hwmgr->smu_backend;
1762
1763 uint32_t i, j;
1764
1765 for (i = 0, j = 0; j < smu_data->mc_reg_table.last; j++) {
1766 if (smu_data->mc_reg_table.validflag & 1<<j) {
1767 PP_ASSERT_WITH_CODE(i < SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE,
1768 "Index of mc_reg_table->address[] array out of boundary", return -EINVAL);
1769 mc_reg_table->address[i].s0 =
1770 PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s0);
1771 mc_reg_table->address[i].s1 =
1772 PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s1);
1773 i++;
1774 }
1775 }
1776
1777 mc_reg_table->last = (uint8_t)i;
1778
1779 return 0;
1780 }
1781
1782 static void ci_convert_mc_registers(
1783 const struct ci_mc_reg_entry *entry,
1784 SMU7_Discrete_MCRegisterSet *data,
1785 uint32_t num_entries, uint32_t valid_flag)
1786 {
1787 uint32_t i, j;
1788
1789 for (i = 0, j = 0; j < num_entries; j++) {
1790 if (valid_flag & 1<<j) {
1791 data->value[i] = PP_HOST_TO_SMC_UL(entry->mc_data[j]);
1792 i++;
1793 }
1794 }
1795 }
1796
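/*
 * Select the first MC register entry whose mclk_max covers the requested
 * memory clock (falling back to the highest entry) and convert its
 * values into the SMC register set.
 */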
1797 static int ci_convert_mc_reg_table_entry_to_smc(
1798 struct pp_hwmgr *hwmgr,
1799 const uint32_t memory_clock,
1800 SMU7_Discrete_MCRegisterSet *mc_reg_table_data
1801 )
1802 {
1803 struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
1804 uint32_t i = 0;
1805
1806 for (i = 0; i < smu_data->mc_reg_table.num_entries; i++) {
1807 if (memory_clock <=
1808 smu_data->mc_reg_table.mc_reg_table_entry[i].mclk_max) {
1809 break;
1810 }
1811 }
1812
1813 if ((i == smu_data->mc_reg_table.num_entries) && (i > 0))
1814 --i;
1815
1816 ci_convert_mc_registers(&smu_data->mc_reg_table.mc_reg_table_entry[i],
1817 mc_reg_table_data, smu_data->mc_reg_table.last,
1818 smu_data->mc_reg_table.validflag);
1819
1820 return 0;
1821 }
1822
1823 static int ci_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr,
1824 SMU7_Discrete_MCRegisters *mc_regs)
1825 {
1826 int result = 0;
1827 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1828 int res;
1829 uint32_t i;
1830
1831 for (i = 0; i < data->dpm_table.mclk_table.count; i++) {
1832 res = ci_convert_mc_reg_table_entry_to_smc(
1833 hwmgr,
1834 data->dpm_table.mclk_table.dpm_levels[i].value,
1835 &mc_regs->data[i]
1836 );
1837
1838 if (0 != res)
1839 result = res;
1840 }
1841
1842 return result;
1843 }
1844
1845 static int ci_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr)
1846 {
1847 struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
1848 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1849 uint32_t address;
1850 int32_t result;
1851
1852 if (0 == (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
1853 return 0;
1854
1855
1856 memset(&smu_data->mc_regs, 0, sizeof(SMU7_Discrete_MCRegisters));
1857
1858 result = ci_convert_mc_reg_table_to_smc(hwmgr, &(smu_data->mc_regs));
1859
1860 if (result != 0)
1861 return result;
1862
1863 address = smu_data->mc_reg_table_start + (uint32_t)offsetof(SMU7_Discrete_MCRegisters, data[0]);
1864
1865 return ci_copy_bytes_to_smc(hwmgr, address,
1866 (uint8_t *)&smu_data->mc_regs.data[0],
1867 sizeof(SMU7_Discrete_MCRegisterSet) * data->dpm_table.mclk_table.count,
1868 SMC_RAM_END);
1869 }
1870
1871 static int ci_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr)
1872 {
1873 int result;
1874 struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
1875
1876 memset(&smu_data->mc_regs, 0x00, sizeof(SMU7_Discrete_MCRegisters));
1877 result = ci_populate_mc_reg_address(hwmgr, &(smu_data->mc_regs));
1878 PP_ASSERT_WITH_CODE(0 == result,
1879 "Failed to initialize MCRegTable for the MC register addresses!", return result;);
1880
1881 result = ci_convert_mc_reg_table_to_smc(hwmgr, &smu_data->mc_regs);
1882 PP_ASSERT_WITH_CODE(0 == result,
1883 "Failed to initialize MCRegTable for driver state!", return result;);
1884
1885 return ci_copy_bytes_to_smc(hwmgr, smu_data->mc_reg_table_start,
1886 (uint8_t *)&smu_data->mc_regs, sizeof(SMU7_Discrete_MCRegisters), SMC_RAM_END);
1887 }
1888
1889 static int ci_populate_smc_initial_state(struct pp_hwmgr *hwmgr)
1890 {
1891 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1892 struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
1893 uint8_t count, level;
1894
1895 count = (uint8_t)(hwmgr->dyn_state.vddc_dependency_on_sclk->count);
1896
1897 for (level = 0; level < count; level++) {
1898 if (hwmgr->dyn_state.vddc_dependency_on_sclk->entries[level].clk
1899 >= data->vbios_boot_state.sclk_bootup_value) {
1900 smu_data->smc_state_table.GraphicsBootLevel = level;
1901 break;
1902 }
1903 }
1904
1905 count = (uint8_t)(hwmgr->dyn_state.vddc_dependency_on_mclk->count);
1906
1907 for (level = 0; level < count; level++) {
1908 if (hwmgr->dyn_state.vddc_dependency_on_mclk->entries[level].clk
1909 >= data->vbios_boot_state.mclk_bootup_value) {
1910 smu_data->smc_state_table.MemoryBootLevel = level;
1911 break;
1912 }
1913 }
1914
1915 return 0;
1916 }
1917
1918 static int ci_populate_smc_svi2_config(struct pp_hwmgr *hwmgr,
1919 SMU7_Discrete_DpmTable *table)
1920 {
1921 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1922
1923 if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control)
1924 table->SVI2Enable = 1;
1925 else
1926 table->SVI2Enable = 0;
1927 return 0;
1928 }
1929
1930 static int ci_start_smc(struct pp_hwmgr *hwmgr)
1931 {
1932 /* set SMC instruction start point at 0x0 */
1933 ci_program_jump_on_start(hwmgr);
1934
1935 /* enable smc clock */
1936 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
1937
1938 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 0);
1939
1940 PHM_WAIT_INDIRECT_FIELD(hwmgr, SMC_IND, FIRMWARE_FLAGS,
1941 INTERRUPTS_ENABLED, 1);
1942
1943 return 0;
1944 }
1945
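/*
 * Master setup of the SMC DPM table: populates the voltage, graphics,
 * memory, link, ACPI and UVD/VCE/ACP/SAMU levels plus the boot state and
 * BAPM parameters, uploads the table (minus the trailing PID controller
 * blocks), the initial MC register table and the PM fuses to SMC RAM,
 * and finally starts the SMC.
 */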
1946 static int ci_init_smc_table(struct pp_hwmgr *hwmgr)
1947 {
1948 int result;
1949 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1950 struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
1951 SMU7_Discrete_DpmTable *table = &(smu_data->smc_state_table);
1952 struct pp_atomctrl_gpio_pin_assignment gpio_pin;
1953 u32 i;
1954
1955 ci_initialize_power_tune_defaults(hwmgr);
1956 memset(&(smu_data->smc_state_table), 0x00, sizeof(smu_data->smc_state_table));
1957
1958 if (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control)
1959 ci_populate_smc_voltage_tables(hwmgr, table);
1960
1961 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1962 PHM_PlatformCaps_AutomaticDCTransition))
1963 table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
1964
1965
1966 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1967 PHM_PlatformCaps_StepVddc))
1968 table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
1969
1970 if (data->is_memory_gddr5)
1971 table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
1972
1973 if (data->ulv_supported) {
1974 result = ci_populate_ulv_state(hwmgr, &(table->Ulv));
1975 PP_ASSERT_WITH_CODE(0 == result,
1976 "Failed to initialize ULV state!", return result);
1977
1978 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1979 ixCG_ULV_PARAMETER, 0x40035);
1980 }
1981
1982 result = ci_populate_all_graphic_levels(hwmgr);
1983 PP_ASSERT_WITH_CODE(0 == result,
1984 "Failed to initialize Graphics Level!", return result);
1985
1986 result = ci_populate_all_memory_levels(hwmgr);
1987 PP_ASSERT_WITH_CODE(0 == result,
1988 "Failed to initialize Memory Level!", return result);
1989
1990 result = ci_populate_smc_link_level(hwmgr, table);
1991 PP_ASSERT_WITH_CODE(0 == result,
1992 "Failed to initialize Link Level!", return result);
1993
1994 result = ci_populate_smc_acpi_level(hwmgr, table);
1995 PP_ASSERT_WITH_CODE(0 == result,
1996 "Failed to initialize ACPI Level!", return result);
1997
1998 result = ci_populate_smc_vce_level(hwmgr, table);
1999 PP_ASSERT_WITH_CODE(0 == result,
2000 "Failed to initialize VCE Level!", return result);
2001
2002 result = ci_populate_smc_acp_level(hwmgr, table);
2003 PP_ASSERT_WITH_CODE(0 == result,
2004 "Failed to initialize ACP Level!", return result);
2005
2006 result = ci_populate_smc_samu_level(hwmgr, table);
2007 PP_ASSERT_WITH_CODE(0 == result,
2008 "Failed to initialize SAMU Level!", return result);
2009
2010 /* Since only the initial state is completely set up at this point (the other states are just copies of the boot state) we only */
2011 /* need to populate the ARB settings for the initial state. */
2012 result = ci_program_memory_timing_parameters(hwmgr);
2013 PP_ASSERT_WITH_CODE(0 == result,
2014 "Failed to Write ARB settings for the initial state.", return result);
2015
2016 result = ci_populate_smc_uvd_level(hwmgr, table);
2017 PP_ASSERT_WITH_CODE(0 == result,
2018 "Failed to initialize UVD Level!", return result);
2019
2020 table->UvdBootLevel = 0;
2021 table->VceBootLevel = 0;
2022 table->AcpBootLevel = 0;
2023 table->SamuBootLevel = 0;
2024
2025 table->GraphicsBootLevel = 0;
2026 table->MemoryBootLevel = 0;
2027
2028 result = ci_populate_smc_boot_level(hwmgr, table);
2029 PP_ASSERT_WITH_CODE(0 == result,
2030 "Failed to initialize Boot Level!", return result);
2031
2032 result = ci_populate_smc_initial_state(hwmgr);
2033 PP_ASSERT_WITH_CODE(0 == result, "Failed to initialize Boot State!", return result);
2034
2035 result = ci_populate_bapm_parameters_in_dpm_table(hwmgr);
2036 PP_ASSERT_WITH_CODE(0 == result, "Failed to populate BAPM Parameters!", return result);
2037
2038 table->UVDInterval = 1;
2039 table->VCEInterval = 1;
2040 table->ACPInterval = 1;
2041 table->SAMUInterval = 1;
2042 table->GraphicsVoltageChangeEnable = 1;
2043 table->GraphicsThermThrottleEnable = 1;
2044 table->GraphicsInterval = 1;
2045 table->VoltageInterval = 1;
2046 table->ThermalInterval = 1;
2047
2048 table->TemperatureLimitHigh =
2049 (data->thermal_temp_setting.temperature_high *
2050 SMU7_Q88_FORMAT_CONVERSION_UNIT) / PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
2051 table->TemperatureLimitLow =
2052 (data->thermal_temp_setting.temperature_low *
2053 SMU7_Q88_FORMAT_CONVERSION_UNIT) / PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
2054
2055 table->MemoryVoltageChangeEnable = 1;
2056 table->MemoryInterval = 1;
2057 table->VoltageResponseTime = 0;
2058 table->VddcVddciDelta = 4000;
2059 table->PhaseResponseTime = 0;
2060 table->MemoryThermThrottleEnable = 1;
2061
2062 PP_ASSERT_WITH_CODE((1 <= data->dpm_table.pcie_speed_table.count),
2063 "There must be 1 or more PCIE levels defined in PPTable.",
2064 return -EINVAL);
2065
2066 table->PCIeBootLinkLevel = (uint8_t)data->dpm_table.pcie_speed_table.count;
2067 table->PCIeGenInterval = 1;
2068
2069 ci_populate_smc_svi2_config(hwmgr, table);
2070
2071 for (i = 0; i < SMU7_MAX_ENTRIES_SMIO; i++)
2072 CONVERT_FROM_HOST_TO_SMC_UL(table->Smio[i]);
2073
2074 table->ThermGpio = 17;
2075 table->SclkStepSize = 0x4000;
2076 if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID, &gpio_pin)) {
2077 table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift;
2078 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2079 PHM_PlatformCaps_RegulatorHot);
2080 } else {
2081 table->VRHotGpio = SMU7_UNUSED_GPIO_PIN;
2082 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
2083 PHM_PlatformCaps_RegulatorHot);
2084 }
2085
2086 table->AcDcGpio = SMU7_UNUSED_GPIO_PIN;
2087
2088 CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
2089 CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcVid);
2090 CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcPhase);
2091 CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddciVid);
2092 CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskMvddVid);
2093 CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
2094 CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
2095 CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
2096 table->VddcVddciDelta = PP_HOST_TO_SMC_US(table->VddcVddciDelta);
2097 CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
2098 CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
2099
2100 table->BootVddc = PP_HOST_TO_SMC_US(table->BootVddc * VOLTAGE_SCALE);
2101 table->BootVddci = PP_HOST_TO_SMC_US(table->BootVddci * VOLTAGE_SCALE);
2102 table->BootMVdd = PP_HOST_TO_SMC_US(table->BootMVdd * VOLTAGE_SCALE);
2103
2104 /* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */
2105 result = ci_copy_bytes_to_smc(hwmgr, smu_data->dpm_table_start +
2106 offsetof(SMU7_Discrete_DpmTable, SystemFlags),
2107 (uint8_t *)&(table->SystemFlags),
2108 sizeof(SMU7_Discrete_DpmTable) - 3 * sizeof(SMU7_PIDController),
2109 SMC_RAM_END);
2110
2111 PP_ASSERT_WITH_CODE(0 == result,
2112 "Failed to upload dpm data to SMC memory!", return result;);
2113
2114 result = ci_populate_initial_mc_reg_table(hwmgr);
2115 PP_ASSERT_WITH_CODE((0 == result),
2116 "Failed to populate initialize MC Reg table!", return result);
2117
2118 result = ci_populate_pm_fuses(hwmgr);
2119 PP_ASSERT_WITH_CODE(0 == result,
2120 "Failed to populate PM fuses to SMC memory!", return result);
2121
2122 ci_start_smc(hwmgr);
2123
2124 return 0;
2125 }
2126
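/*
 * Translate the thermal controller's fan parameters (temperatures in
 * 1/100 degC, duty cycles scaled against FMAX_DUTY100) into the SMC fan
 * table and upload it to SMC RAM.
 */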
2127 static int ci_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
2128 {
2129 struct ci_smumgr *ci_data = (struct ci_smumgr *)(hwmgr->smu_backend);
2130 SMU7_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
2131 uint32_t duty100;
2132 uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
2133 uint16_t fdo_min, slope1, slope2;
2134 uint32_t reference_clock;
2135 int res;
2136 uint64_t tmp64;
2137
2138 if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl))
2139 return 0;
2140
2141 if (hwmgr->thermal_controller.fanInfo.bNoFan) {
2142 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
2143 PHM_PlatformCaps_MicrocodeFanControl);
2144 return 0;
2145 }
2146
2147 if (0 == ci_data->fan_table_start) {
2148 phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
2149 return 0;
2150 }
2151
2152 duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL1, FMAX_DUTY100);
2153
2154 if (0 == duty100) {
2155 phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
2156 return 0;
2157 }
2158
2159 tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin * duty100;
2160 do_div(tmp64, 10000);
2161 fdo_min = (uint16_t)tmp64;
2162
2163 t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed - hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
2164 t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh - hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
2165
2166 pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
2167 pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
2168
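/* Convert the PWM-vs-temperature ramps into the SMC's fixed-point slope
 * units; the factor of 16 suggests a 4-bit fractional part, and the +50
 * presumably rounds to the nearest unit before the divide by 100.
 */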
2169 slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
2170 slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
2171
2172 fan_table.TempMin = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMin) / 100);
2173 fan_table.TempMed = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMed) / 100);
2174 fan_table.TempMax = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMax) / 100);
2175
2176 fan_table.Slope1 = cpu_to_be16(slope1);
2177 fan_table.Slope2 = cpu_to_be16(slope2);
2178
2179 fan_table.FdoMin = cpu_to_be16(fdo_min);
2180
2181 fan_table.HystDown = cpu_to_be16(hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst);
2182
2183 fan_table.HystUp = cpu_to_be16(1);
2184
2185 fan_table.HystSlope = cpu_to_be16(1);
2186
2187 fan_table.TempRespLim = cpu_to_be16(5);
2188
2189 reference_clock = smu7_get_xclk(hwmgr);
2190
2191 fan_table.RefreshPeriod = cpu_to_be32((hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay * reference_clock) / 1600);
2192
2193 fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
2194
2195 fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_CTRL, TEMP_SEL);
2196
2197 res = ci_copy_bytes_to_smc(hwmgr, ci_data->fan_table_start, (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), SMC_RAM_END);
2198
2199 return res;
2200 }
2201
2202 static int ci_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
2203 {
2204 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2205
2206 if (data->need_update_smu7_dpm_table &
2207 (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK))
2208 return ci_program_memory_timing_parameters(hwmgr);
2209
2210 return 0;
2211 }
2212
2213 static int ci_update_sclk_threshold(struct pp_hwmgr *hwmgr)
2214 {
2215 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2216 struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
2217
2218 int result = 0;
2219 uint32_t low_sclk_interrupt_threshold = 0;
2220
2221 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2222 PHM_PlatformCaps_SclkThrottleLowNotification)
2223 && (hwmgr->gfx_arbiter.sclk_threshold !=
2224 data->low_sclk_interrupt_threshold)) {
2225 data->low_sclk_interrupt_threshold =
2226 hwmgr->gfx_arbiter.sclk_threshold;
2227 low_sclk_interrupt_threshold =
2228 data->low_sclk_interrupt_threshold;
2229
2230 CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
2231
2232 result = ci_copy_bytes_to_smc(
2233 hwmgr,
2234 smu_data->dpm_table_start +
2235 offsetof(SMU7_Discrete_DpmTable,
2236 LowSclkInterruptT),
2237 (uint8_t *)&low_sclk_interrupt_threshold,
2238 sizeof(uint32_t),
2239 SMC_RAM_END);
2240 }
2241
2242 result = ci_update_and_upload_mc_reg_table(hwmgr);
2243
2244 PP_ASSERT_WITH_CODE((0 == result), "Failed to upload MC reg table!", return result);
2245
2246 result = ci_program_mem_timing_parameters(hwmgr);
2247 PP_ASSERT_WITH_CODE((result == 0),
2248 "Failed to program memory timing parameters!",
2249 );
2250
2251 return result;
2252 }
2253
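/*
 * Map the generic SMU type/member identifiers used by common smu7 code
 * onto byte offsets within the CI firmware structures.
 */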
2254 static uint32_t ci_get_offsetof(uint32_t type, uint32_t member)
2255 {
2256 switch (type) {
2257 case SMU_SoftRegisters:
2258 switch (member) {
2259 case HandshakeDisables:
2260 return offsetof(SMU7_SoftRegisters, HandshakeDisables);
2261 case VoltageChangeTimeout:
2262 return offsetof(SMU7_SoftRegisters, VoltageChangeTimeout);
2263 case AverageGraphicsActivity:
2264 return offsetof(SMU7_SoftRegisters, AverageGraphicsA);
2265 case PreVBlankGap:
2266 return offsetof(SMU7_SoftRegisters, PreVBlankGap);
2267 case VBlankTimeout:
2268 return offsetof(SMU7_SoftRegisters, VBlankTimeout);
2269 }
break;
2270 case SMU_Discrete_DpmTable:
2271 switch (member) {
2272 case LowSclkInterruptThreshold:
2273 return offsetof(SMU7_Discrete_DpmTable, LowSclkInterruptT);
2274 }
2275 }
2276 pr_debug("can't get the offset of type %x member %x\n", type, member);
2277 return 0;
2278 }
2279
2280 static uint32_t ci_get_mac_definition(uint32_t value)
2281 {
2282 switch (value) {
2283 case SMU_MAX_LEVELS_GRAPHICS:
2284 return SMU7_MAX_LEVELS_GRAPHICS;
2285 case SMU_MAX_LEVELS_MEMORY:
2286 return SMU7_MAX_LEVELS_MEMORY;
2287 case SMU_MAX_LEVELS_LINK:
2288 return SMU7_MAX_LEVELS_LINK;
2289 case SMU_MAX_ENTRIES_SMIO:
2290 return SMU7_MAX_ENTRIES_SMIO;
2291 case SMU_MAX_LEVELS_VDDC:
2292 return SMU7_MAX_LEVELS_VDDC;
2293 case SMU_MAX_LEVELS_VDDCI:
2294 return SMU7_MAX_LEVELS_VDDCI;
2295 case SMU_MAX_LEVELS_MVDD:
2296 return SMU7_MAX_LEVELS_MVDD;
2297 }
2298
2299 pr_debug("can't get the mac definition of %x\n", value);
2300 return 0;
2301 }
2302
2303 static int ci_load_smc_ucode(struct pp_hwmgr *hwmgr)
2304 {
2305 uint32_t byte_count, start_addr;
2306 uint8_t *src;
2307 uint32_t data;
2308
2309 struct cgs_firmware_info info = {0};
2310
2311 cgs_get_firmware_info(hwmgr->device, CGS_UCODE_ID_SMU, &info);
2312
2313 hwmgr->is_kicker = info.is_kicker;
2314 byte_count = info.image_size;
2315 src = (uint8_t *)info.kptr;
2316 start_addr = info.ucode_start_address;
2317
2318 if (byte_count > SMC_RAM_END) {
2319 pr_err("SMC address is beyond the SMC RAM area.\n");
2320 return -EINVAL;
2321 }
2322
2323 cgs_write_register(hwmgr->device, mmSMC_IND_INDEX_0, start_addr);
2324 PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1);
2325
2326 for (; byte_count >= 4; byte_count -= 4) {
2327 data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
2328 cgs_write_register(hwmgr->device, mmSMC_IND_DATA_0, data);
2329 src += 4;
2330 }
2331 PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
2332
2333 if (0 != byte_count) {
2334 pr_err("SMC size must be divisible by 4\n");
2335 return -EINVAL;
2336 }
2337
2338 return 0;
2339 }
2340
2341 static int ci_upload_firmware(struct pp_hwmgr *hwmgr)
2342 {
2343 if (ci_is_smc_ram_running(hwmgr)) {
2344 pr_info("smc is running, no need to load smc firmware\n");
2345 return 0;
2346 }
2347 PHM_WAIT_INDIRECT_FIELD(hwmgr, SMC_IND, RCU_UC_EVENTS,
2348 boot_seq_done, 1);
2349 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_MISC_CNTL,
2350 pre_fetcher_en, 1);
2351
2352 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 1);
2353 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
2354 return ci_load_smc_ucode(hwmgr);
2355 }
2356
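/*
 * Upload the SMC firmware if it is not already running, then read the
 * table offsets (DPM table, soft registers, MC register table, fan
 * table, ARB timing table) and the SMC version from the firmware header.
 */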
2357 static int ci_process_firmware_header(struct pp_hwmgr *hwmgr)
2358 {
2359 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2360 struct ci_smumgr *ci_data = (struct ci_smumgr *)(hwmgr->smu_backend);
2361
2362 uint32_t tmp = 0;
2363 int result;
2364 bool error = false;
2365
2366 if (ci_upload_firmware(hwmgr))
2367 return -EINVAL;
2368
2369 result = ci_read_smc_sram_dword(hwmgr,
2370 SMU7_FIRMWARE_HEADER_LOCATION +
2371 offsetof(SMU7_Firmware_Header, DpmTable),
2372 &tmp, SMC_RAM_END);
2373
2374 if (0 == result)
2375 ci_data->dpm_table_start = tmp;
2376
2377 error |= (0 != result);
2378
2379 result = ci_read_smc_sram_dword(hwmgr,
2380 SMU7_FIRMWARE_HEADER_LOCATION +
2381 offsetof(SMU7_Firmware_Header, SoftRegisters),
2382 &tmp, SMC_RAM_END);
2383
2384 if (0 == result) {
2385 data->soft_regs_start = tmp;
2386 ci_data->soft_regs_start = tmp;
2387 }
2388
2389 error |= (0 != result);
2390
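/* Note that, unlike the other offsets, a failure to read the MC register
 * table offset is not accumulated into 'error'; that table appears to be
 * treated as optional.
 */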
2391 result = ci_read_smc_sram_dword(hwmgr,
2392 SMU7_FIRMWARE_HEADER_LOCATION +
2393 offsetof(SMU7_Firmware_Header, mcRegisterTable),
2394 &tmp, SMC_RAM_END);
2395
2396 if (0 == result)
2397 ci_data->mc_reg_table_start = tmp;
2398
2399 result = ci_read_smc_sram_dword(hwmgr,
2400 SMU7_FIRMWARE_HEADER_LOCATION +
2401 offsetof(SMU7_Firmware_Header, FanTable),
2402 &tmp, SMC_RAM_END);
2403
2404 if (0 == result)
2405 ci_data->fan_table_start = tmp;
2406
2407 error |= (0 != result);
2408
2409 result = ci_read_smc_sram_dword(hwmgr,
2410 SMU7_FIRMWARE_HEADER_LOCATION +
2411 offsetof(SMU7_Firmware_Header, mcArbDramTimingTable),
2412 &tmp, SMC_RAM_END);
2413
2414 if (0 == result)
2415 ci_data->arb_table_start = tmp;
2416
2417 error |= (0 != result);
2418
2419 result = ci_read_smc_sram_dword(hwmgr,
2420 SMU7_FIRMWARE_HEADER_LOCATION +
2421 offsetof(SMU7_Firmware_Header, Version),
2422 &tmp, SMC_RAM_END);
2423
2424 if (0 == result)
2425 hwmgr->microcode_version_info.SMC = tmp;
2426
2427 error |= (0 != result);
2428
2429 return error ? -EINVAL : 0;
2430 }
2431
2432 static uint8_t ci_get_memory_module_index(struct pp_hwmgr *hwmgr)
2433 {
2434 return (uint8_t) (0xFF & (cgs_read_register(hwmgr->device, mmBIOS_SCRATCH_4) >> 16));
2435 }
2436
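/*
 * Map an MC sequencer register onto its shadow (_LP) copy; returns false
 * when the register has no shadow, in which case the caller keeps s1.
 */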
2437 static bool ci_check_s0_mc_reg_index(uint16_t in_reg, uint16_t *out_reg)
2438 {
2439 bool result = true;
2440
2441 switch (in_reg) {
2442 case mmMC_SEQ_RAS_TIMING:
2443 *out_reg = mmMC_SEQ_RAS_TIMING_LP;
2444 break;
2445
2446 case mmMC_SEQ_DLL_STBY:
2447 *out_reg = mmMC_SEQ_DLL_STBY_LP;
2448 break;
2449
2450 case mmMC_SEQ_G5PDX_CMD0:
2451 *out_reg = mmMC_SEQ_G5PDX_CMD0_LP;
2452 break;
2453
2454 case mmMC_SEQ_G5PDX_CMD1:
2455 *out_reg = mmMC_SEQ_G5PDX_CMD1_LP;
2456 break;
2457
2458 case mmMC_SEQ_G5PDX_CTRL:
2459 *out_reg = mmMC_SEQ_G5PDX_CTRL_LP;
2460 break;
2461
2462 case mmMC_SEQ_CAS_TIMING:
2463 *out_reg = mmMC_SEQ_CAS_TIMING_LP;
2464 break;
2465
2466 case mmMC_SEQ_MISC_TIMING:
2467 *out_reg = mmMC_SEQ_MISC_TIMING_LP;
2468 break;
2469
2470 case mmMC_SEQ_MISC_TIMING2:
2471 *out_reg = mmMC_SEQ_MISC_TIMING2_LP;
2472 break;
2473
2474 case mmMC_SEQ_PMG_DVS_CMD:
2475 *out_reg = mmMC_SEQ_PMG_DVS_CMD_LP;
2476 break;
2477
2478 case mmMC_SEQ_PMG_DVS_CTL:
2479 *out_reg = mmMC_SEQ_PMG_DVS_CTL_LP;
2480 break;
2481
2482 case mmMC_SEQ_RD_CTL_D0:
2483 *out_reg = mmMC_SEQ_RD_CTL_D0_LP;
2484 break;
2485
2486 case mmMC_SEQ_RD_CTL_D1:
2487 *out_reg = mmMC_SEQ_RD_CTL_D1_LP;
2488 break;
2489
2490 case mmMC_SEQ_WR_CTL_D0:
2491 *out_reg = mmMC_SEQ_WR_CTL_D0_LP;
2492 break;
2493
2494 case mmMC_SEQ_WR_CTL_D1:
2495 *out_reg = mmMC_SEQ_WR_CTL_D1_LP;
2496 break;
2497
2498 case mmMC_PMG_CMD_EMRS:
2499 *out_reg = mmMC_SEQ_PMG_CMD_EMRS_LP;
2500 break;
2501
2502 case mmMC_PMG_CMD_MRS:
2503 *out_reg = mmMC_SEQ_PMG_CMD_MRS_LP;
2504 break;
2505
2506 case mmMC_PMG_CMD_MRS1:
2507 *out_reg = mmMC_SEQ_PMG_CMD_MRS1_LP;
2508 break;
2509
2510 case mmMC_SEQ_PMG_TIMING:
2511 *out_reg = mmMC_SEQ_PMG_TIMING_LP;
2512 break;
2513
2514 case mmMC_PMG_CMD_MRS2:
2515 *out_reg = mmMC_SEQ_PMG_CMD_MRS2_LP;
2516 break;
2517
2518 case mmMC_SEQ_WR_CTL_2:
2519 *out_reg = mmMC_SEQ_WR_CTL_2_LP;
2520 break;
2521
2522 default:
2523 result = false;
2524 break;
2525 }
2526
2527 return result;
2528 }
2529
2530 static int ci_set_s0_mc_reg_index(struct ci_mc_reg_table *table)
2531 {
2532 uint32_t i;
2533 uint16_t address;
2534
2535 for (i = 0; i < table->last; i++) {
2536 table->mc_reg_address[i].s0 =
2537 ci_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address)
2538 ? address : table->mc_reg_address[i].s1;
2539 }
2540 return 0;
2541 }
2542
2543 static int ci_copy_vbios_smc_reg_table(const pp_atomctrl_mc_reg_table *table,
2544 struct ci_mc_reg_table *ni_table)
2545 {
2546 uint8_t i, j;
2547
2548 PP_ASSERT_WITH_CODE((table->last <= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE),
2549 "Invalid VramInfo table.", return -EINVAL);
2550 PP_ASSERT_WITH_CODE((table->num_entries <= MAX_AC_TIMING_ENTRIES),
2551 "Invalid VramInfo table.", return -EINVAL);
2552
2553 for (i = 0; i < table->last; i++)
2554 ni_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
2555
2556 ni_table->last = table->last;
2557
2558 for (i = 0; i < table->num_entries; i++) {
2559 ni_table->mc_reg_table_entry[i].mclk_max =
2560 table->mc_reg_table_entry[i].mclk_max;
2561 for (j = 0; j < table->last; j++) {
2562 ni_table->mc_reg_table_entry[i].mc_data[j] =
2563 table->mc_reg_table_entry[i].mc_data[j];
2564 }
2565 }
2566
2567 ni_table->num_entries = table->num_entries;
2568
2569 return 0;
2570 }
2571
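/*
 * Append derived entries for registers that the VBIOS table only
 * describes indirectly: MC_SEQ_MISC1 expands into EMRS/MRS commands (and
 * MC_PMG_AUTO_CMD on non-GDDR5 parts), and MC_SEQ_RESERVE_M into MRS1.
 */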
2572 static int ci_set_mc_special_registers(struct pp_hwmgr *hwmgr,
2573 struct ci_mc_reg_table *table)
2574 {
2575 uint8_t i, j, k;
2576 uint32_t temp_reg;
2577 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2578
2579 for (i = 0, j = table->last; i < table->last; i++) {
2580 PP_ASSERT_WITH_CODE((j < SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE),
2581 "Invalid VramInfo table.", return -EINVAL);
2582
2583 switch (table->mc_reg_address[i].s1) {
2584
2585 case mmMC_SEQ_MISC1:
2586 temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS);
2587 table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS;
2588 table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP;
2589 for (k = 0; k < table->num_entries; k++) {
2590 table->mc_reg_table_entry[k].mc_data[j] =
2591 ((temp_reg & 0xffff0000)) |
2592 ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
2593 }
2594 j++;
2595 PP_ASSERT_WITH_CODE((j < SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE),
2596 "Invalid VramInfo table.", return -EINVAL);
2597
2598 temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS);
2599 table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS;
2600 table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP;
2601 for (k = 0; k < table->num_entries; k++) {
2602 table->mc_reg_table_entry[k].mc_data[j] =
2603 (temp_reg & 0xffff0000) |
2604 (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
2605
2606 if (!data->is_memory_gddr5)
2607 table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
2608 }
2609 j++;
2610 PP_ASSERT_WITH_CODE((j <= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE),
2611 "Invalid VramInfo table.", return -EINVAL);
2612
2613 if (!data->is_memory_gddr5 && j < SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE) {
2614 table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
2615 table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
2616 for (k = 0; k < table->num_entries; k++) {
2617 table->mc_reg_table_entry[k].mc_data[j] =
2618 (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
2619 }
2620 j++;
2621 PP_ASSERT_WITH_CODE((j <= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE),
2622 "Invalid VramInfo table.", return -EINVAL);
2623 }
2624
2625 break;
2626
2627 case mmMC_SEQ_RESERVE_M:
2628 temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1);
2629 table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1;
2630 table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP;
2631 for (k = 0; k < table->num_entries; k++) {
2632 table->mc_reg_table_entry[k].mc_data[j] =
2633 (temp_reg & 0xffff0000) |
2634 (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
2635 }
2636 j++;
2637 PP_ASSERT_WITH_CODE((j <= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE),
2638 "Invalid VramInfo table.", return -EINVAL);
2639 break;
2640
2641 default:
2642 break;
2643 }
2644
2645 }
2646
2647 table->last = j;
2648
2649 return 0;
2650 }
2651
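/*
 * Mark every register whose value differs between two adjacent entries;
 * only flagged registers need to be reprogrammed on a memory clock
 * switch.
 */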
2652 static int ci_set_valid_flag(struct ci_mc_reg_table *table)
2653 {
2654 uint8_t i, j;
2655
2656 for (i = 0; i < table->last; i++) {
2657 for (j = 1; j < table->num_entries; j++) {
2658 if (table->mc_reg_table_entry[j-1].mc_data[i] !=
2659 table->mc_reg_table_entry[j].mc_data[i]) {
2660 table->validflag |= (1 << i);
2661 break;
2662 }
2663 }
2664 }
2665
2666 return 0;
2667 }
2668
2669 static int ci_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
2670 {
2671 int result;
2672 struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
2673 pp_atomctrl_mc_reg_table *table;
2674 struct ci_mc_reg_table *ni_table = &smu_data->mc_reg_table;
2675 uint8_t module_index = ci_get_memory_module_index(hwmgr);
2676
2677 table = kzalloc(sizeof(pp_atomctrl_mc_reg_table), GFP_KERNEL);
2678
2679 if (NULL == table)
2680 return -ENOMEM;
2681
2682 /* Program additional LP registers that are no longer programmed by VBIOS */
2683 cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING));
2684 cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING));
2685 cgs_write_register(hwmgr->device, mmMC_SEQ_DLL_STBY_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_DLL_STBY));
2686 cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0));
2687 cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1));
2688 cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL));
2689 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD));
2690 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL));
2691 cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING));
2692 cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2));
2693 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_EMRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS));
2694 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS));
2695 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS1_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1));
2696 cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0));
2697 cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1));
2698 cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0));
2699 cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1));
2700 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING));
2701 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS2_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS2));
2702 cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_2));
2703
2704 memset(table, 0x00, sizeof(pp_atomctrl_mc_reg_table));
2705
2706 result = atomctrl_initialize_mc_reg_table(hwmgr, module_index, table);
2707
2708 if (0 == result)
2709 result = ci_copy_vbios_smc_reg_table(table, ni_table);
2710
2711 if (0 == result) {
2712 ci_set_s0_mc_reg_index(ni_table);
2713 result = ci_set_mc_special_registers(hwmgr, ni_table);
2714 }
2715
2716 if (0 == result)
2717 ci_set_valid_flag(ni_table);
2718
2719 kfree(table);
2720
2721 return result;
2722 }
2723
2724 static bool ci_is_dpm_running(struct pp_hwmgr *hwmgr)
2725 {
2726 return ci_is_smc_ram_running(hwmgr);
2727 }
2728
2729 static int ci_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr,
2730 struct amd_pp_profile *request)
2731 {
2732 struct ci_smumgr *smu_data = (struct ci_smumgr *)
2733 (hwmgr->smu_backend);
2734 struct SMU7_Discrete_GraphicsLevel *levels =
2735 smu_data->smc_state_table.GraphicsLevel;
2736 uint32_t array = smu_data->dpm_table_start +
2737 offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
2738 uint32_t array_size = sizeof(struct SMU7_Discrete_GraphicsLevel) *
2739 SMU7_MAX_LEVELS_GRAPHICS;
2740 uint32_t i;
2741
2742 for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) {
2743 levels[i].ActivityLevel =
2744 cpu_to_be16(request->activity_threshold);
2745 levels[i].EnabledForActivity = 1;
2746 levels[i].UpH = request->up_hyst;
2747 levels[i].DownH = request->down_hyst;
2748 }
2749
2750 return ci_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels,
2751 array_size, SMC_RAM_END);
2752 }
2753
2754
2755 static int ci_smu_init(struct pp_hwmgr *hwmgr)
2756 {
2757 int i;
2758 struct ci_smumgr *ci_priv = NULL;
2759
2760 ci_priv = kzalloc(sizeof(struct ci_smumgr), GFP_KERNEL);
2761
2762 if (ci_priv == NULL)
2763 return -ENOMEM;
2764
2765 for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++)
2766 ci_priv->activity_target[i] = 30;
2767
2768 hwmgr->smu_backend = ci_priv;
2769
2770 return 0;
2771 }
2772
2773 static int ci_smu_fini(struct pp_hwmgr *hwmgr)
2774 {
2775 kfree(hwmgr->smu_backend);
2776 hwmgr->smu_backend = NULL;
2777 cgs_rel_firmware(hwmgr->device, CGS_UCODE_ID_SMU);
2778 return 0;
2779 }
2780
2781 static int ci_start_smu(struct pp_hwmgr *hwmgr)
2782 {
2783 return 0;
2784 }
2785
2786 const struct pp_smumgr_func ci_smu_funcs = {
2787 .smu_init = ci_smu_init,
2788 .smu_fini = ci_smu_fini,
2789 .start_smu = ci_start_smu,
2790 .check_fw_load_finish = NULL,
2791 .request_smu_load_fw = NULL,
2792 .request_smu_load_specific_fw = NULL,
2793 .send_msg_to_smc = ci_send_msg_to_smc,
2794 .send_msg_to_smc_with_parameter = ci_send_msg_to_smc_with_parameter,
2795 .download_pptable_settings = NULL,
2796 .upload_pptable_settings = NULL,
2797 .get_offsetof = ci_get_offsetof,
2798 .process_firmware_header = ci_process_firmware_header,
2799 .init_smc_table = ci_init_smc_table,
2800 .update_sclk_threshold = ci_update_sclk_threshold,
2801 .thermal_setup_fan_table = ci_thermal_setup_fan_table,
2802 .populate_all_graphic_levels = ci_populate_all_graphic_levels,
2803 .populate_all_memory_levels = ci_populate_all_memory_levels,
2804 .get_mac_definition = ci_get_mac_definition,
2805 .initialize_mc_reg_table = ci_initialize_mc_reg_table,
2806 .is_dpm_running = ci_is_dpm_running,
2807 .populate_requested_graphic_levels = ci_populate_requested_graphic_levels,
2808 };