]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c
drm/amdkfd: Improve multiple SDMA queues support per process
[mirror_ubuntu-bionic-kernel.git] / drivers / gpu / drm / amd / powerplay / smumgr / iceland_smc.c
CommitLineData
18aafc59
RZ
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
8 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
9 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
10 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
11 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
12 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
13 * OTHER DEALINGS IN THE SOFTWARE.
14 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
15 * and/or sell copies of the Software, and to permit persons to whom the
16 * Software is furnished to do so, subject to the following conditions:
17 *
18 * The above copyright notice and this permission notice shall be included in
19 * all copies or substantial portions of the Software.
20 *
21 *
22 */
23
7bd55429 24#include "pp_debug.h"
18aafc59
RZ
25#include "iceland_smc.h"
26#include "smu7_dyn_defaults.h"
27
28#include "smu7_hwmgr.h"
29#include "hardwaremanager.h"
30#include "ppatomctrl.h"
18aafc59
RZ
31#include "cgs_common.h"
32#include "atombios.h"
33#include "pppcielanes.h"
34#include "pp_endian.h"
35#include "smu7_ppsmc.h"
36
37#include "smu71_discrete.h"
38
39#include "smu/smu_7_1_1_d.h"
40#include "smu/smu_7_1_1_sh_mask.h"
41
42#include "gmc/gmc_8_1_d.h"
43#include "gmc/gmc_8_1_sh_mask.h"
44
45#include "bif/bif_5_0_d.h"
46#include "bif/bif_5_0_sh_mask.h"
47
48#include "dce/dce_10_0_d.h"
49#include "dce/dce_10_0_sh_mask.h"
50#include "processpptables.h"
51
52#include "iceland_smumgr.h"
53
54#define VOLTAGE_SCALE 4
55#define POWERTUNE_DEFAULT_SET_MAX 1
56#define VOLTAGE_VID_OFFSET_SCALE1 625
57#define VOLTAGE_VID_OFFSET_SCALE2 100
58#define MC_CG_ARB_FREQ_F1 0x0b
59#define VDDC_VDDCI_DELTA 200
60
61#define DEVICE_ID_VI_ICELAND_M_6900 0x6900
62#define DEVICE_ID_VI_ICELAND_M_6901 0x6901
63#define DEVICE_ID_VI_ICELAND_M_6902 0x6902
64#define DEVICE_ID_VI_ICELAND_M_6903 0x6903
65
/*
 * Power-tune defaults for standard Iceland SKUs (fallback when the PCI
 * device ID does not match a known XT/XTL or PRO/LE part).  Field order
 * follows struct iceland_pt_defaults; the two trailing arrays are the
 * BAPMTI_R / BAPMTI_RC coefficient tables uploaded into the PM fuses.
 */
static const struct iceland_pt_defaults defaults_iceland = {
	/*
	 * sviLoadLIneEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc,
	 * TDC_MAWt, TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT
	 */
	1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
	{ 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61 },
	{ 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
};
75
/* 35W - XT, XTL */
static const struct iceland_pt_defaults defaults_icelandxt = {
	/*
	 * sviLoadLIneEn, SviLoadLineVddC,
	 * TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt,
	 * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac,
	 * BAPM_TEMP_GRADIENT
	 */
	1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x0,
	{ 0xA7, 0x0, 0x0, 0xB5, 0x0, 0x0, 0x9F, 0x0, 0x0, 0xD6, 0x0, 0x0, 0xD7, 0x0, 0x0},
	{ 0x1EA, 0x0, 0x0, 0x224, 0x0, 0x0, 0x25E, 0x0, 0x0, 0x28E, 0x0, 0x0, 0x2AB, 0x0, 0x0}
};
88
/* 25W - PRO, LE */
static const struct iceland_pt_defaults defaults_icelandpro = {
	/*
	 * sviLoadLIneEn, SviLoadLineVddC,
	 * TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt,
	 * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac,
	 * BAPM_TEMP_GRADIENT
	 */
	1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x0,
	{ 0xB7, 0x0, 0x0, 0xC3, 0x0, 0x0, 0xB5, 0x0, 0x0, 0xEA, 0x0, 0x0, 0xE6, 0x0, 0x0},
	{ 0x1EA, 0x0, 0x0, 0x224, 0x0, 0x0, 0x25E, 0x0, 0x0, 0x28E, 0x0, 0x0, 0x2AB, 0x0, 0x0}
};
101
102static void iceland_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
103{
b3b03052 104 struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
18aafc59
RZ
105 struct cgs_system_info sys_info = {0};
106 uint32_t dev_id;
107
108 sys_info.size = sizeof(struct cgs_system_info);
109 sys_info.info_id = CGS_SYSTEM_INFO_PCIE_DEV;
110 cgs_query_system_info(hwmgr->device, &sys_info);
111 dev_id = (uint32_t)sys_info.value;
112
113 switch (dev_id) {
114 case DEVICE_ID_VI_ICELAND_M_6900:
115 case DEVICE_ID_VI_ICELAND_M_6903:
116 smu_data->power_tune_defaults = &defaults_icelandxt;
117 break;
118
119 case DEVICE_ID_VI_ICELAND_M_6901:
120 case DEVICE_ID_VI_ICELAND_M_6902:
121 smu_data->power_tune_defaults = &defaults_icelandpro;
122 break;
123 default:
124 smu_data->power_tune_defaults = &defaults_iceland;
98a36749 125 pr_warn("Unknown V.I. Device ID.\n");
18aafc59
RZ
126 break;
127 }
128 return;
129}
130
131static int iceland_populate_svi_load_line(struct pp_hwmgr *hwmgr)
132{
b3b03052 133 struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
18aafc59
RZ
134 const struct iceland_pt_defaults *defaults = smu_data->power_tune_defaults;
135
136 smu_data->power_tune_table.SviLoadLineEn = defaults->svi_load_line_en;
137 smu_data->power_tune_table.SviLoadLineVddC = defaults->svi_load_line_vddc;
138 smu_data->power_tune_table.SviLoadLineTrimVddC = 3;
139 smu_data->power_tune_table.SviLoadLineOffsetVddC = 0;
140
141 return 0;
142}
143
144static int iceland_populate_tdc_limit(struct pp_hwmgr *hwmgr)
145{
146 uint16_t tdc_limit;
b3b03052 147 struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
18aafc59
RZ
148 const struct iceland_pt_defaults *defaults = smu_data->power_tune_defaults;
149
150 tdc_limit = (uint16_t)(hwmgr->dyn_state.cac_dtp_table->usTDC * 256);
151 smu_data->power_tune_table.TDC_VDDC_PkgLimit =
152 CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
153 smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
154 defaults->tdc_vddc_throttle_release_limit_perc;
155 smu_data->power_tune_table.TDC_MAWt = defaults->tdc_mawt;
156
157 return 0;
158}
159
160static int iceland_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
161{
b3b03052 162 struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
18aafc59
RZ
163 const struct iceland_pt_defaults *defaults = smu_data->power_tune_defaults;
164 uint32_t temp;
165
d3f8c0ab 166 if (smu7_read_smc_sram_dword(hwmgr,
18aafc59
RZ
167 fuse_table_offset +
168 offsetof(SMU71_Discrete_PmFuses, TdcWaterfallCtl),
169 (uint32_t *)&temp, SMC_RAM_END))
170 PP_ASSERT_WITH_CODE(false,
171 "Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!",
172 return -EINVAL);
173 else
174 smu_data->power_tune_table.TdcWaterfallCtl = defaults->tdc_waterfall_ctl;
175
176 return 0;
177}
178
/*
 * LPML temperature scaler entries (DW9-DW12) are not programmed on
 * Iceland; this placeholder simply reports success.
 */
static int iceland_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
{
	return 0;
}
183
184static int iceland_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
185{
186 int i;
b3b03052 187 struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
18aafc59
RZ
188
189 /* Currently not used. Set all to zero. */
190 for (i = 0; i < 8; i++)
191 smu_data->power_tune_table.GnbLPML[i] = 0;
192
193 return 0;
194}
195
18aafc59
RZ
196static int iceland_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
197{
b3b03052 198 struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
18aafc59
RZ
199 uint16_t HiSidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd;
200 uint16_t LoSidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd;
201 struct phm_cac_tdp_table *cac_table = hwmgr->dyn_state.cac_dtp_table;
202
203 HiSidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
204 LoSidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
205
206 smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd =
207 CONVERT_FROM_HOST_TO_SMC_US(HiSidd);
208 smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd =
209 CONVERT_FROM_HOST_TO_SMC_US(LoSidd);
210
211 return 0;
212}
213
/*
 * Fill BapmVddCVidHiSidd / BapmVddCVidLoSidd (DW0-DW3) from the CAC
 * leakage table: one VID pair per SCLK/VDDC dependency entry, converted
 * with convert_to_vid().  Requires EVV support and a leakage table whose
 * entry count matches the SCLK/VDDC dependency table (max 8 entries).
 */
static int iceland_populate_bapm_vddc_vid_sidd(struct pp_hwmgr *hwmgr)
{
	int i;
	struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
	uint8_t *hi_vid = smu_data->power_tune_table.BapmVddCVidHiSidd;
	uint8_t *lo_vid = smu_data->power_tune_table.BapmVddCVidLoSidd;

	PP_ASSERT_WITH_CODE(NULL != hwmgr->dyn_state.cac_leakage_table,
			    "The CAC Leakage table does not exist!", return -EINVAL);
	PP_ASSERT_WITH_CODE(hwmgr->dyn_state.cac_leakage_table->count <= 8,
			    "There should never be more than 8 entries for BapmVddcVid!!!", return -EINVAL);
	PP_ASSERT_WITH_CODE(hwmgr->dyn_state.cac_leakage_table->count == hwmgr->dyn_state.vddc_dependency_on_sclk->count,
			    "CACLeakageTable->count and VddcDependencyOnSCLk->count not equal", return -EINVAL);

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_EVV)) {
		/* Vddc1 feeds the low-side VID, Vddc2 the high-side VID. */
		for (i = 0; (uint32_t) i < hwmgr->dyn_state.cac_leakage_table->count; i++) {
			lo_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc1);
			hi_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc2);
		}
	} else {
		/* Non-EVV parts are not expected here. */
		PP_ASSERT_WITH_CODE(false, "Iceland should always support EVV", return -EINVAL);
	}

	return 0;
}
239
240static int iceland_populate_vddc_vid(struct pp_hwmgr *hwmgr)
241{
242 int i;
b3b03052 243 struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
18aafc59
RZ
244 uint8_t *vid = smu_data->power_tune_table.VddCVid;
245 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
246
247 PP_ASSERT_WITH_CODE(data->vddc_voltage_table.count <= 8,
248 "There should never be more than 8 entries for VddcVid!!!",
249 return -EINVAL);
250
251 for (i = 0; i < (int)data->vddc_voltage_table.count; i++) {
252 vid[i] = convert_to_vid(data->vddc_voltage_table.entries[i].value);
253 }
254
255 return 0;
256}
257
258
259
/*
 * Build the complete PM fuse (power containment) image and upload it to
 * SMC RAM.  Locates the PmFuseTable via the SMC firmware header, fills in
 * each DW group with the per-SKU values, then copies the whole local
 * power_tune_table down in one transfer.  No-op when the
 * PowerContainment platform cap is disabled.
 */
static int iceland_populate_pm_fuses(struct pp_hwmgr *hwmgr)
{
	struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
	uint32_t pm_fuse_table_offset;

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PowerContainment)) {
		/* Find where the fuse table lives in SMC RAM. */
		if (smu7_read_smc_sram_dword(hwmgr,
				SMU71_FIRMWARE_HEADER_LOCATION +
				offsetof(SMU71_Firmware_Header, PmFuseTable),
				&pm_fuse_table_offset, SMC_RAM_END))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to get pm_fuse_table_offset Failed!",
					return -EINVAL);

		/* DW0 - DW3 */
		if (iceland_populate_bapm_vddc_vid_sidd(hwmgr))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to populate bapm vddc vid Failed!",
					return -EINVAL);

		/* DW4 - DW5 */
		if (iceland_populate_vddc_vid(hwmgr))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to populate vddc vid Failed!",
					return -EINVAL);

		/* DW6 */
		if (iceland_populate_svi_load_line(hwmgr))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to populate SviLoadLine Failed!",
					return -EINVAL);
		/* DW7 */
		if (iceland_populate_tdc_limit(hwmgr))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to populate TDCLimit Failed!", return -EINVAL);
		/* DW8 */
		if (iceland_populate_dw8(hwmgr, pm_fuse_table_offset))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to populate TdcWaterfallCtl, "
					"LPMLTemperature Min and Max Failed!",
					return -EINVAL);

		/* DW9-DW12 */
		if (0 != iceland_populate_temperature_scaler(hwmgr))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to populate LPMLTemperatureScaler Failed!",
					return -EINVAL);

		/* DW13-DW16 */
		if (iceland_populate_gnb_lpml(hwmgr))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to populate GnbLPML Failed!",
					return -EINVAL);

		/* DW18 */
		if (iceland_populate_bapm_vddc_base_leakage_sidd(hwmgr))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to populate BapmVddCBaseLeakage Hi and Lo Sidd Failed!",
					return -EINVAL);

		/* Upload the fully populated image to SMC RAM. */
		if (smu7_copy_bytes_to_smc(hwmgr, pm_fuse_table_offset,
				(uint8_t *)&smu_data->power_tune_table,
				sizeof(struct SMU71_Discrete_PmFuses), SMC_RAM_END))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to download PmFuseTable Failed!",
					return -EINVAL);
	}
	return 0;
}
330
e71b7ae6 331static int iceland_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
18aafc59
RZ
332 struct phm_clock_voltage_dependency_table *allowed_clock_voltage_table,
333 uint32_t clock, uint32_t *vol)
334{
335 uint32_t i = 0;
336
337 /* clock - voltage dependency table is empty table */
338 if (allowed_clock_voltage_table->count == 0)
339 return -EINVAL;
340
341 for (i = 0; i < allowed_clock_voltage_table->count; i++) {
342 /* find first sclk bigger than request */
343 if (allowed_clock_voltage_table->entries[i].clk >= clock) {
344 *vol = allowed_clock_voltage_table->entries[i].v;
345 return 0;
346 }
347 }
348
349 /* sclk is bigger than max sclk in the dependence table */
350 *vol = allowed_clock_voltage_table->entries[i - 1].v;
351
352 return 0;
353}
354
/*
 * Derive the "standard" Hi/Lo SIDD voltages for @tab->value by cross-
 * referencing the SCLK/VDDC dependency table with the CAC leakage table.
 * Both outputs default to the raw voltage (scaled by VOLTAGE_SCALE) and
 * are only refined when matching leakage data is found.
 */
static int iceland_get_std_voltage_value_sidd(struct pp_hwmgr *hwmgr,
	pp_atomctrl_voltage_table_entry *tab, uint16_t *hi,
	uint16_t *lo)
{
	uint16_t v_index;
	bool vol_found = false;

	/* Fallback: plain voltage for both outputs. */
	*hi = tab->value * VOLTAGE_SCALE;
	*lo = tab->value * VOLTAGE_SCALE;

	/* SCLK/VDDC Dependency Table has to exist. */
	PP_ASSERT_WITH_CODE(NULL != hwmgr->dyn_state.vddc_dependency_on_sclk,
			    "The SCLK/VDDC Dependency Table does not exist.\n",
			    return -EINVAL);

	/* Without leakage data the fallback values are the best we can do. */
	if (NULL == hwmgr->dyn_state.cac_leakage_table) {
		pr_warn("CAC Leakage Table does not exist, using vddc.\n");
		return 0;
	}

	/*
	 * Since voltage in the sclk/vddc dependency table is not
	 * necessarily in ascending order because of ELB voltage
	 * patching, loop through entire list to find exact voltage.
	 */
	for (v_index = 0; (uint32_t)v_index < hwmgr->dyn_state.vddc_dependency_on_sclk->count; v_index++) {
		if (tab->value == hwmgr->dyn_state.vddc_dependency_on_sclk->entries[v_index].v) {
			vol_found = true;
			if ((uint32_t)v_index < hwmgr->dyn_state.cac_leakage_table->count) {
				*lo = hwmgr->dyn_state.cac_leakage_table->entries[v_index].Vddc * VOLTAGE_SCALE;
				*hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[v_index].Leakage * VOLTAGE_SCALE);
			} else {
				/* Leakage table is shorter: clamp to its last entry. */
				pr_warn("Index from SCLK/VDDC Dependency Table exceeds the CAC Leakage Table index, using maximum index from CAC table.\n");
				*lo = hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Vddc * VOLTAGE_SCALE;
				*hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Leakage * VOLTAGE_SCALE);
			}
			break;
		}
	}

	/*
	 * If voltage is not found in the first pass, loop again to
	 * find the best match, equal or higher value.
	 */
	if (!vol_found) {
		for (v_index = 0; (uint32_t)v_index < hwmgr->dyn_state.vddc_dependency_on_sclk->count; v_index++) {
			if (tab->value <= hwmgr->dyn_state.vddc_dependency_on_sclk->entries[v_index].v) {
				vol_found = true;
				if ((uint32_t)v_index < hwmgr->dyn_state.cac_leakage_table->count) {
					*lo = hwmgr->dyn_state.cac_leakage_table->entries[v_index].Vddc * VOLTAGE_SCALE;
					*hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[v_index].Leakage) * VOLTAGE_SCALE;
				} else {
					/* Leakage table is shorter: clamp to its last entry. */
					pr_warn("Index from SCLK/VDDC Dependency Table exceeds the CAC Leakage Table index in second look up, using maximum index from CAC table.");
					*lo = hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Vddc * VOLTAGE_SCALE;
					*hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Leakage * VOLTAGE_SCALE);
				}
				break;
			}
		}

		if (!vol_found)
			pr_warn("Unable to get std_vddc from SCLK/VDDC Dependency Table, using vddc.\n");
	}

	return 0;
}
420
421static int iceland_populate_smc_voltage_table(struct pp_hwmgr *hwmgr,
422 pp_atomctrl_voltage_table_entry *tab,
423 SMU71_Discrete_VoltageLevel *smc_voltage_tab)
424{
425 int result;
426
427 result = iceland_get_std_voltage_value_sidd(hwmgr, tab,
428 &smc_voltage_tab->StdVoltageHiSidd,
429 &smc_voltage_tab->StdVoltageLoSidd);
430 if (0 != result) {
431 smc_voltage_tab->StdVoltageHiSidd = tab->value * VOLTAGE_SCALE;
432 smc_voltage_tab->StdVoltageLoSidd = tab->value * VOLTAGE_SCALE;
433 }
434
435 smc_voltage_tab->Voltage = PP_HOST_TO_SMC_US(tab->value * VOLTAGE_SCALE);
436 CONVERT_FROM_HOST_TO_SMC_US(smc_voltage_tab->StdVoltageHiSidd);
437 CONVERT_FROM_HOST_TO_SMC_US(smc_voltage_tab->StdVoltageHiSidd);
438
439 return 0;
440}
441
442static int iceland_populate_smc_vddc_table(struct pp_hwmgr *hwmgr,
443 SMU71_Discrete_DpmTable *table)
444{
445 unsigned int count;
446 int result;
447 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
448
449 table->VddcLevelCount = data->vddc_voltage_table.count;
450 for (count = 0; count < table->VddcLevelCount; count++) {
451 result = iceland_populate_smc_voltage_table(hwmgr,
452 &(data->vddc_voltage_table.entries[count]),
453 &(table->VddcLevel[count]));
454 PP_ASSERT_WITH_CODE(0 == result, "do not populate SMC VDDC voltage table", return -EINVAL);
455
456 /* GPIO voltage control */
457 if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->voltage_control)
458 table->VddcLevel[count].Smio |= data->vddc_voltage_table.entries[count].smio_low;
459 else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control)
460 table->VddcLevel[count].Smio = 0;
461 }
462
463 CONVERT_FROM_HOST_TO_SMC_UL(table->VddcLevelCount);
464
465 return 0;
466}
467
/*
 * Populate every VDDCI level of the SMC DPM table from the VDDCI voltage
 * table, OR-ing in the SMIO pattern for GPIO-controlled VDDCI.
 */
static int iceland_populate_smc_vdd_ci_table(struct pp_hwmgr *hwmgr,
			SMU71_Discrete_DpmTable *table)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	uint32_t count;
	int result;

	table->VddciLevelCount = data->vddci_voltage_table.count;

	for (count = 0; count < table->VddciLevelCount; count++) {
		result = iceland_populate_smc_voltage_table(hwmgr,
				&(data->vddci_voltage_table.entries[count]),
				&(table->VddciLevel[count]));
		PP_ASSERT_WITH_CODE(result == 0, "do not populate SMC VDDCI voltage table", return -EINVAL);
		if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
			table->VddciLevel[count].Smio |= data->vddci_voltage_table.entries[count].smio_low;
		else
			/*
			 * NOTE(review): `|= 0` is a no-op, so Smio keeps its
			 * prior contents here; the VDDC path assigns 0 for
			 * SVID2 instead — confirm whether that was intended.
			 */
			table->VddciLevel[count].Smio |= 0;
	}

	CONVERT_FROM_HOST_TO_SMC_UL(table->VddciLevelCount);

	return 0;
}
492
493static int iceland_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
494 SMU71_Discrete_DpmTable *table)
495{
496 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
497 uint32_t count;
498 int result;
499
500 table->MvddLevelCount = data->mvdd_voltage_table.count;
501
502 for (count = 0; count < table->VddciLevelCount; count++) {
503 result = iceland_populate_smc_voltage_table(hwmgr,
504 &(data->mvdd_voltage_table.entries[count]),
505 &table->MvddLevel[count]);
506 PP_ASSERT_WITH_CODE(result == 0, "do not populate SMC mvdd voltage table", return -EINVAL);
507 if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control)
508 table->MvddLevel[count].Smio |= data->mvdd_voltage_table.entries[count].smio_low;
509 else
510 table->MvddLevel[count].Smio |= 0;
511 }
512
513 CONVERT_FROM_HOST_TO_SMC_UL(table->MvddLevelCount);
514
515 return 0;
516}
517
518
519static int iceland_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
520 SMU71_Discrete_DpmTable *table)
521{
522 int result;
523
524 result = iceland_populate_smc_vddc_table(hwmgr, table);
525 PP_ASSERT_WITH_CODE(0 == result,
526 "can not populate VDDC voltage table to SMC", return -EINVAL);
527
528 result = iceland_populate_smc_vdd_ci_table(hwmgr, table);
529 PP_ASSERT_WITH_CODE(0 == result,
530 "can not populate VDDCI voltage table to SMC", return -EINVAL);
531
532 result = iceland_populate_smc_mvdd_table(hwmgr, table);
533 PP_ASSERT_WITH_CODE(0 == result,
534 "can not populate MVDD voltage table to SMC", return -EINVAL);
535
536 return 0;
537}
538
/*
 * Populate the ULV (ultra-low voltage) state.  The ULV voltage comes
 * from the power-play tables; a value of zero disables ULV support.
 * Depending on the voltage-control mode the offset is expressed either
 * as a raw voltage delta (SMIO/GPIO path) or as a VID delta (SVI2 path).
 */
static int iceland_populate_ulv_level(struct pp_hwmgr *hwmgr,
		struct SMU71_Discrete_Ulv *state)
{
	uint32_t voltage_response_time, ulv_voltage;
	int result;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	state->CcPwrDynRm = 0;
	state->CcPwrDynRm1 = 0;

	result = pp_tables_get_response_times(hwmgr, &voltage_response_time, &ulv_voltage);
	PP_ASSERT_WITH_CODE((0 == result), "can not get ULV voltage value", return result;);

	/* A zero ULV voltage means the platform does not support ULV. */
	if (ulv_voltage == 0) {
		data->ulv_supported = false;
		return 0;
	}

	if (data->voltage_control != SMU7_VOLTAGE_CONTROL_BY_SVID2) {
		/* use minimum voltage if ulv voltage in pptable is bigger than minimum voltage */
		if (ulv_voltage > hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v)
			state->VddcOffset = 0;
		else
			/* used in SMIO Mode. not implemented for now. this is backup only for CI. */
			state->VddcOffset = (uint16_t)(hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v - ulv_voltage);
	} else {
		/* use minimum voltage if ulv voltage in pptable is bigger than minimum voltage */
		if (ulv_voltage > hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v)
			state->VddcOffsetVid = 0;
		else /* used in SVI2 Mode */
			state->VddcOffsetVid = (uint8_t)(
					(hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v - ulv_voltage)
						* VOLTAGE_VID_OFFSET_SCALE2
						/ VOLTAGE_VID_OFFSET_SCALE1);
	}
	state->VddcPhase = 1;

	/* VddcOffsetVid is a single byte, so only the multi-byte fields are swapped. */
	CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
	CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
	CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);

	return 0;
}
582
583static int iceland_populate_ulv_state(struct pp_hwmgr *hwmgr,
584 SMU71_Discrete_Ulv *ulv_level)
585{
586 return iceland_populate_ulv_level(hwmgr, ulv_level);
587}
588
/*
 * Populate the PCIe link levels of the SMC DPM table from the PCIe speed
 * DPM table and record the PCIe DPM enable mask.  Note the inclusive loop
 * bound: index pcie_speed_table.count is reserved for the PCIE boot level.
 */
static int iceland_populate_smc_link_level(struct pp_hwmgr *hwmgr, SMU71_Discrete_DpmTable *table)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_dpm_table *dpm_table = &data->dpm_table;
	struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
	uint32_t i;

	/* Index (dpm_table->pcie_speed_table.count) is reserved for PCIE boot level. */
	for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
		table->LinkLevel[i].PcieGenSpeed  =
			(uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
		/* param1 holds the lane count; encode it for the SMC. */
		table->LinkLevel[i].PcieLaneCount =
			(uint8_t)encode_pcie_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
		table->LinkLevel[i].EnabledForActivity =
			1;
		table->LinkLevel[i].SPC =
			(uint8_t)(data->pcie_spc_cap & 0xff);
		/* Up/down activity thresholds in percent, SMC byte order. */
		table->LinkLevel[i].DownThreshold =
			PP_HOST_TO_SMC_UL(5);
		table->LinkLevel[i].UpThreshold =
			PP_HOST_TO_SMC_UL(30);
	}

	smu_data->smc_state_table.LinkLevelCount =
		(uint8_t)dpm_table->pcie_speed_table.count;
	data->dpm_level_enable_mask.pcie_dpm_enable_mask =
		phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);

	return 0;
}
619
/**
 * Calculates the SCLK dividers using the provided engine clock
 *
 * @param hwmgr the address of the hardware manager
 * @param engine_clock the engine clock to use to populate the structure
 * @param sclk the SMC SCLK structure to be populated
 *
 * Queries the VBIOS for the PLL dividers matching @engine_clock, builds
 * the SPLL control register images (including optional engine spread
 * spectrum), and stores them in @sclk in host byte order.
 */
static int iceland_calculate_sclk_params(struct pp_hwmgr *hwmgr,
		uint32_t engine_clock, SMU71_Discrete_GraphicsLevel *sclk)
{
	const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	pp_atomctrl_clock_dividers_vi dividers;
	/* Start from the register values captured at init time. */
	uint32_t spll_func_cntl            = data->clock_registers.vCG_SPLL_FUNC_CNTL;
	uint32_t spll_func_cntl_3          = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
	uint32_t spll_func_cntl_4          = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
	uint32_t cg_spll_spread_spectrum   = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
	uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
	uint32_t reference_clock;
	uint32_t reference_divider;
	uint32_t fbdiv;
	int result;

	/* get the engine clock dividers for this clock value*/
	result = atomctrl_get_engine_pll_dividers_vi(hwmgr, engine_clock,  &dividers);

	PP_ASSERT_WITH_CODE(result == 0,
		"Error retrieving Engine Clock dividers from VBIOS.", return result);

	/* To get FBDIV we need to multiply this by 16384 and divide it by Fref.*/
	reference_clock = atomctrl_get_reference_clock(hwmgr);

	reference_divider = 1 + dividers.uc_pll_ref_div;

	/* low 14 bits is fraction and high 12 bits is divider*/
	fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF;

	/* SPLL_FUNC_CNTL setup*/
	spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
		CG_SPLL_FUNC_CNTL, SPLL_REF_DIV, dividers.uc_pll_ref_div);
	spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
		CG_SPLL_FUNC_CNTL, SPLL_PDIV_A,  dividers.uc_pll_post_div);

	/* SPLL_FUNC_CNTL_3 setup*/
	spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3,
		CG_SPLL_FUNC_CNTL_3, SPLL_FB_DIV, fbdiv);

	/* set to use fractional accumulation*/
	spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3,
		CG_SPLL_FUNC_CNTL_3, SPLL_DITHEN, 1);

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_EngineSpreadSpectrumSupport)) {
		pp_atomctrl_internal_ss_info ss_info;

		uint32_t vcoFreq = engine_clock * dividers.uc_pll_post_div;
		if (0 == atomctrl_get_engine_clock_spread_spectrum(hwmgr, vcoFreq, &ss_info)) {
			/*
			 * ss_info.speed_spectrum_percentage -- in unit of 0.01%
			 * ss_info.speed_spectrum_rate -- in unit of khz
			 */
			/* clks = reference_clock * 10 / (REFDIV + 1) / speed_spectrum_rate / 2 */
			uint32_t clkS = reference_clock * 5 / (reference_divider * ss_info.speed_spectrum_rate);

			/* clkv = 2 * D * fbdiv / NS */
			uint32_t clkV = 4 * ss_info.speed_spectrum_percentage * fbdiv / (clkS * 10000);

			cg_spll_spread_spectrum =
				PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, CLKS, clkS);
			cg_spll_spread_spectrum =
				PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, SSEN, 1);
			cg_spll_spread_spectrum_2 =
				PHM_SET_FIELD(cg_spll_spread_spectrum_2, CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clkV);
		}
	}

	/* Hand the finished register images back in the SMC level structure. */
	sclk->SclkFrequency        = engine_clock;
	sclk->CgSpllFuncCntl3      = spll_func_cntl_3;
	sclk->CgSpllFuncCntl4      = spll_func_cntl_4;
	sclk->SpllSpreadSpectrum   = cg_spll_spread_spectrum;
	sclk->SpllSpreadSpectrum2  = cg_spll_spread_spectrum_2;
	sclk->SclkDid              = (uint8_t)dividers.pll_post_divider;

	return 0;
}
704
705static int iceland_populate_phase_value_based_on_sclk(struct pp_hwmgr *hwmgr,
706 const struct phm_phase_shedding_limits_table *pl,
707 uint32_t sclk, uint32_t *p_shed)
708{
709 unsigned int i;
710
711 /* use the minimum phase shedding */
712 *p_shed = 1;
713
714 for (i = 0; i < pl->count; i++) {
715 if (sclk < pl->entries[i].Sclk) {
716 *p_shed = i;
717 break;
718 }
719 }
720 return 0;
721}
722
723/**
724 * Populates single SMC SCLK structure using the provided engine clock
725 *
726 * @param hwmgr the address of the hardware manager
727 * @param engine_clock the engine clock to use to populate the structure
728 * @param sclk the SMC SCLK structure to be populated
729 */
730static int iceland_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
731 uint32_t engine_clock,
732 uint16_t sclk_activity_level_threshold,
733 SMU71_Discrete_GraphicsLevel *graphic_level)
734{
735 int result;
736 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
737
738 result = iceland_calculate_sclk_params(hwmgr, engine_clock, graphic_level);
739
740 /* populate graphics levels*/
e71b7ae6 741 result = iceland_get_dependency_volt_by_clk(hwmgr,
18aafc59
RZ
742 hwmgr->dyn_state.vddc_dependency_on_sclk, engine_clock,
743 &graphic_level->MinVddc);
744 PP_ASSERT_WITH_CODE((0 == result),
745 "can not find VDDC voltage value for VDDC \
746 engine clock dependency table", return result);
747
748 /* SCLK frequency in units of 10KHz*/
749 graphic_level->SclkFrequency = engine_clock;
750 graphic_level->MinVddcPhases = 1;
751
752 if (data->vddc_phase_shed_control)
753 iceland_populate_phase_value_based_on_sclk(hwmgr,
754 hwmgr->dyn_state.vddc_phase_shed_limits_table,
755 engine_clock,
756 &graphic_level->MinVddcPhases);
757
758 /* Indicates maximum activity level for this performance level. 50% for now*/
759 graphic_level->ActivityLevel = sclk_activity_level_threshold;
760
761 graphic_level->CcPwrDynRm = 0;
762 graphic_level->CcPwrDynRm1 = 0;
763 /* this level can be used if activity is high enough.*/
764 graphic_level->EnabledForActivity = 0;
765 /* this level can be used for throttling.*/
766 graphic_level->EnabledForThrottle = 1;
767 graphic_level->UpHyst = 0;
768 graphic_level->DownHyst = 100;
769 graphic_level->VoltageDownHyst = 0;
770 graphic_level->PowerThrottle = 0;
771
772 data->display_timing.min_clock_in_sr =
773 hwmgr->display_config.min_core_set_clock_in_sr;
774
775 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
776 PHM_PlatformCaps_SclkDeepSleep))
777 graphic_level->DeepSleepDivId =
778 smu7_get_sleep_divider_id_from_clock(engine_clock,
779 data->display_timing.min_clock_in_sr);
780
781 /* Default to slow, highest DPM level will be set to PPSMC_DISPLAY_WATERMARK_LOW later.*/
782 graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
783
784 if (0 == result) {
785 graphic_level->MinVddc = PP_HOST_TO_SMC_UL(graphic_level->MinVddc * VOLTAGE_SCALE);
786 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->MinVddcPhases);
787 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SclkFrequency);
788 CONVERT_FROM_HOST_TO_SMC_US(graphic_level->ActivityLevel);
789 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl3);
790 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl4);
791 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum);
792 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum2);
793 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm);
794 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm1);
795 }
796
797 return result;
798}
799
800/**
801 * Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states
802 *
803 * @param hwmgr the address of the hardware manager
804 */
int iceland_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
	struct smu7_dpm_table *dpm_table = &data->dpm_table;
	/* SMC RAM address of the GraphicsLevel array inside the DPM table. */
	uint32_t level_array_adress = smu_data->smu7_data.dpm_table_start +
				offsetof(SMU71_Discrete_DpmTable, GraphicsLevel);

	uint32_t level_array_size = sizeof(SMU71_Discrete_GraphicsLevel) *
				SMU71_MAX_LEVELS_GRAPHICS;

	SMU71_Discrete_GraphicsLevel *levels = smu_data->smc_state_table.GraphicsLevel;

	uint32_t i;
	uint8_t highest_pcie_level_enabled = 0;
	uint8_t lowest_pcie_level_enabled = 0, mid_pcie_level_enabled = 0;
	uint8_t count = 0;
	int result = 0;

	memset(levels, 0x00, level_array_size);

	/* Fill one graphics level per trimmed SCLK DPM state. */
	for (i = 0; i < dpm_table->sclk_table.count; i++) {
		result = iceland_populate_single_graphic_level(hwmgr,
					dpm_table->sclk_table.dpm_levels[i].value,
					(uint16_t)smu_data->activity_target[i],
					&(smu_data->smc_state_table.GraphicsLevel[i]));
		if (result != 0)
			return result;

		/* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */
		if (i > 1)
			smu_data->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
	}

	/* Only enable level 0 for now. */
	smu_data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;

	/* set highest level watermark to high */
	if (dpm_table->sclk_table.count > 1)
		smu_data->smc_state_table.GraphicsLevel[dpm_table->sclk_table.count-1].DisplayWatermark =
			PPSMC_DISPLAY_WATERMARK_HIGH;

	smu_data->smc_state_table.GraphicsDpmLevelCount =
		(uint8_t)dpm_table->sclk_table.count;
	data->dpm_level_enable_mask.sclk_dpm_enable_mask =
		phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);

	/* Scan the PCIe enable mask upward for the highest enabled level.
	 * NOTE(review): these scans assume the mask has at least one bit
	 * set; an empty mask would walk past bit 31 — confirm callers
	 * guarantee a non-zero pcie_dpm_enable_mask. */
	while ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
		(1 << (highest_pcie_level_enabled + 1))) != 0) {
		highest_pcie_level_enabled++;
	}

	/* ... and upward from bit 0 for the lowest enabled level. */
	while ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
		(1 << lowest_pcie_level_enabled)) == 0) {
		lowest_pcie_level_enabled++;
	}

	/* Count disabled levels immediately above the lowest enabled one. */
	while ((count < highest_pcie_level_enabled) &&
		((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
		(1 << (lowest_pcie_level_enabled + 1 + count))) == 0)) {
		count++;
	}

	/* Mid level: next enabled level above the lowest, clamped to the highest. */
	mid_pcie_level_enabled = (lowest_pcie_level_enabled+1+count) < highest_pcie_level_enabled ?
		(lowest_pcie_level_enabled+1+count) : highest_pcie_level_enabled;


	/* set pcieDpmLevel to highest_pcie_level_enabled*/
	for (i = 2; i < dpm_table->sclk_table.count; i++) {
		smu_data->smc_state_table.GraphicsLevel[i].pcieDpmLevel = highest_pcie_level_enabled;
	}

	/* set pcieDpmLevel to lowest_pcie_level_enabled*/
	smu_data->smc_state_table.GraphicsLevel[0].pcieDpmLevel = lowest_pcie_level_enabled;

	/* set pcieDpmLevel to mid_pcie_level_enabled*/
	smu_data->smc_state_table.GraphicsLevel[1].pcieDpmLevel = mid_pcie_level_enabled;

	/* level count will send to smc once at init smc table and never change*/
	result = smu7_copy_bytes_to_smc(hwmgr, level_array_adress,
				(uint8_t *)levels, (uint32_t)level_array_size,
				SMC_RAM_END);

	return result;
}
890
891/**
892 * Populates the SMC MCLK structure using the provided memory clock
893 *
894 * @param hwmgr the address of the hardware manager
895 * @param memory_clock the memory clock to use to populate the structure
896 * @param sclk the SMC SCLK structure to be populated
897 */
static int iceland_calculate_mclk_params(
		struct pp_hwmgr *hwmgr,
		uint32_t memory_clock,
		SMU71_Discrete_MemoryLevel *mclk,
		bool strobe_mode,
		bool dllStateOn
		)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	/* Start from the clock register values cached at initialization. */
	uint32_t  dll_cntl = data->clock_registers.vDLL_CNTL;
	uint32_t  mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;
	uint32_t  mpll_ad_func_cntl = data->clock_registers.vMPLL_AD_FUNC_CNTL;
	uint32_t  mpll_dq_func_cntl = data->clock_registers.vMPLL_DQ_FUNC_CNTL;
	uint32_t  mpll_func_cntl = data->clock_registers.vMPLL_FUNC_CNTL;
	uint32_t  mpll_func_cntl_1 = data->clock_registers.vMPLL_FUNC_CNTL_1;
	uint32_t  mpll_func_cntl_2 = data->clock_registers.vMPLL_FUNC_CNTL_2;
	uint32_t  mpll_ss1 = data->clock_registers.vMPLL_SS1;
	uint32_t  mpll_ss2 = data->clock_registers.vMPLL_SS2;

	pp_atomctrl_memory_clock_param mpll_param;
	int result;

	/* Ask the VBIOS for the MPLL divider setup for this memory clock. */
	result = atomctrl_get_memory_pll_dividers_si(hwmgr,
				memory_clock, &mpll_param, strobe_mode);
	PP_ASSERT_WITH_CODE(0 == result,
		"Error retrieving Memory Clock Parameters from VBIOS.", return result);

	/* MPLL_FUNC_CNTL setup*/
	mpll_func_cntl = PHM_SET_FIELD(mpll_func_cntl, MPLL_FUNC_CNTL, BWCTRL, mpll_param.bw_ctrl);

	/* MPLL_FUNC_CNTL_1 setup*/
	mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
					MPLL_FUNC_CNTL_1, CLKF, mpll_param.mpll_fb_divider.cl_kf);
	mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
					MPLL_FUNC_CNTL_1, CLKFRAC, mpll_param.mpll_fb_divider.clk_frac);
	mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
					MPLL_FUNC_CNTL_1, VCO_MODE, mpll_param.vco_mode);

	/* MPLL_AD_FUNC_CNTL setup*/
	mpll_ad_func_cntl = PHM_SET_FIELD(mpll_ad_func_cntl,
					MPLL_AD_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider);

	/* The DQ path only needs programming for GDDR5 memory. */
	if (data->is_memory_gddr5) {
		/* MPLL_DQ_FUNC_CNTL setup*/
		mpll_dq_func_cntl  = PHM_SET_FIELD(mpll_dq_func_cntl,
						MPLL_DQ_FUNC_CNTL, YCLK_SEL, mpll_param.yclk_sel);
		mpll_dq_func_cntl  = PHM_SET_FIELD(mpll_dq_func_cntl,
						MPLL_DQ_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider);
	}

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_MemorySpreadSpectrumSupport)) {
		/*
		 ************************************
		 Fref = Reference Frequency
		 NF = Feedback divider ratio
		 NR = Reference divider ratio
		 Fnom = Nominal VCO output frequency = Fref * NF / NR
		 Fs = Spreading Rate
		 D = Percentage down-spread / 2
		 Fint = Reference input frequency to PFD = Fref / NR
		 NS = Spreading rate divider ratio = int(Fint / (2 * Fs))
		 CLKS = NS - 1 = ISS_STEP_NUM[11:0]
		 NV = D * Fs / Fnom * 4 * ((Fnom/Fref * NR) ^ 2)
		 CLKV = 65536 * NV = ISS_STEP_SIZE[25:0]
		 *************************************
		 */
		pp_atomctrl_internal_ss_info ss_info;
		uint32_t freq_nom;
		uint32_t tmp;
		uint32_t reference_clock = atomctrl_get_mpll_reference_clock(hwmgr);

		/* for GDDR5 for all modes and DDR3 */
		if (1 == mpll_param.qdr)
			freq_nom = memory_clock * 4 * (1 << mpll_param.mpll_post_divider);
		else
			freq_nom = memory_clock * 2 * (1 << mpll_param.mpll_post_divider);

		/* tmp = (freq_nom / reference_clock * reference_divider) ^ 2  Note: S.I. reference_divider = 1*/
		tmp = (freq_nom / reference_clock);
		tmp = tmp * tmp;

		if (0 == atomctrl_get_memory_clock_spread_spectrum(hwmgr, freq_nom, &ss_info)) {
			/* ss_info.speed_spectrum_percentage -- in unit of 0.01% */
			/* ss.Info.speed_spectrum_rate -- in unit of khz */
			/* CLKS = reference_clock / (2 * speed_spectrum_rate * reference_divider) * 10 */
			/*     = reference_clock * 5 / speed_spectrum_rate */
			uint32_t clks = reference_clock * 5 / ss_info.speed_spectrum_rate;

			/* CLKV = 65536 * speed_spectrum_percentage / 2 * spreadSpecrumRate / freq_nom * 4 / 100000 * ((freq_nom / reference_clock) ^ 2) */
			/*     = 131 * speed_spectrum_percentage * speed_spectrum_rate / 100 * ((freq_nom / reference_clock) ^ 2) / freq_nom */
			uint32_t clkv =
				(uint32_t)((((131 * ss_info.speed_spectrum_percentage *
							ss_info.speed_spectrum_rate) / 100) * tmp) / freq_nom);

			mpll_ss1 = PHM_SET_FIELD(mpll_ss1, MPLL_SS1, CLKV, clkv);
			mpll_ss2 = PHM_SET_FIELD(mpll_ss2, MPLL_SS2, CLKS, clks);
		}
	}

	/* MCLK_PWRMGT_CNTL setup */
	mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
		MCLK_PWRMGT_CNTL, DLL_SPEED, mpll_param.dll_speed);
	mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
		MCLK_PWRMGT_CNTL, MRDCK0_PDNB, dllStateOn);
	mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
		MCLK_PWRMGT_CNTL, MRDCK1_PDNB, dllStateOn);


	/* Save the result data to output memory level structure */
	mclk->MclkFrequency   = memory_clock;
	mclk->MpllFuncCntl    = mpll_func_cntl;
	mclk->MpllFuncCntl_1  = mpll_func_cntl_1;
	mclk->MpllFuncCntl_2  = mpll_func_cntl_2;
	mclk->MpllAdFuncCntl  = mpll_ad_func_cntl;
	mclk->MpllDqFuncCntl  = mpll_dq_func_cntl;
	mclk->MclkPwrmgtCntl  = mclk_pwrmgt_cntl;
	mclk->DllCntl         = dll_cntl;
	mclk->MpllSs1         = mpll_ss1;
	mclk->MpllSs2         = mpll_ss2;

	return 0;
}
1022
/*
 * Map a GDDR5 memory clock (10 kHz units) to the 4-bit MC parameter
 * index used to select memory timing sets.  Strobe mode uses a
 * 2500-unit step from a 10000 base; normal mode a 5000-unit step from
 * a 60000 base.  Clocks outside the window clamp to 0x0 / 0xf.
 */
static uint8_t iceland_get_mclk_frequency_ratio(uint32_t memory_clock,
		bool strobe_mode)
{
	const uint32_t lower_bound = strobe_mode ? 12500 : 65000;
	const uint32_t upper_bound = strobe_mode ? 47500 : 135000;
	const uint32_t base = strobe_mode ? 10000 : 60000;
	const uint32_t step = strobe_mode ? 2500 : 5000;

	if (memory_clock < lower_bound)
		return 0x00;
	if (memory_clock > upper_bound)
		return 0x0f;

	return (uint8_t)((memory_clock - base) / step);
}
1048
/*
 * Map a DDR3 memory clock (10 kHz units) to the 4-bit MC parameter
 * index: 0 below 10000, 0xf at or above 80000, otherwise a 5000-unit
 * step from the 10000 base, offset by one.
 */
static uint8_t iceland_get_ddr3_mclk_frequency_ratio(uint32_t memory_clock)
{
	if (memory_clock < 10000)
		return 0;
	if (memory_clock >= 80000)
		return 0x0f;

	return (uint8_t)((memory_clock - 10000) / 5000 + 1);
}
1063
1064static int iceland_populate_phase_value_based_on_mclk(struct pp_hwmgr *hwmgr, const struct phm_phase_shedding_limits_table *pl,
1065 uint32_t memory_clock, uint32_t *p_shed)
1066{
1067 unsigned int i;
1068
1069 *p_shed = 1;
1070
1071 for (i = 0; i < pl->count; i++) {
1072 if (memory_clock < pl->entries[i].Mclk) {
1073 *p_shed = i;
1074 break;
1075 }
1076 }
1077
1078 return 0;
1079}
1080
/* Build one SMC memory DPM level for the given memory clock: look up
 * minimum voltages, apply throttle/hysteresis policy, decide strobe,
 * EDC and DLL modes, compute the MPLL registers, and byte-swap the
 * result into SMC order. */
static int iceland_populate_single_memory_level(
		struct pp_hwmgr *hwmgr,
		uint32_t memory_clock,
		SMU71_Discrete_MemoryLevel *memory_level
		)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	int result = 0;
	bool dll_state_on;
	struct cgs_display_info info = {0};
	/* Fixed thresholds (10 kHz units) for the EDC and strobe decisions. */
	uint32_t mclk_edc_wr_enable_threshold = 40000;
	uint32_t mclk_edc_enable_threshold = 40000;
	uint32_t mclk_strobe_mode_threshold = 40000;

	/* Minimum VDDC for this memory clock, from the dependency table. */
	if (hwmgr->dyn_state.vddc_dependency_on_mclk != NULL) {
		result = iceland_get_dependency_volt_by_clk(hwmgr,
			hwmgr->dyn_state.vddc_dependency_on_mclk, memory_clock, &memory_level->MinVddc);
		PP_ASSERT_WITH_CODE((0 == result),
			"can not find MinVddc voltage value from memory VDDC voltage dependency table", return result);
	}

	/* VDDCI tracks VDDC when there is no separate VDDCI control. */
	if (data->vddci_control == SMU7_VOLTAGE_CONTROL_NONE) {
		memory_level->MinVddci = memory_level->MinVddc;
	} else if (NULL != hwmgr->dyn_state.vddci_dependency_on_mclk) {
		result = iceland_get_dependency_volt_by_clk(hwmgr,
				hwmgr->dyn_state.vddci_dependency_on_mclk,
				memory_clock,
				&memory_level->MinVddci);
		PP_ASSERT_WITH_CODE((0 == result),
			"can not find MinVddci voltage value from memory VDDCI voltage dependency table", return result);
	}

	memory_level->MinVddcPhases = 1;

	if (data->vddc_phase_shed_control) {
		iceland_populate_phase_value_based_on_mclk(hwmgr, hwmgr->dyn_state.vddc_phase_shed_limits_table,
				memory_clock, &memory_level->MinVddcPhases);
	}

	/* Throttle/hysteresis policy for this level. */
	memory_level->EnabledForThrottle = 1;
	memory_level->EnabledForActivity = 0;
	memory_level->UpHyst = 0;
	memory_level->DownHyst = 100;
	memory_level->VoltageDownHyst = 0;

	/* Indicates maximum activity level for this performance level.*/
	memory_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
	memory_level->StutterEnable = 0;
	memory_level->StrobeEnable = 0;
	memory_level->EdcReadEnable = 0;
	memory_level->EdcWriteEnable = 0;
	memory_level->RttEnable = 0;

	/* default set to low watermark. Highest level will be set to high later.*/
	memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;

	cgs_get_active_displays_info(hwmgr->device, &info);
	data->display_timing.num_existing_displays = info.display_count;

	/* stutter mode not support on iceland */

	/* decide strobe mode*/
	memory_level->StrobeEnable = (mclk_strobe_mode_threshold != 0) &&
		(memory_clock <= mclk_strobe_mode_threshold);

	/* decide EDC mode and memory clock ratio*/
	if (data->is_memory_gddr5) {
		memory_level->StrobeRatio = iceland_get_mclk_frequency_ratio(memory_clock,
					memory_level->StrobeEnable);

		if ((mclk_edc_enable_threshold != 0) &&
				(memory_clock > mclk_edc_enable_threshold)) {
			memory_level->EdcReadEnable = 1;
		}

		if ((mclk_edc_wr_enable_threshold != 0) &&
				(memory_clock > mclk_edc_wr_enable_threshold)) {
			memory_level->EdcWriteEnable = 1;
		}

		if (memory_level->StrobeEnable) {
			/* Pick the DLL state from the MC fuse registers,
			 * depending on whether the strobe ratio reaches the
			 * threshold fused into MC_SEQ_MISC7[19:16]. */
			if (iceland_get_mclk_frequency_ratio(memory_clock, 1) >=
					((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC7) >> 16) & 0xf))
				dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
			else
				dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC6) >> 1) & 0x1) ? 1 : 0;
		} else
			dll_state_on = data->dll_default_on;
	} else {
		memory_level->StrobeRatio =
			iceland_get_ddr3_mclk_frequency_ratio(memory_clock);
		dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
	}

	result = iceland_calculate_mclk_params(hwmgr,
		memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);

	if (0 == result) {
		/* Convert multi-byte fields to SMC (big-endian) order. */
		memory_level->MinVddc = PP_HOST_TO_SMC_UL(memory_level->MinVddc * VOLTAGE_SCALE);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MinVddcPhases);
		memory_level->MinVddci = PP_HOST_TO_SMC_UL(memory_level->MinVddci * VOLTAGE_SCALE);
		memory_level->MinMvdd = PP_HOST_TO_SMC_UL(memory_level->MinMvdd * VOLTAGE_SCALE);
		/* MCLK frequency in units of 10KHz*/
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkFrequency);
		/* Indicates maximum activity level for this performance level.*/
		CONVERT_FROM_HOST_TO_SMC_US(memory_level->ActivityLevel);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_1);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_2);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllAdFuncCntl);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllDqFuncCntl);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkPwrmgtCntl);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->DllCntl);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs1);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs2);
	}

	return result;
}
1200
1201/**
1202 * Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states
1203 *
1204 * @param hwmgr the address of the hardware manager
1205 */
1206
/* Build the full MCLK DPM level table in the SMC state table and upload it. */
int iceland_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
	struct smu7_dpm_table *dpm_table = &data->dpm_table;
	int result;

	/* populate MCLK dpm table to SMU7 */
	uint32_t level_array_adress = smu_data->smu7_data.dpm_table_start + offsetof(SMU71_Discrete_DpmTable, MemoryLevel);
	uint32_t level_array_size = sizeof(SMU71_Discrete_MemoryLevel) * SMU71_MAX_LEVELS_MEMORY;
	SMU71_Discrete_MemoryLevel *levels = smu_data->smc_state_table.MemoryLevel;
	uint32_t i;

	memset(levels, 0x00, level_array_size);

	/* Fill one memory level per trimmed MCLK DPM state. */
	for (i = 0; i < dpm_table->mclk_table.count; i++) {
		PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
			"can not populate memory level as memory clock is zero", return -EINVAL);
		result = iceland_populate_single_memory_level(hwmgr, dpm_table->mclk_table.dpm_levels[i].value,
			&(smu_data->smc_state_table.MemoryLevel[i]));
		if (0 != result) {
			return result;
		}
	}

	/* Only enable level 0 for now.*/
	smu_data->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;

	/*
	 * in order to prevent MC activity from stutter mode to push DPM up.
	 * the UVD change complements this by putting the MCLK in a higher state
	 * by default such that we are not effected by up threshold or and MCLK DPM latency.
	 */
	smu_data->smc_state_table.MemoryLevel[0].ActivityLevel = 0x1F;
	CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.MemoryLevel[0].ActivityLevel);

	smu_data->smc_state_table.MemoryDpmLevelCount = (uint8_t)dpm_table->mclk_table.count;
	data->dpm_level_enable_mask.mclk_dpm_enable_mask = phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
	/* set highest level watermark to high*/
	/* NOTE(review): assumes mclk_table.count >= 1; count == 0 would
	 * index MemoryLevel[-1] — confirm callers guarantee this. */
	smu_data->smc_state_table.MemoryLevel[dpm_table->mclk_table.count-1].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH;

	/* level count will send to smc once at init smc table and never change*/
	result = smu7_copy_bytes_to_smc(hwmgr,
		level_array_adress, (uint8_t *)levels, (uint32_t)level_array_size,
		SMC_RAM_END);

	return result;
}
1255
1256static int iceland_populate_mvdd_value(struct pp_hwmgr *hwmgr, uint32_t mclk,
1257 SMU71_Discrete_VoltageLevel *voltage)
1258{
1259 const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1260
1261 uint32_t i = 0;
1262
1263 if (SMU7_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
1264 /* find mvdd value which clock is more than request */
1265 for (i = 0; i < hwmgr->dyn_state.mvdd_dependency_on_mclk->count; i++) {
1266 if (mclk <= hwmgr->dyn_state.mvdd_dependency_on_mclk->entries[i].clk) {
1267 /* Always round to higher voltage. */
1268 voltage->Voltage = data->mvdd_voltage_table.entries[i].value;
1269 break;
1270 }
1271 }
1272
1273 PP_ASSERT_WITH_CODE(i < hwmgr->dyn_state.mvdd_dependency_on_mclk->count,
1274 "MVDD Voltage is outside the supported range.", return -EINVAL);
1275
1276 } else {
1277 return -EINVAL;
1278 }
1279
1280 return 0;
1281}
1282
/* Fill in the SCLK and MCLK ACPI (lowest-power) levels of the SMC DPM
 * table: minimum voltages, SPLL powered down/reset, DLLs disabled, and
 * all fields converted to SMC byte order. */
static int iceland_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
					SMU71_Discrete_DpmTable *table)
{
	int result = 0;
	const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct pp_atomctrl_clock_dividers_vi dividers;
	uint32_t vddc_phase_shed_control = 0;

	SMU71_Discrete_VoltageLevel voltage_level;
	uint32_t spll_func_cntl    = data->clock_registers.vCG_SPLL_FUNC_CNTL;
	uint32_t spll_func_cntl_2  = data->clock_registers.vCG_SPLL_FUNC_CNTL_2;
	uint32_t dll_cntl          = data->clock_registers.vDLL_CNTL;
	uint32_t mclk_pwrmgt_cntl  = data->clock_registers.vMCLK_PWRMGT_CNTL;


	/* The ACPI state should not do DPM on DC (or ever).*/
	table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;

	/* Prefer the VBIOS ACPI VDDC; fall back to the pptable minimum. */
	if (data->acpi_vddc)
		table->ACPILevel.MinVddc = PP_HOST_TO_SMC_UL(data->acpi_vddc * VOLTAGE_SCALE);
	else
		table->ACPILevel.MinVddc = PP_HOST_TO_SMC_UL(data->min_vddc_in_pptable * VOLTAGE_SCALE);

	table->ACPILevel.MinVddcPhases = vddc_phase_shed_control ? 0 : 1;
	/* assign zero for now*/
	table->ACPILevel.SclkFrequency = atomctrl_get_reference_clock(hwmgr);

	/* get the engine clock dividers for this clock value*/
	result = atomctrl_get_engine_pll_dividers_vi(hwmgr,
		table->ACPILevel.SclkFrequency,  &dividers);

	PP_ASSERT_WITH_CODE(result == 0,
		"Error retrieving Engine Clock dividers from VBIOS.", return result);

	/* divider ID for required SCLK*/
	table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider;
	table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
	table->ACPILevel.DeepSleepDivId = 0;

	/* Power the SPLL down and hold it in reset for the ACPI state. */
	spll_func_cntl      = PHM_SET_FIELD(spll_func_cntl,
					    CG_SPLL_FUNC_CNTL,   SPLL_PWRON,     0);
	spll_func_cntl      = PHM_SET_FIELD(spll_func_cntl,
					    CG_SPLL_FUNC_CNTL,   SPLL_RESET,     1);
	spll_func_cntl_2    = PHM_SET_FIELD(spll_func_cntl_2,
					    CG_SPLL_FUNC_CNTL_2, SCLK_MUX_SEL,   4);

	table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
	table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
	table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
	table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
	table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
	table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
	table->ACPILevel.CcPwrDynRm = 0;
	table->ACPILevel.CcPwrDynRm1 = 0;


	/* For various features to be enabled/disabled while this level is active.*/
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
	/* SCLK frequency in units of 10KHz*/
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);

	/* table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;*/
	table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc;
	table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;

	/* VDDCI tracks VDDC when there is no separate VDDCI control. */
	if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
		table->MemoryACPILevel.MinVddci = table->MemoryACPILevel.MinVddc;
	else {
		if (data->acpi_vddci != 0)
			table->MemoryACPILevel.MinVddci = PP_HOST_TO_SMC_UL(data->acpi_vddci * VOLTAGE_SCALE);
		else
			table->MemoryACPILevel.MinVddci = PP_HOST_TO_SMC_UL(data->min_vddci_in_pptable * VOLTAGE_SCALE);
	}

	/* MVDD at the lowest level (mclk == 0), or 0 when uncontrolled. */
	if (0 == iceland_populate_mvdd_value(hwmgr, 0, &voltage_level))
		table->MemoryACPILevel.MinMvdd =
			PP_HOST_TO_SMC_UL(voltage_level.Voltage * VOLTAGE_SCALE);
	else
		table->MemoryACPILevel.MinMvdd = 0;

	/* Force reset on  DLL*/
	mclk_pwrmgt_cntl    = PHM_SET_FIELD(mclk_pwrmgt_cntl,
		MCLK_PWRMGT_CNTL, MRDCK0_RESET, 0x1);
	mclk_pwrmgt_cntl    = PHM_SET_FIELD(mclk_pwrmgt_cntl,
		MCLK_PWRMGT_CNTL, MRDCK1_RESET, 0x1);

	/* Disable DLL in ACPIState*/
	mclk_pwrmgt_cntl    = PHM_SET_FIELD(mclk_pwrmgt_cntl,
		MCLK_PWRMGT_CNTL, MRDCK0_PDNB, 0);
	mclk_pwrmgt_cntl    = PHM_SET_FIELD(mclk_pwrmgt_cntl,
		MCLK_PWRMGT_CNTL, MRDCK1_PDNB, 0);

	/* Enable DLL bypass signal*/
	dll_cntl            = PHM_SET_FIELD(dll_cntl,
		DLL_CNTL, MRDCK0_BYPASS, 0);
	dll_cntl            = PHM_SET_FIELD(dll_cntl,
		DLL_CNTL, MRDCK1_BYPASS, 0);

	table->MemoryACPILevel.DllCntl            =
		PP_HOST_TO_SMC_UL(dll_cntl);
	table->MemoryACPILevel.MclkPwrmgtCntl     =
		PP_HOST_TO_SMC_UL(mclk_pwrmgt_cntl);
	table->MemoryACPILevel.MpllAdFuncCntl     =
		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_AD_FUNC_CNTL);
	table->MemoryACPILevel.MpllDqFuncCntl     =
		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_DQ_FUNC_CNTL);
	table->MemoryACPILevel.MpllFuncCntl       =
		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL);
	table->MemoryACPILevel.MpllFuncCntl_1     =
		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_1);
	table->MemoryACPILevel.MpllFuncCntl_2     =
		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_2);
	table->MemoryACPILevel.MpllSs1            =
		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS1);
	table->MemoryACPILevel.MpllSs2            =
		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS2);

	table->MemoryACPILevel.EnabledForThrottle = 0;
	table->MemoryACPILevel.EnabledForActivity = 0;
	table->MemoryACPILevel.UpHyst = 0;
	table->MemoryACPILevel.DownHyst = 100;
	table->MemoryACPILevel.VoltageDownHyst = 0;
	/* Indicates maximum activity level for this performance level.*/
	table->MemoryACPILevel.ActivityLevel = PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);

	table->MemoryACPILevel.StutterEnable = 0;
	table->MemoryACPILevel.StrobeEnable = 0;
	table->MemoryACPILevel.EdcReadEnable = 0;
	table->MemoryACPILevel.EdcWriteEnable = 0;
	table->MemoryACPILevel.RttEnable = 0;

	return result;
}
1424
/* UVD levels are not populated in the SMC table on Iceland; stub kept
 * so the common populate path can call it unconditionally. */
static int iceland_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
	SMU71_Discrete_DpmTable *table)
{
	return 0;
}
1430
/* VCE levels are not populated in the SMC table on Iceland; stub. */
static int iceland_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
	SMU71_Discrete_DpmTable *table)
{
	return 0;
}
1436
/* ACP levels are not populated in the SMC table on Iceland; stub. */
static int iceland_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
	SMU71_Discrete_DpmTable *table)
{
	return 0;
}
1442
/* SAMU levels are not populated in the SMC table on Iceland; stub. */
static int iceland_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
	SMU71_Discrete_DpmTable *table)
{
	return 0;
}
1448
/* Have the VBIOS program DRAM timing for the given engine/memory clock
 * pair, read the resulting registers back, and store them in one MC
 * ARB table entry in SMC byte order. */
static int iceland_populate_memory_timing_parameters(
		struct pp_hwmgr *hwmgr,
		uint32_t engine_clock,
		uint32_t memory_clock,
		struct SMU71_Discrete_MCArbDramTimingTableEntry *arb_regs
		)
{
	uint32_t dramTiming;
	uint32_t dramTiming2;
	uint32_t burstTime;
	int result;

	result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
				engine_clock, memory_clock);

	PP_ASSERT_WITH_CODE(result == 0,
		"Error calling VBIOS to set DRAM_TIMING.", return result);

	/* Read back what the VBIOS programmed. */
	dramTiming  = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
	dramTiming2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
	burstTime = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);

	/* Multi-byte fields go to the SMC in big-endian order. */
	arb_regs->McArbDramTiming  = PP_HOST_TO_SMC_UL(dramTiming);
	arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dramTiming2);
	arb_regs->McArbBurstTime = (uint8_t)burstTime;

	return 0;
}
1477
1478/**
1479 * Setup parameters for the MC ARB.
1480 *
1481 * @param hwmgr the address of the powerplay hardware manager.
1482 * @return always 0
1483 * This function is to be called from the SetPowerState table.
1484 */
static int iceland_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
	int result = 0;
	SMU71_Discrete_MCArbDramTimingTable  arb_regs;
	uint32_t i, j;

	memset(&arb_regs, 0x00, sizeof(SMU71_Discrete_MCArbDramTimingTable));

	/* One ARB entry per (SCLK level, MCLK level) combination. */
	for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
		for (j = 0; j < data->dpm_table.mclk_table.count; j++) {
			result = iceland_populate_memory_timing_parameters
				(hwmgr, data->dpm_table.sclk_table.dpm_levels[i].value,
				 data->dpm_table.mclk_table.dpm_levels[j].value,
				 &arb_regs.entries[i][j]);

			/* NOTE(review): break only exits the inner loop; the
			 * outer loop keeps running and a later iteration can
			 * overwrite a failing result with 0 — confirm this is
			 * intentional. */
			if (0 != result) {
				break;
			}
		}
	}

	/* Upload the whole table only if every entry was built. */
	if (0 == result) {
		result = smu7_copy_bytes_to_smc(
				hwmgr,
				smu_data->smu7_data.arb_table_start,
				(uint8_t *)&arb_regs,
				sizeof(SMU71_Discrete_MCArbDramTimingTable),
				SMC_RAM_END
				);
	}

	return result;
}
1520
1521static int iceland_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
1522 SMU71_Discrete_DpmTable *table)
1523{
1524 int result = 0;
1525 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
b3b03052 1526 struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
18aafc59
RZ
1527 table->GraphicsBootLevel = 0;
1528 table->MemoryBootLevel = 0;
1529
1530 /* find boot level from dpm table*/
1531 result = phm_find_boot_level(&(data->dpm_table.sclk_table),
1532 data->vbios_boot_state.sclk_bootup_value,
1533 (uint32_t *)&(smu_data->smc_state_table.GraphicsBootLevel));
1534
1535 if (0 != result) {
1536 smu_data->smc_state_table.GraphicsBootLevel = 0;
634a24d8 1537 pr_err("VBIOS did not find boot engine clock value \
18aafc59
RZ
1538 in dependency table. Using Graphics DPM level 0!");
1539 result = 0;
1540 }
1541
1542 result = phm_find_boot_level(&(data->dpm_table.mclk_table),
1543 data->vbios_boot_state.mclk_bootup_value,
1544 (uint32_t *)&(smu_data->smc_state_table.MemoryBootLevel));
1545
1546 if (0 != result) {
1547 smu_data->smc_state_table.MemoryBootLevel = 0;
634a24d8 1548 pr_err("VBIOS did not find boot engine clock value \
18aafc59
RZ
1549 in dependency table. Using Memory DPM level 0!");
1550 result = 0;
1551 }
1552
1553 table->BootVddc = data->vbios_boot_state.vddc_bootup_value;
1554 if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
1555 table->BootVddci = table->BootVddc;
1556 else
1557 table->BootVddci = data->vbios_boot_state.vddci_bootup_value;
1558
1559 table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value;
1560
1561 return result;
1562}
1563
d3f8c0ab 1564static int iceland_populate_mc_reg_address(struct pp_hwmgr *hwmgr,
18aafc59
RZ
1565 SMU71_Discrete_MCRegisters *mc_reg_table)
1566{
b3b03052 1567 const struct iceland_smumgr *smu_data = (struct iceland_smumgr *)hwmgr->smu_backend;
18aafc59
RZ
1568
1569 uint32_t i, j;
1570
1571 for (i = 0, j = 0; j < smu_data->mc_reg_table.last; j++) {
1572 if (smu_data->mc_reg_table.validflag & 1<<j) {
1573 PP_ASSERT_WITH_CODE(i < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE,
1574 "Index of mc_reg_table->address[] array out of boundary", return -EINVAL);
1575 mc_reg_table->address[i].s0 =
1576 PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s0);
1577 mc_reg_table->address[i].s1 =
1578 PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s1);
1579 i++;
1580 }
1581 }
1582
1583 mc_reg_table->last = (uint8_t)i;
1584
1585 return 0;
1586}
1587
1588/*convert register values from driver to SMC format */
1589static void iceland_convert_mc_registers(
1590 const struct iceland_mc_reg_entry *entry,
1591 SMU71_Discrete_MCRegisterSet *data,
1592 uint32_t num_entries, uint32_t valid_flag)
1593{
1594 uint32_t i, j;
1595
1596 for (i = 0, j = 0; j < num_entries; j++) {
1597 if (valid_flag & 1<<j) {
1598 data->value[i] = PP_HOST_TO_SMC_UL(entry->mc_data[j]);
1599 i++;
1600 }
1601 }
1602}
1603
d3f8c0ab 1604static int iceland_convert_mc_reg_table_entry_to_smc(struct pp_hwmgr *hwmgr,
18aafc59
RZ
1605 const uint32_t memory_clock,
1606 SMU71_Discrete_MCRegisterSet *mc_reg_table_data
1607 )
1608{
b3b03052 1609 struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
18aafc59
RZ
1610 uint32_t i = 0;
1611
1612 for (i = 0; i < smu_data->mc_reg_table.num_entries; i++) {
1613 if (memory_clock <=
1614 smu_data->mc_reg_table.mc_reg_table_entry[i].mclk_max) {
1615 break;
1616 }
1617 }
1618
1619 if ((i == smu_data->mc_reg_table.num_entries) && (i > 0))
1620 --i;
1621
1622 iceland_convert_mc_registers(&smu_data->mc_reg_table.mc_reg_table_entry[i],
1623 mc_reg_table_data, smu_data->mc_reg_table.last,
1624 smu_data->mc_reg_table.validflag);
1625
1626 return 0;
1627}
1628
1629static int iceland_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr,
1630 SMU71_Discrete_MCRegisters *mc_regs)
1631{
1632 int result = 0;
1633 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1634 int res;
1635 uint32_t i;
1636
1637 for (i = 0; i < data->dpm_table.mclk_table.count; i++) {
1638 res = iceland_convert_mc_reg_table_entry_to_smc(
d3f8c0ab 1639 hwmgr,
18aafc59
RZ
1640 data->dpm_table.mclk_table.dpm_levels[i].value,
1641 &mc_regs->data[i]
1642 );
1643
1644 if (0 != res)
1645 result = res;
1646 }
1647
1648 return result;
1649}
1650
/* Rebuild the per-level MC register data and re-upload it to SMC RAM
 * when an overdrive MCLK change is pending; otherwise do nothing. */
static int iceland_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr)
{
	struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	uint32_t address;
	int32_t result;

	/* Nothing to do unless the MCLK table was modified (overdrive). */
	if (0 == (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
		return 0;


	memset(&smu_data->mc_regs, 0, sizeof(SMU71_Discrete_MCRegisters));

	result = iceland_convert_mc_reg_table_to_smc(hwmgr, &(smu_data->mc_regs));

	if (result != 0)
		return result;


	/* Upload only the per-level data[] portion; the address list was
	 * written once at init and does not change. */
	address = smu_data->smu7_data.mc_reg_table_start + (uint32_t)offsetof(SMU71_Discrete_MCRegisters, data[0]);

	return  smu7_copy_bytes_to_smc(hwmgr, address,
				 (uint8_t *)&smu_data->mc_regs.data[0],
				sizeof(SMU71_Discrete_MCRegisterSet) * data->dpm_table.mclk_table.count,
				SMC_RAM_END);
}
1677
1678static int iceland_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr)
1679{
1680 int result;
b3b03052 1681 struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
18aafc59
RZ
1682
1683 memset(&smu_data->mc_regs, 0x00, sizeof(SMU71_Discrete_MCRegisters));
d3f8c0ab 1684 result = iceland_populate_mc_reg_address(hwmgr, &(smu_data->mc_regs));
18aafc59
RZ
1685 PP_ASSERT_WITH_CODE(0 == result,
1686 "Failed to initialize MCRegTable for the MC register addresses!", return result;);
1687
1688 result = iceland_convert_mc_reg_table_to_smc(hwmgr, &smu_data->mc_regs);
1689 PP_ASSERT_WITH_CODE(0 == result,
1690 "Failed to initialize MCRegTable for driver state!", return result;);
1691
d3f8c0ab 1692 return smu7_copy_bytes_to_smc(hwmgr, smu_data->smu7_data.mc_reg_table_start,
18aafc59
RZ
1693 (uint8_t *)&smu_data->mc_regs, sizeof(SMU71_Discrete_MCRegisters), SMC_RAM_END);
1694}
1695
1696static int iceland_populate_smc_initial_state(struct pp_hwmgr *hwmgr)
1697{
1698 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
b3b03052 1699 struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
18aafc59
RZ
1700 uint8_t count, level;
1701
1702 count = (uint8_t)(hwmgr->dyn_state.vddc_dependency_on_sclk->count);
1703
1704 for (level = 0; level < count; level++) {
1705 if (hwmgr->dyn_state.vddc_dependency_on_sclk->entries[level].clk
1706 >= data->vbios_boot_state.sclk_bootup_value) {
1707 smu_data->smc_state_table.GraphicsBootLevel = level;
1708 break;
1709 }
1710 }
1711
1712 count = (uint8_t)(hwmgr->dyn_state.vddc_dependency_on_mclk->count);
1713
1714 for (level = 0; level < count; level++) {
1715 if (hwmgr->dyn_state.vddc_dependency_on_mclk->entries[level].clk
1716 >= data->vbios_boot_state.mclk_bootup_value) {
1717 smu_data->smc_state_table.MemoryBootLevel = level;
1718 break;
1719 }
1720 }
1721
1722 return 0;
1723}
1724
1725static int iceland_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
1726{
1727 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
b3b03052 1728 struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
a1c1a1de 1729 const struct iceland_pt_defaults *defaults = smu_data->power_tune_defaults;
18aafc59
RZ
1730 SMU71_Discrete_DpmTable *dpm_table = &(smu_data->smc_state_table);
1731 struct phm_cac_tdp_table *cac_dtp_table = hwmgr->dyn_state.cac_dtp_table;
1732 struct phm_ppm_table *ppm = hwmgr->dyn_state.ppm_parameter_table;
a1c1a1de 1733 const uint16_t *def1, *def2;
18aafc59
RZ
1734 int i, j, k;
1735
1736
1737 /*
1738 * TDP number of fraction bits are changed from 8 to 7 for Iceland
1739 * as requested by SMC team
1740 */
1741
1742 dpm_table->DefaultTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 256));
1743 dpm_table->TargetTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usConfigurableTDP * 256));
1744
1745
1746 dpm_table->DTETjOffset = 0;
1747
1748 dpm_table->GpuTjMax = (uint8_t)(data->thermal_temp_setting.temperature_high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES);
1749 dpm_table->GpuTjHyst = 8;
1750
1751 dpm_table->DTEAmbientTempBase = defaults->dte_ambient_temp_base;
1752
1753 /* The following are for new Iceland Multi-input fan/thermal control */
1754 if (NULL != ppm) {
1755 dpm_table->PPM_PkgPwrLimit = (uint16_t)ppm->dgpu_tdp * 256 / 1000;
1756 dpm_table->PPM_TemperatureLimit = (uint16_t)ppm->tj_max * 256;
1757 } else {
1758 dpm_table->PPM_PkgPwrLimit = 0;
1759 dpm_table->PPM_TemperatureLimit = 0;
1760 }
1761
1762 CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_PkgPwrLimit);
1763 CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_TemperatureLimit);
1764
e71b7ae6 1765 dpm_table->BAPM_TEMP_GRADIENT = PP_HOST_TO_SMC_UL(defaults->bapm_temp_gradient);
18aafc59
RZ
1766 def1 = defaults->bapmti_r;
1767 def2 = defaults->bapmti_rc;
1768
1769 for (i = 0; i < SMU71_DTE_ITERATIONS; i++) {
1770 for (j = 0; j < SMU71_DTE_SOURCES; j++) {
1771 for (k = 0; k < SMU71_DTE_SINKS; k++) {
1772 dpm_table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*def1);
1773 dpm_table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*def2);
1774 def1++;
1775 def2++;
1776 }
1777 }
1778 }
1779
1780 return 0;
1781}
1782
1783static int iceland_populate_smc_svi2_config(struct pp_hwmgr *hwmgr,
1784 SMU71_Discrete_DpmTable *tab)
1785{
1786 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1787
1788 if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control)
1789 tab->SVI2Enable |= VDDC_ON_SVI2;
1790
1791 if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
1792 tab->SVI2Enable |= VDDCI_ON_SVI2;
1793 else
1794 tab->MergedVddci = 1;
1795
1796 if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control)
1797 tab->SVI2Enable |= MVDD_ON_SVI2;
1798
1799 PP_ASSERT_WITH_CODE(tab->SVI2Enable != (VDDC_ON_SVI2 | VDDCI_ON_SVI2 | MVDD_ON_SVI2) &&
1800 (tab->SVI2Enable & VDDC_ON_SVI2), "SVI2 domain configuration is incorrect!", return -EINVAL);
1801
1802 return 0;
1803}
1804
1805/**
1806 * Initializes the SMC table and uploads it
1807 *
1808 * @param hwmgr the address of the powerplay hardware manager.
1809 * @param pInput the pointer to input data (PowerState)
1810 * @return always 0
1811 */
1812int iceland_init_smc_table(struct pp_hwmgr *hwmgr)
1813{
1814 int result;
1815 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
b3b03052 1816 struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
18aafc59
RZ
1817 SMU71_Discrete_DpmTable *table = &(smu_data->smc_state_table);
1818
1819
1820 iceland_initialize_power_tune_defaults(hwmgr);
1821 memset(&(smu_data->smc_state_table), 0x00, sizeof(smu_data->smc_state_table));
1822
1823 if (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control) {
1824 iceland_populate_smc_voltage_tables(hwmgr, table);
1825 }
1826
1827 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1828 PHM_PlatformCaps_AutomaticDCTransition))
1829 table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
1830
1831
1832 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1833 PHM_PlatformCaps_StepVddc))
1834 table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
1835
1836 if (data->is_memory_gddr5)
1837 table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
1838
1839
1840 if (data->ulv_supported) {
1841 result = iceland_populate_ulv_state(hwmgr, &(smu_data->ulv_setting));
1842 PP_ASSERT_WITH_CODE(0 == result,
1843 "Failed to initialize ULV state!", return result;);
1844
1845 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1846 ixCG_ULV_PARAMETER, 0x40035);
1847 }
1848
1849 result = iceland_populate_smc_link_level(hwmgr, table);
1850 PP_ASSERT_WITH_CODE(0 == result,
1851 "Failed to initialize Link Level!", return result;);
1852
1853 result = iceland_populate_all_graphic_levels(hwmgr);
1854 PP_ASSERT_WITH_CODE(0 == result,
1855 "Failed to initialize Graphics Level!", return result;);
1856
1857 result = iceland_populate_all_memory_levels(hwmgr);
1858 PP_ASSERT_WITH_CODE(0 == result,
1859 "Failed to initialize Memory Level!", return result;);
1860
1861 result = iceland_populate_smc_acpi_level(hwmgr, table);
1862 PP_ASSERT_WITH_CODE(0 == result,
1863 "Failed to initialize ACPI Level!", return result;);
1864
1865 result = iceland_populate_smc_vce_level(hwmgr, table);
1866 PP_ASSERT_WITH_CODE(0 == result,
1867 "Failed to initialize VCE Level!", return result;);
1868
1869 result = iceland_populate_smc_acp_level(hwmgr, table);
1870 PP_ASSERT_WITH_CODE(0 == result,
1871 "Failed to initialize ACP Level!", return result;);
1872
1873 result = iceland_populate_smc_samu_level(hwmgr, table);
1874 PP_ASSERT_WITH_CODE(0 == result,
1875 "Failed to initialize SAMU Level!", return result;);
1876
1877 /* Since only the initial state is completely set up at this point (the other states are just copies of the boot state) we only */
1878 /* need to populate the ARB settings for the initial state. */
1879 result = iceland_program_memory_timing_parameters(hwmgr);
1880 PP_ASSERT_WITH_CODE(0 == result,
1881 "Failed to Write ARB settings for the initial state.", return result;);
1882
1883 result = iceland_populate_smc_uvd_level(hwmgr, table);
1884 PP_ASSERT_WITH_CODE(0 == result,
1885 "Failed to initialize UVD Level!", return result;);
1886
1887 table->GraphicsBootLevel = 0;
1888 table->MemoryBootLevel = 0;
1889
1890 result = iceland_populate_smc_boot_level(hwmgr, table);
1891 PP_ASSERT_WITH_CODE(0 == result,
1892 "Failed to initialize Boot Level!", return result;);
1893
1894 result = iceland_populate_smc_initial_state(hwmgr);
1895 PP_ASSERT_WITH_CODE(0 == result, "Failed to initialize Boot State!", return result);
1896
1897 result = iceland_populate_bapm_parameters_in_dpm_table(hwmgr);
1898 PP_ASSERT_WITH_CODE(0 == result, "Failed to populate BAPM Parameters!", return result);
1899
1900 table->GraphicsVoltageChangeEnable = 1;
1901 table->GraphicsThermThrottleEnable = 1;
1902 table->GraphicsInterval = 1;
1903 table->VoltageInterval = 1;
1904 table->ThermalInterval = 1;
1905
1906 table->TemperatureLimitHigh =
1907 (data->thermal_temp_setting.temperature_high *
1908 SMU7_Q88_FORMAT_CONVERSION_UNIT) / PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
1909 table->TemperatureLimitLow =
1910 (data->thermal_temp_setting.temperature_low *
1911 SMU7_Q88_FORMAT_CONVERSION_UNIT) / PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
1912
1913 table->MemoryVoltageChangeEnable = 1;
1914 table->MemoryInterval = 1;
1915 table->VoltageResponseTime = 0;
1916 table->PhaseResponseTime = 0;
1917 table->MemoryThermThrottleEnable = 1;
1918 table->PCIeBootLinkLevel = 0;
1919 table->PCIeGenInterval = 1;
1920
1921 result = iceland_populate_smc_svi2_config(hwmgr, table);
1922 PP_ASSERT_WITH_CODE(0 == result,
1923 "Failed to populate SVI2 setting!", return result);
1924
1925 table->ThermGpio = 17;
1926 table->SclkStepSize = 0x4000;
1927
1928 CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
1929 CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcVid);
1930 CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcPhase);
1931 CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddciVid);
1932 CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskMvddVid);
1933 CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
1934 CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
1935 CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
1936 CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
1937 CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
1938
1939 table->BootVddc = PP_HOST_TO_SMC_US(table->BootVddc * VOLTAGE_SCALE);
1940 table->BootVddci = PP_HOST_TO_SMC_US(table->BootVddci * VOLTAGE_SCALE);
1941 table->BootMVdd = PP_HOST_TO_SMC_US(table->BootMVdd * VOLTAGE_SCALE);
1942
1943 /* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */
d3f8c0ab 1944 result = smu7_copy_bytes_to_smc(hwmgr, smu_data->smu7_data.dpm_table_start +
18aafc59
RZ
1945 offsetof(SMU71_Discrete_DpmTable, SystemFlags),
1946 (uint8_t *)&(table->SystemFlags),
1947 sizeof(SMU71_Discrete_DpmTable)-3 * sizeof(SMU71_PIDController),
1948 SMC_RAM_END);
1949
1950 PP_ASSERT_WITH_CODE(0 == result,
1951 "Failed to upload dpm data to SMC memory!", return result;);
1952
1953 /* Upload all ulv setting to SMC memory.(dpm level, dpm level count etc) */
d3f8c0ab 1954 result = smu7_copy_bytes_to_smc(hwmgr,
18aafc59
RZ
1955 smu_data->smu7_data.ulv_setting_starts,
1956 (uint8_t *)&(smu_data->ulv_setting),
1957 sizeof(SMU71_Discrete_Ulv),
1958 SMC_RAM_END);
1959
1960
1961 result = iceland_populate_initial_mc_reg_table(hwmgr);
1962 PP_ASSERT_WITH_CODE((0 == result),
1963 "Failed to populate initialize MC Reg table!", return result);
1964
1965 result = iceland_populate_pm_fuses(hwmgr);
1966 PP_ASSERT_WITH_CODE(0 == result,
1967 "Failed to populate PM fuses to SMC memory!", return result);
1968
1969 return 0;
1970}
1971
1972/**
1973* Set up the fan table to control the fan using the SMC.
1974* @param hwmgr the address of the powerplay hardware manager.
1975* @param pInput the pointer to input data
1976* @param pOutput the pointer to output data
1977* @param pStorage the pointer to temporary storage
1978* @param Result the last failure code
1979* @return result from set temperature range routine
1980*/
1981int iceland_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
1982{
b3b03052 1983 struct smu7_smumgr *smu7_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
18aafc59
RZ
1984 SMU71_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
1985 uint32_t duty100;
1986 uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
1987 uint16_t fdo_min, slope1, slope2;
1988 uint32_t reference_clock;
1989 int res;
1990 uint64_t tmp64;
1991
1992 if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl))
1993 return 0;
1994
10e2ca34
HZ
1995 if (hwmgr->thermal_controller.fanInfo.bNoFan) {
1996 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1997 PHM_PlatformCaps_MicrocodeFanControl);
1998 return 0;
1999 }
2000
18aafc59
RZ
2001 if (0 == smu7_data->fan_table_start) {
2002 phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
2003 return 0;
2004 }
2005
2006 duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL1, FMAX_DUTY100);
2007
2008 if (0 == duty100) {
2009 phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
2010 return 0;
2011 }
2012
2013 tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin * duty100;
2014 do_div(tmp64, 10000);
2015 fdo_min = (uint16_t)tmp64;
2016
2017 t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed - hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
2018 t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh - hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
2019
2020 pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
2021 pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
2022
2023 slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
2024 slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
2025
2026 fan_table.TempMin = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMin) / 100);
2027 fan_table.TempMed = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMed) / 100);
2028 fan_table.TempMax = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMax) / 100);
2029
2030 fan_table.Slope1 = cpu_to_be16(slope1);
2031 fan_table.Slope2 = cpu_to_be16(slope2);
2032
2033 fan_table.FdoMin = cpu_to_be16(fdo_min);
2034
2035 fan_table.HystDown = cpu_to_be16(hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst);
2036
2037 fan_table.HystUp = cpu_to_be16(1);
2038
2039 fan_table.HystSlope = cpu_to_be16(1);
2040
2041 fan_table.TempRespLim = cpu_to_be16(5);
2042
2043 reference_clock = smu7_get_xclk(hwmgr);
2044
2045 fan_table.RefreshPeriod = cpu_to_be32((hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay * reference_clock) / 1600);
2046
2047 fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
2048
2049 fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_CTRL, TEMP_SEL);
2050
2051 /* fan_table.FanControl_GL_Flag = 1; */
2052
d3f8c0ab 2053 res = smu7_copy_bytes_to_smc(hwmgr, smu7_data->fan_table_start, (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), SMC_RAM_END);
18aafc59
RZ
2054
2055 return 0;
2056}
2057
2058
2059static int iceland_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
2060{
2061 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2062
2063 if (data->need_update_smu7_dpm_table &
2064 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK))
2065 return iceland_program_memory_timing_parameters(hwmgr);
2066
2067 return 0;
2068}
2069
2070int iceland_update_sclk_threshold(struct pp_hwmgr *hwmgr)
2071{
2072 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
b3b03052 2073 struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
18aafc59
RZ
2074
2075 int result = 0;
2076 uint32_t low_sclk_interrupt_threshold = 0;
2077
2078 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2079 PHM_PlatformCaps_SclkThrottleLowNotification)
2080 && (hwmgr->gfx_arbiter.sclk_threshold !=
2081 data->low_sclk_interrupt_threshold)) {
2082 data->low_sclk_interrupt_threshold =
2083 hwmgr->gfx_arbiter.sclk_threshold;
2084 low_sclk_interrupt_threshold =
2085 data->low_sclk_interrupt_threshold;
2086
2087 CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
2088
2089 result = smu7_copy_bytes_to_smc(
d3f8c0ab 2090 hwmgr,
18aafc59
RZ
2091 smu_data->smu7_data.dpm_table_start +
2092 offsetof(SMU71_Discrete_DpmTable,
2093 LowSclkInterruptThreshold),
2094 (uint8_t *)&low_sclk_interrupt_threshold,
2095 sizeof(uint32_t),
2096 SMC_RAM_END);
2097 }
2098
2099 result = iceland_update_and_upload_mc_reg_table(hwmgr);
2100
2101 PP_ASSERT_WITH_CODE((0 == result), "Failed to upload MC reg table!", return result);
2102
2103 result = iceland_program_mem_timing_parameters(hwmgr);
2104 PP_ASSERT_WITH_CODE((result == 0),
2105 "Failed to program memory timing parameters!",
2106 );
2107
2108 return result;
2109}
2110
2111uint32_t iceland_get_offsetof(uint32_t type, uint32_t member)
2112{
2113 switch (type) {
2114 case SMU_SoftRegisters:
2115 switch (member) {
2116 case HandshakeDisables:
2117 return offsetof(SMU71_SoftRegisters, HandshakeDisables);
2118 case VoltageChangeTimeout:
2119 return offsetof(SMU71_SoftRegisters, VoltageChangeTimeout);
2120 case AverageGraphicsActivity:
2121 return offsetof(SMU71_SoftRegisters, AverageGraphicsActivity);
2122 case PreVBlankGap:
2123 return offsetof(SMU71_SoftRegisters, PreVBlankGap);
2124 case VBlankTimeout:
2125 return offsetof(SMU71_SoftRegisters, VBlankTimeout);
2126 case UcodeLoadStatus:
2127 return offsetof(SMU71_SoftRegisters, UcodeLoadStatus);
2128 }
2129 case SMU_Discrete_DpmTable:
2130 switch (member) {
2131 case LowSclkInterruptThreshold:
2132 return offsetof(SMU71_Discrete_DpmTable, LowSclkInterruptThreshold);
2133 }
2134 }
98a36749 2135 pr_warn("can't get the offset of type %x member %x\n", type, member);
18aafc59
RZ
2136 return 0;
2137}
2138
2139uint32_t iceland_get_mac_definition(uint32_t value)
2140{
2141 switch (value) {
2142 case SMU_MAX_LEVELS_GRAPHICS:
2143 return SMU71_MAX_LEVELS_GRAPHICS;
2144 case SMU_MAX_LEVELS_MEMORY:
2145 return SMU71_MAX_LEVELS_MEMORY;
2146 case SMU_MAX_LEVELS_LINK:
2147 return SMU71_MAX_LEVELS_LINK;
2148 case SMU_MAX_ENTRIES_SMIO:
2149 return SMU71_MAX_ENTRIES_SMIO;
2150 case SMU_MAX_LEVELS_VDDC:
2151 return SMU71_MAX_LEVELS_VDDC;
2152 case SMU_MAX_LEVELS_VDDCI:
2153 return SMU71_MAX_LEVELS_VDDCI;
2154 case SMU_MAX_LEVELS_MVDD:
2155 return SMU71_MAX_LEVELS_MVDD;
2156 }
2157
98a36749 2158 pr_warn("can't get the mac of %x\n", value);
18aafc59
RZ
2159 return 0;
2160}
2161
2162/**
2163 * Get the location of various tables inside the FW image.
2164 *
2165 * @param hwmgr the address of the powerplay hardware manager.
2166 * @return always 0
2167 */
2168int iceland_process_firmware_header(struct pp_hwmgr *hwmgr)
2169{
2170 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
b3b03052 2171 struct smu7_smumgr *smu7_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
18aafc59
RZ
2172
2173 uint32_t tmp;
2174 int result;
2175 bool error = false;
2176
d3f8c0ab 2177 result = smu7_read_smc_sram_dword(hwmgr,
18aafc59
RZ
2178 SMU71_FIRMWARE_HEADER_LOCATION +
2179 offsetof(SMU71_Firmware_Header, DpmTable),
2180 &tmp, SMC_RAM_END);
2181
2182 if (0 == result) {
2183 smu7_data->dpm_table_start = tmp;
2184 }
2185
2186 error |= (0 != result);
2187
d3f8c0ab 2188 result = smu7_read_smc_sram_dword(hwmgr,
18aafc59
RZ
2189 SMU71_FIRMWARE_HEADER_LOCATION +
2190 offsetof(SMU71_Firmware_Header, SoftRegisters),
2191 &tmp, SMC_RAM_END);
2192
2193 if (0 == result) {
2194 data->soft_regs_start = tmp;
2195 smu7_data->soft_regs_start = tmp;
2196 }
2197
2198 error |= (0 != result);
2199
2200
d3f8c0ab 2201 result = smu7_read_smc_sram_dword(hwmgr,
18aafc59
RZ
2202 SMU71_FIRMWARE_HEADER_LOCATION +
2203 offsetof(SMU71_Firmware_Header, mcRegisterTable),
2204 &tmp, SMC_RAM_END);
2205
2206 if (0 == result) {
2207 smu7_data->mc_reg_table_start = tmp;
2208 }
2209
d3f8c0ab 2210 result = smu7_read_smc_sram_dword(hwmgr,
18aafc59
RZ
2211 SMU71_FIRMWARE_HEADER_LOCATION +
2212 offsetof(SMU71_Firmware_Header, FanTable),
2213 &tmp, SMC_RAM_END);
2214
2215 if (0 == result) {
2216 smu7_data->fan_table_start = tmp;
2217 }
2218
2219 error |= (0 != result);
2220
d3f8c0ab 2221 result = smu7_read_smc_sram_dword(hwmgr,
18aafc59
RZ
2222 SMU71_FIRMWARE_HEADER_LOCATION +
2223 offsetof(SMU71_Firmware_Header, mcArbDramTimingTable),
2224 &tmp, SMC_RAM_END);
2225
2226 if (0 == result) {
2227 smu7_data->arb_table_start = tmp;
2228 }
2229
2230 error |= (0 != result);
2231
2232
d3f8c0ab 2233 result = smu7_read_smc_sram_dword(hwmgr,
18aafc59
RZ
2234 SMU71_FIRMWARE_HEADER_LOCATION +
2235 offsetof(SMU71_Firmware_Header, Version),
2236 &tmp, SMC_RAM_END);
2237
2238 if (0 == result) {
2239 hwmgr->microcode_version_info.SMC = tmp;
2240 }
2241
2242 error |= (0 != result);
2243
d3f8c0ab 2244 result = smu7_read_smc_sram_dword(hwmgr,
18aafc59
RZ
2245 SMU71_FIRMWARE_HEADER_LOCATION +
2246 offsetof(SMU71_Firmware_Header, UlvSettings),
2247 &tmp, SMC_RAM_END);
2248
2249 if (0 == result) {
2250 smu7_data->ulv_setting_starts = tmp;
2251 }
2252
2253 error |= (0 != result);
2254
2255 return error ? 1 : 0;
2256}
2257
2258/*---------------------------MC----------------------------*/
2259
2260static uint8_t iceland_get_memory_modile_index(struct pp_hwmgr *hwmgr)
2261{
2262 return (uint8_t) (0xFF & (cgs_read_register(hwmgr->device, mmBIOS_SCRATCH_4) >> 16));
2263}
2264
2265static bool iceland_check_s0_mc_reg_index(uint16_t in_reg, uint16_t *out_reg)
2266{
2267 bool result = true;
2268
2269 switch (in_reg) {
2270 case mmMC_SEQ_RAS_TIMING:
2271 *out_reg = mmMC_SEQ_RAS_TIMING_LP;
2272 break;
2273
2274 case mmMC_SEQ_DLL_STBY:
2275 *out_reg = mmMC_SEQ_DLL_STBY_LP;
2276 break;
2277
2278 case mmMC_SEQ_G5PDX_CMD0:
2279 *out_reg = mmMC_SEQ_G5PDX_CMD0_LP;
2280 break;
2281
2282 case mmMC_SEQ_G5PDX_CMD1:
2283 *out_reg = mmMC_SEQ_G5PDX_CMD1_LP;
2284 break;
2285
2286 case mmMC_SEQ_G5PDX_CTRL:
2287 *out_reg = mmMC_SEQ_G5PDX_CTRL_LP;
2288 break;
2289
2290 case mmMC_SEQ_CAS_TIMING:
2291 *out_reg = mmMC_SEQ_CAS_TIMING_LP;
2292 break;
2293
2294 case mmMC_SEQ_MISC_TIMING:
2295 *out_reg = mmMC_SEQ_MISC_TIMING_LP;
2296 break;
2297
2298 case mmMC_SEQ_MISC_TIMING2:
2299 *out_reg = mmMC_SEQ_MISC_TIMING2_LP;
2300 break;
2301
2302 case mmMC_SEQ_PMG_DVS_CMD:
2303 *out_reg = mmMC_SEQ_PMG_DVS_CMD_LP;
2304 break;
2305
2306 case mmMC_SEQ_PMG_DVS_CTL:
2307 *out_reg = mmMC_SEQ_PMG_DVS_CTL_LP;
2308 break;
2309
2310 case mmMC_SEQ_RD_CTL_D0:
2311 *out_reg = mmMC_SEQ_RD_CTL_D0_LP;
2312 break;
2313
2314 case mmMC_SEQ_RD_CTL_D1:
2315 *out_reg = mmMC_SEQ_RD_CTL_D1_LP;
2316 break;
2317
2318 case mmMC_SEQ_WR_CTL_D0:
2319 *out_reg = mmMC_SEQ_WR_CTL_D0_LP;
2320 break;
2321
2322 case mmMC_SEQ_WR_CTL_D1:
2323 *out_reg = mmMC_SEQ_WR_CTL_D1_LP;
2324 break;
2325
2326 case mmMC_PMG_CMD_EMRS:
2327 *out_reg = mmMC_SEQ_PMG_CMD_EMRS_LP;
2328 break;
2329
2330 case mmMC_PMG_CMD_MRS:
2331 *out_reg = mmMC_SEQ_PMG_CMD_MRS_LP;
2332 break;
2333
2334 case mmMC_PMG_CMD_MRS1:
2335 *out_reg = mmMC_SEQ_PMG_CMD_MRS1_LP;
2336 break;
2337
2338 case mmMC_SEQ_PMG_TIMING:
2339 *out_reg = mmMC_SEQ_PMG_TIMING_LP;
2340 break;
2341
2342 case mmMC_PMG_CMD_MRS2:
2343 *out_reg = mmMC_SEQ_PMG_CMD_MRS2_LP;
2344 break;
2345
2346 case mmMC_SEQ_WR_CTL_2:
2347 *out_reg = mmMC_SEQ_WR_CTL_2_LP;
2348 break;
2349
2350 default:
2351 result = false;
2352 break;
2353 }
2354
2355 return result;
2356}
2357
2358static int iceland_set_s0_mc_reg_index(struct iceland_mc_reg_table *table)
2359{
2360 uint32_t i;
2361 uint16_t address;
2362
2363 for (i = 0; i < table->last; i++) {
2364 table->mc_reg_address[i].s0 =
2365 iceland_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address)
2366 ? address : table->mc_reg_address[i].s1;
2367 }
2368 return 0;
2369}
2370
2371static int iceland_copy_vbios_smc_reg_table(const pp_atomctrl_mc_reg_table *table,
2372 struct iceland_mc_reg_table *ni_table)
2373{
2374 uint8_t i, j;
2375
2376 PP_ASSERT_WITH_CODE((table->last <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
2377 "Invalid VramInfo table.", return -EINVAL);
2378 PP_ASSERT_WITH_CODE((table->num_entries <= MAX_AC_TIMING_ENTRIES),
2379 "Invalid VramInfo table.", return -EINVAL);
2380
2381 for (i = 0; i < table->last; i++) {
2382 ni_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
2383 }
2384 ni_table->last = table->last;
2385
2386 for (i = 0; i < table->num_entries; i++) {
2387 ni_table->mc_reg_table_entry[i].mclk_max =
2388 table->mc_reg_table_entry[i].mclk_max;
2389 for (j = 0; j < table->last; j++) {
2390 ni_table->mc_reg_table_entry[i].mc_data[j] =
2391 table->mc_reg_table_entry[i].mc_data[j];
2392 }
2393 }
2394
2395 ni_table->num_entries = table->num_entries;
2396
2397 return 0;
2398}
2399
2400/**
2401 * VBIOS omits some information to reduce size, we need to recover them here.
2402 * 1. when we see mmMC_SEQ_MISC1, bit[31:16] EMRS1, need to be write to mmMC_PMG_CMD_EMRS /_LP[15:0].
2403 * Bit[15:0] MRS, need to be update mmMC_PMG_CMD_MRS/_LP[15:0]
2404 * 2. when we see mmMC_SEQ_RESERVE_M, bit[15:0] EMRS2, need to be write to mmMC_PMG_CMD_MRS1/_LP[15:0].
2405 * 3. need to set these data for each clock range
2406 *
2407 * @param hwmgr the address of the powerplay hardware manager.
2408 * @param table the address of MCRegTable
2409 * @return always 0
2410 */
2411static int iceland_set_mc_special_registers(struct pp_hwmgr *hwmgr,
2412 struct iceland_mc_reg_table *table)
2413{
2414 uint8_t i, j, k;
2415 uint32_t temp_reg;
2416 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2417
2418 for (i = 0, j = table->last; i < table->last; i++) {
2419 PP_ASSERT_WITH_CODE((j < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
2420 "Invalid VramInfo table.", return -EINVAL);
2421
2422 switch (table->mc_reg_address[i].s1) {
2423
2424 case mmMC_SEQ_MISC1:
2425 temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS);
2426 table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS;
2427 table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP;
2428 for (k = 0; k < table->num_entries; k++) {
2429 table->mc_reg_table_entry[k].mc_data[j] =
2430 ((temp_reg & 0xffff0000)) |
2431 ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
2432 }
2433 j++;
2434 PP_ASSERT_WITH_CODE((j < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
2435 "Invalid VramInfo table.", return -EINVAL);
2436
2437 temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS);
2438 table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS;
2439 table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP;
2440 for (k = 0; k < table->num_entries; k++) {
2441 table->mc_reg_table_entry[k].mc_data[j] =
2442 (temp_reg & 0xffff0000) |
2443 (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
2444
2445 if (!data->is_memory_gddr5) {
2446 table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
2447 }
2448 }
2449 j++;
2450 PP_ASSERT_WITH_CODE((j <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
2451 "Invalid VramInfo table.", return -EINVAL);
2452
e0705324 2453 if (!data->is_memory_gddr5 && j < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE) {
18aafc59
RZ
2454 table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
2455 table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
2456 for (k = 0; k < table->num_entries; k++) {
2457 table->mc_reg_table_entry[k].mc_data[j] =
2458 (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
2459 }
2460 j++;
2461 PP_ASSERT_WITH_CODE((j <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
2462 "Invalid VramInfo table.", return -EINVAL);
2463 }
2464
2465 break;
2466
2467 case mmMC_SEQ_RESERVE_M:
2468 temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1);
2469 table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1;
2470 table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP;
2471 for (k = 0; k < table->num_entries; k++) {
2472 table->mc_reg_table_entry[k].mc_data[j] =
2473 (temp_reg & 0xffff0000) |
2474 (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
2475 }
2476 j++;
2477 PP_ASSERT_WITH_CODE((j <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
2478 "Invalid VramInfo table.", return -EINVAL);
2479 break;
2480
2481 default:
2482 break;
2483 }
2484
2485 }
2486
2487 table->last = j;
2488
2489 return 0;
2490}
2491
2492static int iceland_set_valid_flag(struct iceland_mc_reg_table *table)
2493{
2494 uint8_t i, j;
2495 for (i = 0; i < table->last; i++) {
2496 for (j = 1; j < table->num_entries; j++) {
2497 if (table->mc_reg_table_entry[j-1].mc_data[i] !=
2498 table->mc_reg_table_entry[j].mc_data[i]) {
2499 table->validflag |= (1<<i);
2500 break;
2501 }
2502 }
2503 }
2504
2505 return 0;
2506}
2507
2508int iceland_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
2509{
2510 int result;
b3b03052 2511 struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
18aafc59
RZ
2512 pp_atomctrl_mc_reg_table *table;
2513 struct iceland_mc_reg_table *ni_table = &smu_data->mc_reg_table;
2514 uint8_t module_index = iceland_get_memory_modile_index(hwmgr);
2515
2516 table = kzalloc(sizeof(pp_atomctrl_mc_reg_table), GFP_KERNEL);
2517
2518 if (NULL == table)
2519 return -ENOMEM;
2520
2521 /* Program additional LP registers that are no longer programmed by VBIOS */
2522 cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING));
2523 cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING));
2524 cgs_write_register(hwmgr->device, mmMC_SEQ_DLL_STBY_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_DLL_STBY));
2525 cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0));
2526 cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1));
2527 cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL));
2528 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD));
2529 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL));
2530 cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING));
2531 cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2));
2532 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_EMRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS));
2533 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS));
2534 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS1_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1));
2535 cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0));
2536 cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1));
2537 cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0));
2538 cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1));
2539 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING));
2540 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS2_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS2));
2541 cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_2));
2542
2543 memset(table, 0x00, sizeof(pp_atomctrl_mc_reg_table));
2544
2545 result = atomctrl_initialize_mc_reg_table(hwmgr, module_index, table);
2546
2547 if (0 == result)
2548 result = iceland_copy_vbios_smc_reg_table(table, ni_table);
2549
2550 if (0 == result) {
2551 iceland_set_s0_mc_reg_index(ni_table);
2552 result = iceland_set_mc_special_registers(hwmgr, ni_table);
2553 }
2554
2555 if (0 == result)
2556 iceland_set_valid_flag(ni_table);
2557
2558 kfree(table);
2559
2560 return result;
2561}
2562
2563bool iceland_is_dpm_running(struct pp_hwmgr *hwmgr)
2564{
2565 return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
2566 CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON))
2567 ? true : false;
2568}