/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "pp_debug.h"
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/fb.h>
#include <asm/div64.h>
#include <linux/delay.h>
#include "pp_acpi.h"
#include "ppatomctrl.h"
#include "atombios.h"
#include "pptable_v1_0.h"
#include "pppcielanes.h"
#include "amd_pcie_helpers.h"
#include "hardwaremanager.h"
#include "process_pptables_v1_0.h"
#include "cgs_common.h"

#include "smu7_common.h"

#include "hwmgr.h"
#include "smu7_hwmgr.h"
#include "smu7_smumgr.h"
#include "smu_ucode_xfer_vi.h"
#include "smu7_powertune.h"
#include "smu7_dyn_defaults.h"
#include "smu7_thermal.h"
#include "smu7_clockpowergating.h"
#include "processpptables.h"

#define MC_CG_ARB_FREQ_F0           0x0a
#define MC_CG_ARB_FREQ_F1           0x0b
#define MC_CG_ARB_FREQ_F2           0x0c
#define MC_CG_ARB_FREQ_F3           0x0d

#define MC_CG_SEQ_DRAMCONF_S0       0x05
#define MC_CG_SEQ_DRAMCONF_S1       0x06
#define MC_CG_SEQ_YCLK_SUSPEND      0x04
#define MC_CG_SEQ_YCLK_RESUME       0x0a

#define SMC_CG_IND_START            0xc0030000
#define SMC_CG_IND_END              0xc0040000

#define VOLTAGE_SCALE               4
#define VOLTAGE_VID_OFFSET_SCALE1   625
#define VOLTAGE_VID_OFFSET_SCALE2   100
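
/*
 * Illustrative sketch (not in the original file): SVI2 VIDs step in
 * 6.25 mV increments, which is what the 625/100 scale pair above
 * encodes.  The 1.55 V ceiling and the helper name are assumptions
 * for this example, not code from the driver.
 */
static inline uint32_t example_svi2_vid_to_mv(uint32_t vid)
{
        /* 155000/100 = 1550 mV; vid * 625 / 100 = vid * 6.25 mV */
        return (155000 - vid * VOLTAGE_VID_OFFSET_SCALE1) /
                        VOLTAGE_VID_OFFSET_SCALE2;
}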

#define MEM_FREQ_LOW_LATENCY        25000
#define MEM_FREQ_HIGH_LATENCY       80000

#define MEM_LATENCY_HIGH            45
#define MEM_LATENCY_LOW             35
#define MEM_LATENCY_ERR             0xFFFF
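
/*
 * Illustrative sketch (not in the original file): how the memory
 * frequency and latency constants above are typically paired - pick a
 * latency figure for a given memory clock (same units as the
 * thresholds).  The helper name is an assumption for this example.
 */
static inline uint16_t example_mem_latency(uint32_t mem_clk)
{
        if (mem_clk < MEM_FREQ_LOW_LATENCY)
                return MEM_LATENCY_HIGH;
        else if (mem_clk < MEM_FREQ_HIGH_LATENCY)
                return MEM_LATENCY_LOW;
        else
                return MEM_LATENCY_ERR;
}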

#define MC_SEQ_MISC0_GDDR5_SHIFT    28
#define MC_SEQ_MISC0_GDDR5_MASK     0xf0000000
#define MC_SEQ_MISC0_GDDR5_VALUE    5
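
/*
 * Illustrative sketch (not in the original file): the three GDDR5
 * macros above are combined like this to test the memory type read
 * back from mmMC_SEQ_MISC0.  Passing the raw register value in as a
 * parameter is an assumption for the example.
 */
static inline bool example_is_gddr5(uint32_t mc_seq_misc0)
{
        return ((mc_seq_misc0 & MC_SEQ_MISC0_GDDR5_MASK) >>
                        MC_SEQ_MISC0_GDDR5_SHIFT) == MC_SEQ_MISC0_GDDR5_VALUE;
}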

#define PCIE_BUS_CLK                10000
#define TCLK                        (PCIE_BUS_CLK / 10)


/** Values for the CG_THERMAL_CTRL::DPM_EVENT_SRC field. */
enum DPM_EVENT_SRC {
        DPM_EVENT_SRC_ANALOG = 0,
        DPM_EVENT_SRC_EXTERNAL = 1,
        DPM_EVENT_SRC_DIGITAL = 2,
        DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,
        DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL = 4
};

static int smu7_avfs_control(struct pp_hwmgr *hwmgr, bool enable);
static const unsigned long PhwVIslands_Magic = (unsigned long)(PHM_VIslands_Magic);
static int smu7_force_clock_level(struct pp_hwmgr *hwmgr,
                enum pp_clock_type type, uint32_t mask);

static struct smu7_power_state *cast_phw_smu7_power_state(
                struct pp_hw_power_state *hw_ps)
{
        PP_ASSERT_WITH_CODE((PhwVIslands_Magic == hw_ps->magic),
                        "Invalid Powerstate Type!",
                        return NULL);

        return (struct smu7_power_state *)hw_ps;
}

static const struct smu7_power_state *cast_const_phw_smu7_power_state(
                const struct pp_hw_power_state *hw_ps)
{
        PP_ASSERT_WITH_CODE((PhwVIslands_Magic == hw_ps->magic),
                        "Invalid Powerstate Type!",
                        return NULL);

        return (const struct smu7_power_state *)hw_ps;
}

/**
 * Find the MC microcode version and store it in the HwMgr struct
 *
 * @param hwmgr  the address of the powerplay hardware manager.
 * @return always 0
 */
static int smu7_get_mc_microcode_version(struct pp_hwmgr *hwmgr)
{
        cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX, 0x9F);

        hwmgr->microcode_version_info.MC = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA);

        return 0;
}

static uint16_t smu7_get_current_pcie_speed(struct pp_hwmgr *hwmgr)
{
        uint32_t speedCntl = 0;

        /* mmPCIE_PORT_INDEX rename as mmPCIE_INDEX */
        speedCntl = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__PCIE,
                        ixPCIE_LC_SPEED_CNTL);
        return((uint16_t)PHM_GET_FIELD(speedCntl,
                        PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE));
}

static int smu7_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr)
{
        uint32_t link_width;

        /* mmPCIE_PORT_INDEX rename as mmPCIE_INDEX */
        link_width = PHM_READ_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
                        PCIE_LC_LINK_WIDTH_CNTL, LC_LINK_WIDTH_RD);

        PP_ASSERT_WITH_CODE((7 >= link_width),
                        "Invalid PCIe lane width!", return 0);

        return decode_pcie_lane_width(link_width);
}
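
/*
 * Illustrative sketch (not in the original file): decode_pcie_lane_width()
 * lives in pppcielanes.c and maps the encoded LC_LINK_WIDTH_RD field
 * (0..7, as the assert above enforces) to a lane count.  The lookup
 * table below shows a typical encoding for this hardware family and
 * should be treated as an assumption, not a copy of the driver's table.
 */
static inline int example_decode_pcie_lane_width(uint32_t encoded)
{
        static const unsigned char lanes[] = { 0, 1, 2, 4, 8, 12, 16, 32 };

        return (encoded < ARRAY_SIZE(lanes)) ? lanes[encoded] : 0;
}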

/**
 * Enable voltage control
 *
 * @param hwmgr  the address of the powerplay hardware manager.
 * @return always 0
 */
static int smu7_enable_smc_voltage_controller(struct pp_hwmgr *hwmgr)
{
        if (hwmgr->feature_mask & PP_SMC_VOLTAGE_CONTROL_MASK)
                smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Voltage_Cntl_Enable);

        return 0;
}

/**
 * Checks if we want to support voltage control
 *
 * @param hwmgr  the address of the powerplay hardware manager.
 */
static bool smu7_voltage_control(const struct pp_hwmgr *hwmgr)
{
        const struct smu7_hwmgr *data =
                        (const struct smu7_hwmgr *)(hwmgr->backend);

        return (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control);
}

/**
 * Enable voltage control
 *
 * @param hwmgr  the address of the powerplay hardware manager.
 * @return always 0
 */
static int smu7_enable_voltage_control(struct pp_hwmgr *hwmgr)
{
        /* enable voltage control */
        PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
                        GENERAL_PWRMGT, VOLT_PWRMGT_EN, 1);

        return 0;
}

static int phm_get_svi2_voltage_table_v0(pp_atomctrl_voltage_table *voltage_table,
                struct phm_clock_voltage_dependency_table *voltage_dependency_table
                )
{
        uint32_t i;

        PP_ASSERT_WITH_CODE((NULL != voltage_table),
                        "Voltage Dependency Table empty.", return -EINVAL;);

        voltage_table->mask_low = 0;
        voltage_table->phase_delay = 0;
        voltage_table->count = voltage_dependency_table->count;

        for (i = 0; i < voltage_dependency_table->count; i++) {
                voltage_table->entries[i].value =
                        voltage_dependency_table->entries[i].v;
                voltage_table->entries[i].smio_low = 0;
        }

        return 0;
}


/**
 * Create Voltage Tables.
 *
 * @param hwmgr  the address of the powerplay hardware manager.
 * @return always 0
 */
static int smu7_construct_voltage_tables(struct pp_hwmgr *hwmgr)
{
        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
        struct phm_ppt_v1_information *table_info =
                        (struct phm_ppt_v1_information *)hwmgr->pptable;
        int result = 0;
        uint32_t tmp;

        if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
                result = atomctrl_get_voltage_table_v3(hwmgr,
                                VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT,
                                &(data->mvdd_voltage_table));
                PP_ASSERT_WITH_CODE((0 == result),
                                "Failed to retrieve MVDD table.",
                                return result);
        } else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
                if (hwmgr->pp_table_version == PP_TABLE_V1)
                        result = phm_get_svi2_mvdd_voltage_table(&(data->mvdd_voltage_table),
                                        table_info->vdd_dep_on_mclk);
                else if (hwmgr->pp_table_version == PP_TABLE_V0)
                        result = phm_get_svi2_voltage_table_v0(&(data->mvdd_voltage_table),
                                        hwmgr->dyn_state.mvdd_dependency_on_mclk);

                PP_ASSERT_WITH_CODE((0 == result),
                                "Failed to retrieve SVI2 MVDD table from dependency table.",
                                return result;);
        }

        if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
                result = atomctrl_get_voltage_table_v3(hwmgr,
                                VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT,
                                &(data->vddci_voltage_table));
                PP_ASSERT_WITH_CODE((0 == result),
                                "Failed to retrieve VDDCI table.",
                                return result);
        } else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
                if (hwmgr->pp_table_version == PP_TABLE_V1)
                        result = phm_get_svi2_vddci_voltage_table(&(data->vddci_voltage_table),
                                        table_info->vdd_dep_on_mclk);
                else if (hwmgr->pp_table_version == PP_TABLE_V0)
                        result = phm_get_svi2_voltage_table_v0(&(data->vddci_voltage_table),
                                        hwmgr->dyn_state.vddci_dependency_on_mclk);
                PP_ASSERT_WITH_CODE((0 == result),
                                "Failed to retrieve SVI2 VDDCI table from dependency table.",
                                return result);
        }

        if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) {
                /* VDDGFX has only SVI2 voltage control */
                result = phm_get_svi2_vdd_voltage_table(&(data->vddgfx_voltage_table),
                                table_info->vddgfx_lookup_table);
                PP_ASSERT_WITH_CODE((0 == result),
                                "Failed to retrieve SVI2 VDDGFX table from lookup table.", return result;);
        }


        if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->voltage_control) {
                result = atomctrl_get_voltage_table_v3(hwmgr,
                                VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT,
                                &data->vddc_voltage_table);
                PP_ASSERT_WITH_CODE((0 == result),
                                "Failed to retrieve VDDC table.", return result;);
        } else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {

                if (hwmgr->pp_table_version == PP_TABLE_V0)
                        result = phm_get_svi2_voltage_table_v0(&data->vddc_voltage_table,
                                        hwmgr->dyn_state.vddc_dependency_on_mclk);
                else if (hwmgr->pp_table_version == PP_TABLE_V1)
                        result = phm_get_svi2_vdd_voltage_table(&(data->vddc_voltage_table),
                                        table_info->vddc_lookup_table);

                PP_ASSERT_WITH_CODE((0 == result),
                                "Failed to retrieve SVI2 VDDC table from dependency table.", return result;);
        }

        tmp = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_VDDC);
        PP_ASSERT_WITH_CODE(
                        (data->vddc_voltage_table.count <= tmp),
                        "Too many voltage values for VDDC. Trimming to fit state table.",
                        phm_trim_voltage_table_to_fit_state_table(tmp,
                                        &(data->vddc_voltage_table)));

        tmp = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_VDDGFX);
        PP_ASSERT_WITH_CODE(
                        (data->vddgfx_voltage_table.count <= tmp),
                        "Too many voltage values for VDDGFX. Trimming to fit state table.",
                        phm_trim_voltage_table_to_fit_state_table(tmp,
                                        &(data->vddgfx_voltage_table)));

        tmp = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_VDDCI);
        PP_ASSERT_WITH_CODE(
                        (data->vddci_voltage_table.count <= tmp),
                        "Too many voltage values for VDDCI. Trimming to fit state table.",
                        phm_trim_voltage_table_to_fit_state_table(tmp,
                                        &(data->vddci_voltage_table)));

        tmp = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_MVDD);
        PP_ASSERT_WITH_CODE(
                        (data->mvdd_voltage_table.count <= tmp),
                        "Too many voltage values for MVDD. Trimming to fit state table.",
                        phm_trim_voltage_table_to_fit_state_table(tmp,
                                        &(data->mvdd_voltage_table)));

        return 0;
}

/**
 * Programs static screen detection parameters
 *
 * @param hwmgr  the address of the powerplay hardware manager.
 * @return always 0
 */
static int smu7_program_static_screen_threshold_parameters(
                struct pp_hwmgr *hwmgr)
{
        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

        /* Set static screen threshold unit */
        PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
                        CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD_UNIT,
                        data->static_screen_threshold_unit);
        /* Set static screen threshold */
        PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
                        CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD,
                        data->static_screen_threshold);

        return 0;
}

/**
 * Setup display gap for glitch free memory clock switching.
 *
 * @param hwmgr  the address of the powerplay hardware manager.
 * @return always 0
 */
static int smu7_enable_display_gap(struct pp_hwmgr *hwmgr)
{
        uint32_t display_gap =
                        cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
                                        ixCG_DISPLAY_GAP_CNTL);

        display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL,
                        DISP_GAP, DISPLAY_GAP_IGNORE);

        display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL,
                        DISP_GAP_MCHG, DISPLAY_GAP_VBLANK);

        cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
                        ixCG_DISPLAY_GAP_CNTL, display_gap);

        return 0;
}

/**
 * Programs activity state transition voting clients
 *
 * @param hwmgr  the address of the powerplay hardware manager.
 * @return always 0
 */
static int smu7_program_voting_clients(struct pp_hwmgr *hwmgr)
{
        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

        /* Clear reset for voting clients before enabling DPM */
        PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
                        SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 0);
        PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
                        SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 0);

        cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
                        ixCG_FREQ_TRAN_VOTING_0, data->voting_rights_clients0);
        cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
                        ixCG_FREQ_TRAN_VOTING_1, data->voting_rights_clients1);
        cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
                        ixCG_FREQ_TRAN_VOTING_2, data->voting_rights_clients2);
        cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
                        ixCG_FREQ_TRAN_VOTING_3, data->voting_rights_clients3);
        cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
                        ixCG_FREQ_TRAN_VOTING_4, data->voting_rights_clients4);
        cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
                        ixCG_FREQ_TRAN_VOTING_5, data->voting_rights_clients5);
        cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
                        ixCG_FREQ_TRAN_VOTING_6, data->voting_rights_clients6);
        cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
                        ixCG_FREQ_TRAN_VOTING_7, data->voting_rights_clients7);

        return 0;
}

static int smu7_clear_voting_clients(struct pp_hwmgr *hwmgr)
{
        /* Reset voting clients before disabling DPM */
        PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
                        SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 1);
        PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
                        SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 1);

        cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
                        ixCG_FREQ_TRAN_VOTING_0, 0);
        cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
                        ixCG_FREQ_TRAN_VOTING_1, 0);
        cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
                        ixCG_FREQ_TRAN_VOTING_2, 0);
        cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
                        ixCG_FREQ_TRAN_VOTING_3, 0);
        cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
                        ixCG_FREQ_TRAN_VOTING_4, 0);
        cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
                        ixCG_FREQ_TRAN_VOTING_5, 0);
        cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
                        ixCG_FREQ_TRAN_VOTING_6, 0);
        cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
                        ixCG_FREQ_TRAN_VOTING_7, 0);

        return 0;
}

/* Copy one arb setting to another and then switch the active set.
 * arb_src and arb_dest are each one of the MC_CG_ARB_FREQ_Fx constants.
 */
static int smu7_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr,
                uint32_t arb_src, uint32_t arb_dest)
{
        uint32_t mc_arb_dram_timing;
        uint32_t mc_arb_dram_timing2;
        uint32_t burst_time;
        uint32_t mc_cg_config;

        switch (arb_src) {
        case MC_CG_ARB_FREQ_F0:
                mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
                mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
                burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
                break;
        case MC_CG_ARB_FREQ_F1:
                mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1);
                mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1);
                burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1);
                break;
        default:
                return -EINVAL;
        }

        switch (arb_dest) {
        case MC_CG_ARB_FREQ_F0:
                cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing);
                cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
                PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0, burst_time);
                break;
        case MC_CG_ARB_FREQ_F1:
                cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
                cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
                PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1, burst_time);
                break;
        default:
                return -EINVAL;
        }

        mc_cg_config = cgs_read_register(hwmgr->device, mmMC_CG_CONFIG);
        mc_cg_config |= 0x0000000F;
        cgs_write_register(hwmgr->device, mmMC_CG_CONFIG, mc_cg_config);
        PHM_WRITE_FIELD(hwmgr->device, MC_ARB_CG, CG_ARB_REQ, arb_dest);

        return 0;
}

static int smu7_reset_to_default(struct pp_hwmgr *hwmgr)
{
        return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_ResetToDefaults);
}

/**
 * Initial switch from ARB F0->F1
 *
 * This function is to be called from the SetPowerState table.
 *
 * @param hwmgr  the address of the powerplay hardware manager.
 * @return always 0
 */
static int smu7_initial_switch_from_arbf0_to_f1(struct pp_hwmgr *hwmgr)
{
        return smu7_copy_and_switch_arb_sets(hwmgr,
                        MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
}

static int smu7_force_switch_to_arbf0(struct pp_hwmgr *hwmgr)
{
        uint32_t tmp;

        tmp = (cgs_read_ind_register(hwmgr->device,
                        CGS_IND_REG__SMC, ixSMC_SCRATCH9) &
                        0x0000ff00) >> 8;

        if (tmp == MC_CG_ARB_FREQ_F0)
                return 0;

        return smu7_copy_and_switch_arb_sets(hwmgr,
                        tmp, MC_CG_ARB_FREQ_F0);
}

static int smu7_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
{
        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

        struct phm_ppt_v1_information *table_info =
                        (struct phm_ppt_v1_information *)(hwmgr->pptable);
        struct phm_ppt_v1_pcie_table *pcie_table = NULL;

        uint32_t i, max_entry;
        uint32_t tmp;

        PP_ASSERT_WITH_CODE((data->use_pcie_performance_levels ||
                        data->use_pcie_power_saving_levels), "No pcie performance levels!",
                        return -EINVAL);

        if (table_info != NULL)
                pcie_table = table_info->pcie_table;

        if (data->use_pcie_performance_levels &&
                        !data->use_pcie_power_saving_levels) {
                data->pcie_gen_power_saving = data->pcie_gen_performance;
                data->pcie_lane_power_saving = data->pcie_lane_performance;
        } else if (!data->use_pcie_performance_levels &&
                        data->use_pcie_power_saving_levels) {
                data->pcie_gen_performance = data->pcie_gen_power_saving;
                data->pcie_lane_performance = data->pcie_lane_power_saving;
        }
        tmp = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_LINK);
        phm_reset_single_dpm_table(&data->dpm_table.pcie_speed_table,
                        tmp,
                        MAX_REGULAR_DPM_NUMBER);

        if (pcie_table != NULL) {
                /* max_entry is used to make sure we reserve one PCIE level
                 * for the boot level (fix for A+A PSPP issue).
                 * If the PCIE table from the PPTable has a ULV entry + 8 entries,
                 * then ignore the last entry. */
                max_entry = (tmp < pcie_table->count) ? tmp : pcie_table->count;
                for (i = 1; i < max_entry; i++) {
                        phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, i - 1,
                                        get_pcie_gen_support(data->pcie_gen_cap,
                                                        pcie_table->entries[i].gen_speed),
                                        get_pcie_lane_support(data->pcie_lane_cap,
                                                        pcie_table->entries[i].lane_width));
                }
                data->dpm_table.pcie_speed_table.count = max_entry - 1;
                smum_update_smc_table(hwmgr, SMU_BIF_TABLE);
        } else {
                /* Hardcode Pcie Table */
                phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 0,
                                get_pcie_gen_support(data->pcie_gen_cap,
                                                PP_Min_PCIEGen),
                                get_pcie_lane_support(data->pcie_lane_cap,
                                                PP_Max_PCIELane));
                phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 1,
                                get_pcie_gen_support(data->pcie_gen_cap,
                                                PP_Min_PCIEGen),
                                get_pcie_lane_support(data->pcie_lane_cap,
                                                PP_Max_PCIELane));
                phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 2,
                                get_pcie_gen_support(data->pcie_gen_cap,
                                                PP_Max_PCIEGen),
                                get_pcie_lane_support(data->pcie_lane_cap,
                                                PP_Max_PCIELane));
                phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 3,
                                get_pcie_gen_support(data->pcie_gen_cap,
                                                PP_Max_PCIEGen),
                                get_pcie_lane_support(data->pcie_lane_cap,
                                                PP_Max_PCIELane));
                phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 4,
                                get_pcie_gen_support(data->pcie_gen_cap,
                                                PP_Max_PCIEGen),
                                get_pcie_lane_support(data->pcie_lane_cap,
                                                PP_Max_PCIELane));
                phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 5,
                                get_pcie_gen_support(data->pcie_gen_cap,
                                                PP_Max_PCIEGen),
                                get_pcie_lane_support(data->pcie_lane_cap,
                                                PP_Max_PCIELane));

                data->dpm_table.pcie_speed_table.count = 6;
        }
        /* Populate last level for boot PCIE level, but do not increment count. */
        phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table,
                        data->dpm_table.pcie_speed_table.count,
                        get_pcie_gen_support(data->pcie_gen_cap,
                                        PP_Min_PCIEGen),
                        get_pcie_lane_support(data->pcie_lane_cap,
                                        PP_Max_PCIELane));

        return 0;
}

static int smu7_reset_dpm_tables(struct pp_hwmgr *hwmgr)
{
        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

        memset(&(data->dpm_table), 0x00, sizeof(data->dpm_table));

        phm_reset_single_dpm_table(
                        &data->dpm_table.sclk_table,
                        smum_get_mac_definition(hwmgr->smumgr,
                                        SMU_MAX_LEVELS_GRAPHICS),
                        MAX_REGULAR_DPM_NUMBER);
        phm_reset_single_dpm_table(
                        &data->dpm_table.mclk_table,
                        smum_get_mac_definition(hwmgr->smumgr,
                                        SMU_MAX_LEVELS_MEMORY), MAX_REGULAR_DPM_NUMBER);

        phm_reset_single_dpm_table(
                        &data->dpm_table.vddc_table,
                        smum_get_mac_definition(hwmgr->smumgr,
                                        SMU_MAX_LEVELS_VDDC),
                        MAX_REGULAR_DPM_NUMBER);
        phm_reset_single_dpm_table(
                        &data->dpm_table.vddci_table,
                        smum_get_mac_definition(hwmgr->smumgr,
                                        SMU_MAX_LEVELS_VDDCI), MAX_REGULAR_DPM_NUMBER);

        phm_reset_single_dpm_table(
                        &data->dpm_table.mvdd_table,
                        smum_get_mac_definition(hwmgr->smumgr,
                                        SMU_MAX_LEVELS_MVDD),
                        MAX_REGULAR_DPM_NUMBER);
        return 0;
}
/*
 * Initialize all DPM state tables for SMU7 based on the dependency
 * tables.  The dynamic state patching function will then trim these
 * state tables to the allowed range based on the power policy or
 * external client requests, such as UVD requests, etc.
 */

static int smu7_setup_dpm_tables_v0(struct pp_hwmgr *hwmgr)
{
        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
        struct phm_clock_voltage_dependency_table *allowed_vdd_sclk_table =
                hwmgr->dyn_state.vddc_dependency_on_sclk;
        struct phm_clock_voltage_dependency_table *allowed_vdd_mclk_table =
                hwmgr->dyn_state.vddc_dependency_on_mclk;
        struct phm_cac_leakage_table *std_voltage_table =
                hwmgr->dyn_state.cac_leakage_table;
        uint32_t i;

        PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table != NULL,
                "SCLK dependency table is missing. This table is mandatory", return -EINVAL);
        PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table->count >= 1,
                "SCLK dependency table is empty. This table is mandatory", return -EINVAL);

        PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL,
                "MCLK dependency table is missing. This table is mandatory", return -EINVAL);
        PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table->count >= 1,
                "MCLK dependency table is empty. This table is mandatory", return -EINVAL);


        /* Initialize Sclk DPM table based on allowed Sclk values */
        data->dpm_table.sclk_table.count = 0;

        for (i = 0; i < allowed_vdd_sclk_table->count; i++) {
                if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count-1].value !=
                                allowed_vdd_sclk_table->entries[i].clk) {
                        data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value =
                                allowed_vdd_sclk_table->entries[i].clk;
                        data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled = 1; /*(i==0) ? 1 : 0; to do */
                        data->dpm_table.sclk_table.count++;
                }
        }

        PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL,
                "MCLK dependency table is missing. This table is mandatory", return -EINVAL);
        /* Initialize Mclk DPM table based on allowed Mclk values */
        data->dpm_table.mclk_table.count = 0;
        for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
                if (i == 0 || data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count-1].value !=
                                allowed_vdd_mclk_table->entries[i].clk) {
                        data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value =
                                allowed_vdd_mclk_table->entries[i].clk;
                        data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled = 1; /*(i==0) ? 1 : 0; */
                        data->dpm_table.mclk_table.count++;
                }
        }

        /* Initialize Vddc DPM table based on allowed Vddc values, and populate corresponding std values. */
        for (i = 0; i < allowed_vdd_sclk_table->count; i++) {
                data->dpm_table.vddc_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
                data->dpm_table.vddc_table.dpm_levels[i].param1 = std_voltage_table->entries[i].Leakage;
                /* param1 is for corresponding std voltage */
                data->dpm_table.vddc_table.dpm_levels[i].enabled = 1;
        }

        data->dpm_table.vddc_table.count = allowed_vdd_sclk_table->count;
        allowed_vdd_mclk_table = hwmgr->dyn_state.vddci_dependency_on_mclk;

        if (NULL != allowed_vdd_mclk_table) {
                /* Initialize Vddci DPM table based on allowed Mclk values */
                for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
                        data->dpm_table.vddci_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
                        data->dpm_table.vddci_table.dpm_levels[i].enabled = 1;
                }
                data->dpm_table.vddci_table.count = allowed_vdd_mclk_table->count;
        }

        allowed_vdd_mclk_table = hwmgr->dyn_state.mvdd_dependency_on_mclk;

        if (NULL != allowed_vdd_mclk_table) {
                /*
                 * Initialize MVDD DPM table based on allowed Mclk
                 * values
                 */
                for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
                        data->dpm_table.mvdd_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
                        data->dpm_table.mvdd_table.dpm_levels[i].enabled = 1;
                }
                data->dpm_table.mvdd_table.count = allowed_vdd_mclk_table->count;
        }

        return 0;
}

static int smu7_setup_dpm_tables_v1(struct pp_hwmgr *hwmgr)
{
        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
        struct phm_ppt_v1_information *table_info =
                        (struct phm_ppt_v1_information *)(hwmgr->pptable);
        uint32_t i;

        struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
        struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;

        if (table_info == NULL)
                return -EINVAL;

        dep_sclk_table = table_info->vdd_dep_on_sclk;
        dep_mclk_table = table_info->vdd_dep_on_mclk;

        PP_ASSERT_WITH_CODE(dep_sclk_table != NULL,
                        "SCLK dependency table is missing.",
                        return -EINVAL);
        PP_ASSERT_WITH_CODE(dep_sclk_table->count >= 1,
                        "SCLK dependency table count is 0.",
                        return -EINVAL);

        PP_ASSERT_WITH_CODE(dep_mclk_table != NULL,
                        "MCLK dependency table is missing.",
                        return -EINVAL);
        PP_ASSERT_WITH_CODE(dep_mclk_table->count >= 1,
                        "MCLK dependency table count is 0.",
                        return -EINVAL);

        /* Initialize Sclk DPM table based on allowed Sclk values */
        data->dpm_table.sclk_table.count = 0;
        for (i = 0; i < dep_sclk_table->count; i++) {
                if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count - 1].value !=
                                dep_sclk_table->entries[i].clk) {

                        data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value =
                                dep_sclk_table->entries[i].clk;

                        data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled =
                                (i == 0) ? true : false;
                        data->dpm_table.sclk_table.count++;
                }
        }

        /* Initialize Mclk DPM table based on allowed Mclk values */
        data->dpm_table.mclk_table.count = 0;
        for (i = 0; i < dep_mclk_table->count; i++) {
                if (i == 0 || data->dpm_table.mclk_table.dpm_levels
                                [data->dpm_table.mclk_table.count - 1].value !=
                                dep_mclk_table->entries[i].clk) {
                        data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value =
                                dep_mclk_table->entries[i].clk;
                        data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled =
                                (i == 0) ? true : false;
                        data->dpm_table.mclk_table.count++;
                }
        }

        return 0;
}

static int smu7_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
{
        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

        smu7_reset_dpm_tables(hwmgr);

        if (hwmgr->pp_table_version == PP_TABLE_V1)
                smu7_setup_dpm_tables_v1(hwmgr);
        else if (hwmgr->pp_table_version == PP_TABLE_V0)
                smu7_setup_dpm_tables_v0(hwmgr);

        smu7_setup_default_pcie_table(hwmgr);

        /* save a copy of the default DPM table */
        memcpy(&(data->golden_dpm_table), &(data->dpm_table),
                        sizeof(struct smu7_dpm_table));
        return 0;
}

uint32_t smu7_get_xclk(struct pp_hwmgr *hwmgr)
{
        uint32_t reference_clock, tmp;
        struct cgs_display_info info = {0};
        struct cgs_mode_info mode_info;

        info.mode_info = &mode_info;

        tmp = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK);

        if (tmp)
                return TCLK;

        cgs_get_active_displays_info(hwmgr->device, &info);
        reference_clock = mode_info.ref_clock;

        tmp = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_CLKPIN_CNTL, XTALIN_DIVIDE);

        if (0 != tmp)
                return reference_clock / 4;

        return reference_clock;
}

static int smu7_enable_vrhot_gpio_interrupt(struct pp_hwmgr *hwmgr)
{
        if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
                        PHM_PlatformCaps_RegulatorHot))
                return smum_send_msg_to_smc(hwmgr->smumgr,
                                PPSMC_MSG_EnableVRHotGPIOInterrupt);

        return 0;
}

static int smu7_enable_sclk_control(struct pp_hwmgr *hwmgr)
{
        PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
                        SCLK_PWRMGT_OFF, 0);
        return 0;
}

static int smu7_enable_ulv(struct pp_hwmgr *hwmgr)
{
        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

        if (data->ulv_supported)
                return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_EnableULV);

        return 0;
}

static int smu7_disable_ulv(struct pp_hwmgr *hwmgr)
{
        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

        if (data->ulv_supported)
                return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DisableULV);

        return 0;
}

static int smu7_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
{
        if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
                        PHM_PlatformCaps_SclkDeepSleep)) {
                if (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_MASTER_DeepSleep_ON))
                        PP_ASSERT_WITH_CODE(false,
                                        "Attempt to enable Master Deep Sleep switch failed!",
                                        return -EINVAL);
        } else {
                if (smum_send_msg_to_smc(hwmgr->smumgr,
                                PPSMC_MSG_MASTER_DeepSleep_OFF)) {
                        PP_ASSERT_WITH_CODE(false,
                                        "Attempt to disable Master Deep Sleep switch failed!",
                                        return -EINVAL);
                }
        }

        return 0;
}

static int smu7_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
{
        if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
                        PHM_PlatformCaps_SclkDeepSleep)) {
                if (smum_send_msg_to_smc(hwmgr->smumgr,
                                PPSMC_MSG_MASTER_DeepSleep_OFF)) {
                        PP_ASSERT_WITH_CODE(false,
                                        "Attempt to disable Master Deep Sleep switch failed!",
                                        return -EINVAL);
                }
        }

        return 0;
}

static int smu7_disable_handshake_uvd(struct pp_hwmgr *hwmgr)
{
        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
        uint32_t soft_register_value = 0;
        uint32_t handshake_disables_offset = data->soft_regs_start
                        + smum_get_offsetof(hwmgr->smumgr,
                                        SMU_SoftRegisters, HandshakeDisables);

        soft_register_value = cgs_read_ind_register(hwmgr->device,
                        CGS_IND_REG__SMC, handshake_disables_offset);
        soft_register_value |= smum_get_mac_definition(hwmgr->smumgr,
                        SMU_UVD_MCLK_HANDSHAKE_DISABLE);
        cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
                        handshake_disables_offset, soft_register_value);
        return 0;
}

static int smu7_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
{
        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

        /* enable SCLK dpm */
        if (!data->sclk_dpm_key_disabled)
                PP_ASSERT_WITH_CODE(
                                (0 == smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DPM_Enable)),
                                "Failed to enable SCLK DPM during DPM Start Function!",
                                return -EINVAL);

        /* enable MCLK dpm */
        if (0 == data->mclk_dpm_key_disabled) {
                if (!(hwmgr->feature_mask & PP_UVD_HANDSHAKE_MASK))
                        smu7_disable_handshake_uvd(hwmgr);
                PP_ASSERT_WITH_CODE(
                                (0 == smum_send_msg_to_smc(hwmgr->smumgr,
                                                PPSMC_MSG_MCLKDPM_Enable)),
                                "Failed to enable MCLK DPM during DPM Start Function!",
                                return -EINVAL);

                PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1);

                cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x5);
                cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x5);
                cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x100005);
                udelay(10);
                cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x400005);
                cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x400005);
                cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x500005);
        }

        return 0;
}

static int smu7_start_dpm(struct pp_hwmgr *hwmgr)
{
        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

        /* enable general power management */

        PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
                        GLOBAL_PWRMGT_EN, 1);

        /* enable sclk deep sleep */

        PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
                        DYNAMIC_PM_EN, 1);

        /* prepare for PCIE DPM */

        cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
                        data->soft_regs_start +
                        smum_get_offsetof(hwmgr->smumgr, SMU_SoftRegisters,
                                        VoltageChangeTimeout), 0x1000);
        PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
                        SWRST_COMMAND_1, RESETLC, 0x0);

        if (smu7_enable_sclk_mclk_dpm(hwmgr)) {
                pr_err("Failed to enable Sclk DPM and Mclk DPM!");
                return -EINVAL;
        }

        /* enable PCIE dpm */
        if (0 == data->pcie_dpm_key_disabled) {
                PP_ASSERT_WITH_CODE(
                                (0 == smum_send_msg_to_smc(hwmgr->smumgr,
                                                PPSMC_MSG_PCIeDPM_Enable)),
                                "Failed to enable pcie DPM during DPM Start Function!",
                                return -EINVAL);
        }

        if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
                        PHM_PlatformCaps_Falcon_QuickTransition)) {
                PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(hwmgr->smumgr,
                                PPSMC_MSG_EnableACDCGPIOInterrupt)),
                                "Failed to enable AC DC GPIO Interrupt!",
                                );
        }

        return 0;
}

static int smu7_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
{
        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

        /* disable SCLK dpm */
        if (!data->sclk_dpm_key_disabled) {
                PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
                                "Trying to disable SCLK DPM when DPM is disabled",
                                return 0);
                smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DPM_Disable);
        }

        /* disable MCLK dpm */
        if (!data->mclk_dpm_key_disabled) {
                PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
                                "Trying to disable MCLK DPM when DPM is disabled",
                                return 0);
                smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_MCLKDPM_Disable);
        }

        return 0;
}

static int smu7_stop_dpm(struct pp_hwmgr *hwmgr)
{
        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

        /* disable general power management */
        PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
                        GLOBAL_PWRMGT_EN, 0);
        /* disable sclk deep sleep */
        PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
                        DYNAMIC_PM_EN, 0);

        /* disable PCIE dpm */
        if (!data->pcie_dpm_key_disabled) {
                PP_ASSERT_WITH_CODE(
                                (smum_send_msg_to_smc(hwmgr->smumgr,
                                                PPSMC_MSG_PCIeDPM_Disable) == 0),
                                "Failed to disable pcie DPM during DPM Stop Function!",
                                return -EINVAL);
        }

        smu7_disable_sclk_mclk_dpm(hwmgr);

        PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
                        "Trying to disable voltage DPM when DPM is disabled",
                        return 0);

        smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Voltage_Cntl_Disable);

        return 0;
}

static void smu7_set_dpm_event_sources(struct pp_hwmgr *hwmgr, uint32_t sources)
{
        bool protection;
        enum DPM_EVENT_SRC src;

        switch (sources) {
        default:
                pr_err("Unknown throttling event sources.");
                /* fall through */
        case 0:
                protection = false;
                /* src is unused */
                break;
        case (1 << PHM_AutoThrottleSource_Thermal):
                protection = true;
                src = DPM_EVENT_SRC_DIGITAL;
                break;
        case (1 << PHM_AutoThrottleSource_External):
                protection = true;
                src = DPM_EVENT_SRC_EXTERNAL;
                break;
        case (1 << PHM_AutoThrottleSource_External) |
                        (1 << PHM_AutoThrottleSource_Thermal):
                protection = true;
                src = DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL;
                break;
        }
        /* Order matters - don't enable thermal protection for the wrong source. */
        if (protection) {
                PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_CTRL,
                                DPM_EVENT_SRC, src);
                PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
                                THERMAL_PROTECTION_DIS,
                                !phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
                                                PHM_PlatformCaps_ThermalController));
        } else
                PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
                                THERMAL_PROTECTION_DIS, 1);
}

static int smu7_enable_auto_throttle_source(struct pp_hwmgr *hwmgr,
                PHM_AutoThrottleSource source)
{
        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

        if (!(data->active_auto_throttle_sources & (1 << source))) {
                data->active_auto_throttle_sources |= 1 << source;
                smu7_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
        }
        return 0;
}

static int smu7_enable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
{
        return smu7_enable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
}

static int smu7_disable_auto_throttle_source(struct pp_hwmgr *hwmgr,
                PHM_AutoThrottleSource source)
{
        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

        if (data->active_auto_throttle_sources & (1 << source)) {
                data->active_auto_throttle_sources &= ~(1 << source);
                smu7_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
        }
        return 0;
}

static int smu7_disable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
{
        return smu7_disable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
}

static int smu7_pcie_performance_request(struct pp_hwmgr *hwmgr)
{
        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

        data->pcie_performance_request = true;

        return 0;
}

static int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
        int tmp_result = 0;
        int result = 0;

        tmp_result = (!smum_is_dpm_running(hwmgr)) ? 0 : -1;
        PP_ASSERT_WITH_CODE(tmp_result == 0,
                        "DPM is already running",
                        );

        if (smu7_voltage_control(hwmgr)) {
                tmp_result = smu7_enable_voltage_control(hwmgr);
                PP_ASSERT_WITH_CODE(tmp_result == 0,
                                "Failed to enable voltage control!",
                                result = tmp_result);

                tmp_result = smu7_construct_voltage_tables(hwmgr);
                PP_ASSERT_WITH_CODE((0 == tmp_result),
                                "Failed to construct voltage tables!",
                                result = tmp_result);
        }
        smum_initialize_mc_reg_table(hwmgr);

        if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
                        PHM_PlatformCaps_EngineSpreadSpectrumSupport))
                PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
                                GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 1);

        if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
                        PHM_PlatformCaps_ThermalController))
                PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
                                GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 0);

        tmp_result = smu7_program_static_screen_threshold_parameters(hwmgr);
        PP_ASSERT_WITH_CODE((0 == tmp_result),
                        "Failed to program static screen threshold parameters!",
                        result = tmp_result);

        tmp_result = smu7_enable_display_gap(hwmgr);
        PP_ASSERT_WITH_CODE((0 == tmp_result),
                        "Failed to enable display gap!", result = tmp_result);

        tmp_result = smu7_program_voting_clients(hwmgr);
        PP_ASSERT_WITH_CODE((0 == tmp_result),
                        "Failed to program voting clients!", result = tmp_result);

        tmp_result = smum_process_firmware_header(hwmgr);
        PP_ASSERT_WITH_CODE((0 == tmp_result),
                        "Failed to process firmware header!", result = tmp_result);

        tmp_result = smu7_initial_switch_from_arbf0_to_f1(hwmgr);
        PP_ASSERT_WITH_CODE((0 == tmp_result),
                        "Failed to initialize switch from ArbF0 to F1!",
                        result = tmp_result);

        result = smu7_setup_default_dpm_tables(hwmgr);
        PP_ASSERT_WITH_CODE(0 == result,
                        "Failed to setup default DPM tables!", return result);

        tmp_result = smum_init_smc_table(hwmgr);
        PP_ASSERT_WITH_CODE((0 == tmp_result),
                        "Failed to initialize SMC table!", result = tmp_result);

        tmp_result = smu7_enable_vrhot_gpio_interrupt(hwmgr);
        PP_ASSERT_WITH_CODE((0 == tmp_result),
                        "Failed to enable VR hot GPIO interrupt!", result = tmp_result);

        smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_NoDisplay);

        tmp_result = smu7_enable_sclk_control(hwmgr);
        PP_ASSERT_WITH_CODE((0 == tmp_result),
                        "Failed to enable SCLK control!", result = tmp_result);

        tmp_result = smu7_enable_smc_voltage_controller(hwmgr);
        PP_ASSERT_WITH_CODE((0 == tmp_result),
                        "Failed to enable voltage control!", result = tmp_result);

        tmp_result = smu7_enable_ulv(hwmgr);
        PP_ASSERT_WITH_CODE((0 == tmp_result),
                        "Failed to enable ULV!", result = tmp_result);

        tmp_result = smu7_enable_deep_sleep_master_switch(hwmgr);
        PP_ASSERT_WITH_CODE((0 == tmp_result),
                        "Failed to enable deep sleep master switch!", result = tmp_result);

        tmp_result = smu7_enable_didt_config(hwmgr);
        PP_ASSERT_WITH_CODE((tmp_result == 0),
                        "Failed to enable DIDT config!", result = tmp_result);

        tmp_result = smu7_start_dpm(hwmgr);
        PP_ASSERT_WITH_CODE((0 == tmp_result),
                        "Failed to start DPM!", result = tmp_result);

        tmp_result = smu7_enable_smc_cac(hwmgr);
        PP_ASSERT_WITH_CODE((0 == tmp_result),
                        "Failed to enable SMC CAC!", result = tmp_result);

        tmp_result = smu7_enable_power_containment(hwmgr);
        PP_ASSERT_WITH_CODE((0 == tmp_result),
                        "Failed to enable power containment!", result = tmp_result);

        tmp_result = smu7_power_control_set_level(hwmgr);
        PP_ASSERT_WITH_CODE((0 == tmp_result),
                        "Failed to power control set level!", result = tmp_result);

        tmp_result = smu7_enable_thermal_auto_throttle(hwmgr);
        PP_ASSERT_WITH_CODE((0 == tmp_result),
                        "Failed to enable thermal auto throttle!", result = tmp_result);

        tmp_result = smu7_pcie_performance_request(hwmgr);
        PP_ASSERT_WITH_CODE((0 == tmp_result),
                        "pcie performance request failed!", result = tmp_result);

        return 0;
}

int smu7_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
        int tmp_result, result = 0;

        tmp_result = (smum_is_dpm_running(hwmgr)) ? 0 : -1;
        PP_ASSERT_WITH_CODE(tmp_result == 0,
                        "DPM is not running right now, no need to disable DPM!",
                        return 0);

        if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
                        PHM_PlatformCaps_ThermalController))
                PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
                                GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 1);

        tmp_result = smu7_disable_power_containment(hwmgr);
        PP_ASSERT_WITH_CODE((tmp_result == 0),
                        "Failed to disable power containment!", result = tmp_result);

        tmp_result = smu7_disable_smc_cac(hwmgr);
        PP_ASSERT_WITH_CODE((tmp_result == 0),
                        "Failed to disable SMC CAC!", result = tmp_result);

        tmp_result = smu7_disable_didt_config(hwmgr);
        PP_ASSERT_WITH_CODE((tmp_result == 0),
                        "Failed to disable DIDT!", result = tmp_result);

        PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
                        CG_SPLL_SPREAD_SPECTRUM, SSEN, 0);
        PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
                        GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 0);

        tmp_result = smu7_disable_thermal_auto_throttle(hwmgr);
        PP_ASSERT_WITH_CODE((tmp_result == 0),
                        "Failed to disable thermal auto throttle!", result = tmp_result);

        tmp_result = smu7_avfs_control(hwmgr, false);
        PP_ASSERT_WITH_CODE((tmp_result == 0),
                        "Failed to disable AVFS!", result = tmp_result);

        tmp_result = smu7_stop_dpm(hwmgr);
        PP_ASSERT_WITH_CODE((tmp_result == 0),
                        "Failed to stop DPM!", result = tmp_result);

        tmp_result = smu7_disable_deep_sleep_master_switch(hwmgr);
        PP_ASSERT_WITH_CODE((tmp_result == 0),
                        "Failed to disable deep sleep master switch!", result = tmp_result);

        tmp_result = smu7_disable_ulv(hwmgr);
        PP_ASSERT_WITH_CODE((tmp_result == 0),
                        "Failed to disable ULV!", result = tmp_result);

        tmp_result = smu7_clear_voting_clients(hwmgr);
        PP_ASSERT_WITH_CODE((tmp_result == 0),
                        "Failed to clear voting clients!", result = tmp_result);

        tmp_result = smu7_reset_to_default(hwmgr);
        PP_ASSERT_WITH_CODE((tmp_result == 0),
                        "Failed to reset to default!", result = tmp_result);

        tmp_result = smu7_force_switch_to_arbf0(hwmgr);
        PP_ASSERT_WITH_CODE((tmp_result == 0),
                        "Failed to force to switch arbf0!", result = tmp_result);

        return result;
}
1343 | ||
1344 | int smu7_reset_asic_tasks(struct pp_hwmgr *hwmgr) | |
1345 | { | |
1346 | ||
1347 | return 0; | |
1348 | } | |
1349 | ||
1350 | static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr) | |
1351 | { | |
1352 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
1353 | struct phm_ppt_v1_information *table_info = | |
1354 | (struct phm_ppt_v1_information *)(hwmgr->pptable); | |
97f40ef0 TSD |
1355 | struct cgs_system_info sys_info = {0}; |
1356 | int result; | |
599a7e9f RZ |
1357 | |
1358 | data->dll_default_on = false; | |
1359 | data->mclk_dpm0_activity_target = 0xa; | |
1360 | data->mclk_activity_target = SMU7_MCLK_TARGETACTIVITY_DFLT; | |
1361 | data->vddc_vddgfx_delta = 300; | |
1362 | data->static_screen_threshold = SMU7_STATICSCREENTHRESHOLD_DFLT; | |
1363 | data->static_screen_threshold_unit = SMU7_STATICSCREENTHRESHOLDUNIT_DFLT; | |
1364 | data->voting_rights_clients0 = SMU7_VOTINGRIGHTSCLIENTS_DFLT0; | |
1365 | data->voting_rights_clients1 = SMU7_VOTINGRIGHTSCLIENTS_DFLT1; | |
1366 | data->voting_rights_clients2 = SMU7_VOTINGRIGHTSCLIENTS_DFLT2; | |
1367 | data->voting_rights_clients3 = SMU7_VOTINGRIGHTSCLIENTS_DFLT3; | |
1368 | data->voting_rights_clients4 = SMU7_VOTINGRIGHTSCLIENTS_DFLT4; | |
1369 | data->voting_rights_clients5 = SMU7_VOTINGRIGHTSCLIENTS_DFLT5; | |
1370 | data->voting_rights_clients6 = SMU7_VOTINGRIGHTSCLIENTS_DFLT6; | |
1371 | data->voting_rights_clients7 = SMU7_VOTINGRIGHTSCLIENTS_DFLT7; | |
1372 | ||
1373 | data->mclk_dpm_key_disabled = hwmgr->feature_mask & PP_MCLK_DPM_MASK ? false : true; | |
1374 | data->sclk_dpm_key_disabled = hwmgr->feature_mask & PP_SCLK_DPM_MASK ? false : true; | |
1375 | data->pcie_dpm_key_disabled = hwmgr->feature_mask & PP_PCIE_DPM_MASK ? false : true; | |
1376 | /* need to set voltage control types before EVV patching */ | |
1377 | data->voltage_control = SMU7_VOLTAGE_CONTROL_NONE; | |
1378 | data->vddci_control = SMU7_VOLTAGE_CONTROL_NONE; | |
1379 | data->mvdd_control = SMU7_VOLTAGE_CONTROL_NONE; | |
1380 | data->enable_tdc_limit_feature = true; | |
1381 | data->enable_pkg_pwr_tracking_feature = true; | |
1382 | data->force_pcie_gen = PP_PCIEGenInvalid; | |
1383 | data->ulv_supported = hwmgr->feature_mask & PP_ULV_MASK ? true : false; | |
1384 | ||
187368a5 RZ |
1385 | if (hwmgr->chip_id == CHIP_POLARIS12 || hwmgr->smumgr->is_kicker) { |
1386 | uint8_t tmp1, tmp2; | |
1387 | uint16_t tmp3 = 0; | |
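| /* Bits [6:5] of the SVI2 info select the VDDC phase-shed profile; the | |
| * two bits are swapped below, presumably to match the encoding the SMU | |
| * firmware expects. */ | |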
1388 | atomctrl_get_svi2_info(hwmgr, VOLTAGE_TYPE_VDDC, &tmp1, &tmp2, | |
1389 | &tmp3); | |
1390 | tmp3 = (tmp3 >> 5) & 0x3; | |
1391 | data->vddc_phase_shed_control = ((tmp3 << 1) | (tmp3 >> 1)) & 0x3; | |
1392 | } | |
1393 | ||
599a7e9f RZ |
1394 | data->fast_watermark_threshold = 100; |
1395 | if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, | |
1396 | VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2)) | |
1397 | data->voltage_control = SMU7_VOLTAGE_CONTROL_BY_SVID2; | |
1398 | ||
1399 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, | |
1400 | PHM_PlatformCaps_ControlVDDGFX)) { | |
1401 | if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, | |
1402 | VOLTAGE_TYPE_VDDGFX, VOLTAGE_OBJ_SVID2)) { | |
1403 | data->vdd_gfx_control = SMU7_VOLTAGE_CONTROL_BY_SVID2; | |
1404 | } | |
1405 | } | |
1406 | ||
1407 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, | |
1408 | PHM_PlatformCaps_EnableMVDDControl)) { | |
1409 | if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, | |
1410 | VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT)) | |
1411 | data->mvdd_control = SMU7_VOLTAGE_CONTROL_BY_GPIO; | |
1412 | else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, | |
1413 | VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2)) | |
1414 | data->mvdd_control = SMU7_VOLTAGE_CONTROL_BY_SVID2; | |
1415 | } | |
1416 | ||
1417 | if (SMU7_VOLTAGE_CONTROL_NONE == data->vdd_gfx_control) { | |
1418 | phm_cap_unset(hwmgr->platform_descriptor.platformCaps, | |
1419 | PHM_PlatformCaps_ControlVDDGFX); | |
1420 | } | |
1421 | ||
1422 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, | |
1423 | PHM_PlatformCaps_ControlVDDCI)) { | |
1424 | if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, | |
1425 | VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT)) | |
1426 | data->vddci_control = SMU7_VOLTAGE_CONTROL_BY_GPIO; | |
1427 | else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, | |
1428 | VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2)) | |
1429 | data->vddci_control = SMU7_VOLTAGE_CONTROL_BY_SVID2; | |
1430 | } | |
1431 | ||
1432 | if (data->mvdd_control == SMU7_VOLTAGE_CONTROL_NONE) | |
1433 | phm_cap_unset(hwmgr->platform_descriptor.platformCaps, | |
1434 | PHM_PlatformCaps_EnableMVDDControl); | |
1435 | ||
1436 | if (data->vddci_control == SMU7_VOLTAGE_CONTROL_NONE) | |
1437 | phm_cap_unset(hwmgr->platform_descriptor.platformCaps, | |
1438 | PHM_PlatformCaps_ControlVDDCI); | |
1439 | ||
53b963b6 | 1440 | if ((hwmgr->pp_table_version != PP_TABLE_V0) && (hwmgr->feature_mask & PP_CLOCK_STRETCH_MASK) |
599a7e9f RZ |
1441 | && (table_info->cac_dtp_table->usClockStretchAmount != 0)) |
1442 | phm_cap_set(hwmgr->platform_descriptor.platformCaps, | |
1443 | PHM_PlatformCaps_ClockStretcher); | |
1444 | ||
1445 | data->pcie_gen_performance.max = PP_PCIEGen1; | |
1446 | data->pcie_gen_performance.min = PP_PCIEGen3; | |
1447 | data->pcie_gen_power_saving.max = PP_PCIEGen1; | |
1448 | data->pcie_gen_power_saving.min = PP_PCIEGen3; | |
1449 | data->pcie_lane_performance.max = 0; | |
1450 | data->pcie_lane_performance.min = 16; | |
1451 | data->pcie_lane_power_saving.max = 0; | |
1452 | data->pcie_lane_power_saving.min = 16; | |
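| /* The gen/lane ranges are seeded inverted (max below min) so that later | |
| * per-state min/max accumulation converges on the real bounds. */ | |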
97f40ef0 TSD |
1453 | |
1454 | sys_info.size = sizeof(struct cgs_system_info); | |
1455 | sys_info.info_id = CGS_SYSTEM_INFO_PG_FLAGS; | |
1456 | result = cgs_query_system_info(hwmgr->device, &sys_info); | |
1457 | if (!result) { | |
1458 | if (sys_info.value & AMD_PG_SUPPORT_UVD) | |
1459 | phm_cap_set(hwmgr->platform_descriptor.platformCaps, | |
1460 | PHM_PlatformCaps_UVDPowerGating); | |
1461 | if (sys_info.value & AMD_PG_SUPPORT_VCE) | |
1462 | phm_cap_set(hwmgr->platform_descriptor.platformCaps, | |
1463 | PHM_PlatformCaps_VCEPowerGating); | |
1464 | } | |
599a7e9f RZ |
1465 | } |
1466 | ||
1467 | /** | |
1468 | * Get Leakage VDDC based on leakage ID. | |
1469 | * | |
1470 | * @param hwmgr the address of the powerplay hardware manager. | |
1471 | * @return 0 on success; -EINVAL if an invalid EVV voltage is read. | |
1472 | */ | |
1473 | static int smu7_get_evv_voltages(struct pp_hwmgr *hwmgr) | |
1474 | { | |
1475 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
1476 | uint16_t vv_id; | |
1477 | uint16_t vddc = 0; | |
1478 | uint16_t vddgfx = 0; | |
1479 | uint16_t i, j; | |
1480 | uint32_t sclk = 0; | |
1481 | struct phm_ppt_v1_information *table_info = | |
1482 | (struct phm_ppt_v1_information *)hwmgr->pptable; | |
1483 | struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = NULL; | |
1484 | ||
1485 | ||
599a7e9f RZ |
1486 | for (i = 0; i < SMU7_MAX_LEAKAGE_COUNT; i++) { |
1487 | vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i; | |
1488 | ||
1489 | if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) { | |
0f12f73c AD |
1490 | if ((hwmgr->pp_table_version == PP_TABLE_V1) |
1491 | && !phm_get_sclk_for_voltage_evv(hwmgr, | |
599a7e9f RZ |
1492 | table_info->vddgfx_lookup_table, vv_id, &sclk)) { |
1493 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, | |
1494 | PHM_PlatformCaps_ClockStretcher)) { | |
0f12f73c AD |
1495 | sclk_table = table_info->vdd_dep_on_sclk; |
1496 | ||
599a7e9f RZ |
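| /* When the matched SCLK level has clock stretching disabled, evaluate | |
| * EVV 50 MHz higher (clocks are in 10 kHz units, hence +5000). */ | |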
1497 | for (j = 1; j < sclk_table->count; j++) { |
1498 | if (sclk_table->entries[j].clk == sclk && | |
1499 | sclk_table->entries[j].cks_enable == 0) { | |
1500 | sclk += 5000; | |
1501 | break; | |
1502 | } | |
1503 | } | |
1504 | } | |
1505 | if (0 == atomctrl_get_voltage_evv_on_sclk | |
1506 | (hwmgr, VOLTAGE_TYPE_VDDGFX, sclk, | |
1507 | vv_id, &vddgfx)) { | |
1508 | /* need to make sure vddgfx is less than 2V, or else it could burn the ASIC */ | |
1509 | PP_ASSERT_WITH_CODE((vddgfx < 2000 && vddgfx != 0), "Invalid VDDGFX value!", return -EINVAL); | |
1510 | ||
1511 | /* the voltage should not be zero nor equal to leakage ID */ | |
1512 | if (vddgfx != 0 && vddgfx != vv_id) { | |
1513 | data->vddcgfx_leakage.actual_voltage[data->vddcgfx_leakage.count] = vddgfx; | |
1514 | data->vddcgfx_leakage.leakage_id[data->vddcgfx_leakage.count] = vv_id; | |
1515 | data->vddcgfx_leakage.count++; | |
1516 | } | |
1517 | } else { | |
b5c11b8e | 1518 | pr_info("Error retrieving EVV voltage value!\n"); |
599a7e9f RZ |
1519 | } |
1520 | } | |
1521 | } else { | |
599a7e9f RZ |
1522 | if ((hwmgr->pp_table_version == PP_TABLE_V0) |
1523 | || !phm_get_sclk_for_voltage_evv(hwmgr, | |
1524 | table_info->vddc_lookup_table, vv_id, &sclk)) { | |
1525 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, | |
1526 | PHM_PlatformCaps_ClockStretcher)) { | |
0f12f73c AD |
1527 | if (table_info == NULL) |
1528 | return -EINVAL; | |
1529 | sclk_table = table_info->vdd_dep_on_sclk; | |
1530 | ||
599a7e9f RZ |
1531 | for (j = 1; j < sclk_table->count; j++) { |
1532 | if (sclk_table->entries[j].clk == sclk && | |
1533 | sclk_table->entries[j].cks_enable == 0) { | |
1534 | sclk += 5000; | |
1535 | break; | |
1536 | } | |
1537 | } | |
1538 | } | |
1539 | ||
1540 | if (phm_get_voltage_evv_on_sclk(hwmgr, | |
1541 | VOLTAGE_TYPE_VDDC, | |
1542 | sclk, vv_id, &vddc) == 0) { | |
1543 | if (vddc >= 2000 || vddc == 0) | |
1544 | return -EINVAL; | |
1545 | } else { | |
98a36749 | 1546 | pr_warn("failed to retrieve EVV voltage!\n"); | |
599a7e9f RZ |
1547 | continue; |
1548 | } | |
1549 | ||
1550 | /* the voltage should not be zero nor equal to leakage ID */ | |
1551 | if (vddc != 0 && vddc != vv_id) { | |
1552 | data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = (uint16_t)(vddc); | |
1553 | data->vddc_leakage.leakage_id[data->vddc_leakage.count] = vv_id; | |
1554 | data->vddc_leakage.count++; | |
1555 | } | |
1556 | } | |
1557 | } | |
1558 | } | |
1559 | ||
1560 | return 0; | |
1561 | } | |
1562 | ||
1563 | /** | |
1564 | * Change virtual leakage voltage to actual value. | |
1565 | * | |
1566 | * @param hwmgr the address of the powerplay hardware manager. | |
1567 | * @param voltage pointer to the voltage value to patch | |
1568 | * @param leakage_table pointer to the leakage table | |
1569 | */ | |
1570 | static void smu7_patch_ppt_v1_with_vdd_leakage(struct pp_hwmgr *hwmgr, | |
1571 | uint16_t *voltage, struct smu7_leakage_voltage *leakage_table) | |
1572 | { | |
1573 | uint32_t index; | |
1574 | ||
1575 | /* search for leakage voltage ID 0xff01 ~ 0xff08 */ | |
1576 | for (index = 0; index < leakage_table->count; index++) { | |
1577 | /* if this voltage matches a leakage voltage ID */ | |
1578 | /* patch with actual leakage voltage */ | |
1579 | if (leakage_table->leakage_id[index] == *voltage) { | |
1580 | *voltage = leakage_table->actual_voltage[index]; | |
1581 | break; | |
1582 | } | |
1583 | } | |
1584 | ||
1585 | if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0) | |
b5c11b8e | 1586 | pr_err("Voltage value looks like a leakage ID but was not patched\n"); | |
599a7e9f RZ |
1587 | } |
1588 | ||
1589 | /** | |
1590 | * Patch voltage lookup table by EVV leakages. | |
1591 | * | |
1592 | * @param hwmgr the address of the powerplay hardware manager. | |
1593 | * @param lookup_table pointer to the voltage lookup table | |
1594 | * @param leakage_table pointer to the leakage table | |
1595 | * @return always 0 | |
1596 | */ | |
1597 | static int smu7_patch_lookup_table_with_leakage(struct pp_hwmgr *hwmgr, | |
1598 | phm_ppt_v1_voltage_lookup_table *lookup_table, | |
1599 | struct smu7_leakage_voltage *leakage_table) | |
1600 | { | |
1601 | uint32_t i; | |
1602 | ||
1603 | for (i = 0; i < lookup_table->count; i++) | |
1604 | smu7_patch_ppt_v1_with_vdd_leakage(hwmgr, | |
1605 | &lookup_table->entries[i].us_vdd, leakage_table); | |
1606 | ||
1607 | return 0; | |
1608 | } | |
1609 | ||
1610 | static int smu7_patch_clock_voltage_limits_with_vddc_leakage( | |
1611 | struct pp_hwmgr *hwmgr, struct smu7_leakage_voltage *leakage_table, | |
1612 | uint16_t *vddc) | |
1613 | { | |
1614 | struct phm_ppt_v1_information *table_info = | |
1615 | (struct phm_ppt_v1_information *)(hwmgr->pptable); | |
1616 | smu7_patch_ppt_v1_with_vdd_leakage(hwmgr, (uint16_t *)vddc, leakage_table); | |
1617 | hwmgr->dyn_state.max_clock_voltage_on_dc.vddc = | |
1618 | table_info->max_clock_voltage_on_dc.vddc; | |
1619 | return 0; | |
1620 | } | |
1621 | ||
1622 | static int smu7_patch_voltage_dependency_tables_with_lookup_table( | |
1623 | struct pp_hwmgr *hwmgr) | |
1624 | { | |
1625 | uint8_t entry_id; | |
1626 | uint8_t voltage_id; | |
1627 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
1628 | struct phm_ppt_v1_information *table_info = | |
1629 | (struct phm_ppt_v1_information *)(hwmgr->pptable); | |
1630 | ||
1631 | struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = | |
1632 | table_info->vdd_dep_on_sclk; | |
1633 | struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table = | |
1634 | table_info->vdd_dep_on_mclk; | |
1635 | struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = | |
1636 | table_info->mm_dep_table; | |
1637 | ||
1638 | if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) { | |
1639 | for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) { | |
1640 | voltage_id = sclk_table->entries[entry_id].vddInd; | |
1641 | sclk_table->entries[entry_id].vddgfx = | |
1642 | table_info->vddgfx_lookup_table->entries[voltage_id].us_vdd; | |
1643 | } | |
1644 | } else { | |
1645 | for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) { | |
1646 | voltage_id = sclk_table->entries[entry_id].vddInd; | |
1647 | sclk_table->entries[entry_id].vddc = | |
1648 | table_info->vddc_lookup_table->entries[voltage_id].us_vdd; | |
1649 | } | |
1650 | } | |
1651 | ||
1652 | for (entry_id = 0; entry_id < mclk_table->count; ++entry_id) { | |
1653 | voltage_id = mclk_table->entries[entry_id].vddInd; | |
1654 | mclk_table->entries[entry_id].vddc = | |
1655 | table_info->vddc_lookup_table->entries[voltage_id].us_vdd; | |
1656 | } | |
1657 | ||
1658 | for (entry_id = 0; entry_id < mm_table->count; ++entry_id) { | |
1659 | voltage_id = mm_table->entries[entry_id].vddcInd; | |
1660 | mm_table->entries[entry_id].vddc = | |
1661 | table_info->vddc_lookup_table->entries[voltage_id].us_vdd; | |
1662 | } | |
1663 | ||
1664 | return 0; | |
1665 | ||
1666 | } | |
1667 | ||
1668 | static int phm_add_voltage(struct pp_hwmgr *hwmgr, | |
1669 | phm_ppt_v1_voltage_lookup_table *look_up_table, | |
1670 | phm_ppt_v1_voltage_lookup_record *record) | |
1671 | { | |
1672 | uint32_t i; | |
1673 | ||
1674 | PP_ASSERT_WITH_CODE((NULL != look_up_table), | |
1675 | "Lookup Table empty.", return -EINVAL); | |
1676 | PP_ASSERT_WITH_CODE((0 != look_up_table->count), | |
1677 | "Lookup Table empty.", return -EINVAL); | |
1678 | ||
1679 | i = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_VDDGFX); | |
1680 | PP_ASSERT_WITH_CODE((i >= look_up_table->count), | |
1681 | "Lookup Table is full.", return -EINVAL); | |
1682 | ||
1683 | /* This is to avoid entering duplicate calculated records. */ | |
1684 | for (i = 0; i < look_up_table->count; i++) { | |
1685 | if (look_up_table->entries[i].us_vdd == record->us_vdd) { | |
1686 | if (look_up_table->entries[i].us_calculated == 1) | |
1687 | return 0; | |
1688 | break; | |
1689 | } | |
1690 | } | |
1691 | ||
1692 | look_up_table->entries[i].us_calculated = 1; | |
1693 | look_up_table->entries[i].us_vdd = record->us_vdd; | |
1694 | look_up_table->entries[i].us_cac_low = record->us_cac_low; | |
1695 | look_up_table->entries[i].us_cac_mid = record->us_cac_mid; | |
1696 | look_up_table->entries[i].us_cac_high = record->us_cac_high; | |
1697 | /* Only increment the count when we're appending, not replacing a duplicate entry. */ | |
1698 | if (i == look_up_table->count) | |
1699 | look_up_table->count++; | |
1700 | ||
1701 | return 0; | |
1702 | } | |
1703 | ||
1704 | ||
1705 | static int smu7_calc_voltage_dependency_tables(struct pp_hwmgr *hwmgr) | |
1706 | { | |
1707 | uint8_t entry_id; | |
1708 | struct phm_ppt_v1_voltage_lookup_record v_record; | |
1709 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
1710 | struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); | |
1711 | ||
1712 | phm_ppt_v1_clock_voltage_dependency_table *sclk_table = pptable_info->vdd_dep_on_sclk; | |
1713 | phm_ppt_v1_clock_voltage_dependency_table *mclk_table = pptable_info->vdd_dep_on_mclk; | |
1714 | ||
1715 | if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) { | |
1716 | for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) { | |
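| /* vdd_offset is a signed 16-bit delta: when bit 15 is set the offset | |
| * is negative, so the sum is biased back down by 0xFFFF. */ | |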
1717 | if (sclk_table->entries[entry_id].vdd_offset & (1 << 15)) | |
1718 | v_record.us_vdd = sclk_table->entries[entry_id].vddgfx + | |
1719 | sclk_table->entries[entry_id].vdd_offset - 0xFFFF; | |
1720 | else | |
1721 | v_record.us_vdd = sclk_table->entries[entry_id].vddgfx + | |
1722 | sclk_table->entries[entry_id].vdd_offset; | |
1723 | ||
1724 | sclk_table->entries[entry_id].vddc = | |
1725 | v_record.us_cac_low = v_record.us_cac_mid = | |
1726 | v_record.us_cac_high = v_record.us_vdd; | |
1727 | ||
1728 | phm_add_voltage(hwmgr, pptable_info->vddc_lookup_table, &v_record); | |
1729 | } | |
1730 | ||
1731 | for (entry_id = 0; entry_id < mclk_table->count; ++entry_id) { | |
1732 | if (mclk_table->entries[entry_id].vdd_offset & (1 << 15)) | |
1733 | v_record.us_vdd = mclk_table->entries[entry_id].vddc + | |
1734 | mclk_table->entries[entry_id].vdd_offset - 0xFFFF; | |
1735 | else | |
1736 | v_record.us_vdd = mclk_table->entries[entry_id].vddc + | |
1737 | mclk_table->entries[entry_id].vdd_offset; | |
1738 | ||
1739 | mclk_table->entries[entry_id].vddgfx = v_record.us_cac_low = | |
1740 | v_record.us_cac_mid = v_record.us_cac_high = v_record.us_vdd; | |
1741 | phm_add_voltage(hwmgr, pptable_info->vddgfx_lookup_table, &v_record); | |
1742 | } | |
1743 | } | |
1744 | return 0; | |
1745 | } | |
1746 | ||
1747 | static int smu7_calc_mm_voltage_dependency_table(struct pp_hwmgr *hwmgr) | |
1748 | { | |
1749 | uint8_t entry_id; | |
1750 | struct phm_ppt_v1_voltage_lookup_record v_record; | |
1751 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
1752 | struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); | |
1753 | phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = pptable_info->mm_dep_table; | |
1754 | ||
1755 | if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) { | |
1756 | for (entry_id = 0; entry_id < mm_table->count; entry_id++) { | |
1757 | if (mm_table->entries[entry_id].vddgfx_offset & (1 << 15)) | |
1758 | v_record.us_vdd = mm_table->entries[entry_id].vddc + | |
1759 | mm_table->entries[entry_id].vddgfx_offset - 0xFFFF; | |
1760 | else | |
1761 | v_record.us_vdd = mm_table->entries[entry_id].vddc + | |
1762 | mm_table->entries[entry_id].vddgfx_offset; | |
1763 | ||
1764 | /* Add the calculated VDDGFX to the VDDGFX lookup table */ | |
1765 | mm_table->entries[entry_id].vddgfx = v_record.us_cac_low = | |
1766 | v_record.us_cac_mid = v_record.us_cac_high = v_record.us_vdd; | |
1767 | phm_add_voltage(hwmgr, pptable_info->vddgfx_lookup_table, &v_record); | |
1768 | } | |
1769 | } | |
1770 | return 0; | |
1771 | } | |
1772 | ||
1773 | static int smu7_sort_lookup_table(struct pp_hwmgr *hwmgr, | |
1774 | struct phm_ppt_v1_voltage_lookup_table *lookup_table) | |
1775 | { | |
1776 | uint32_t table_size, i, j; | |
1777 | struct phm_ppt_v1_voltage_lookup_record tmp_voltage_lookup_record; | |
1778 | table_size = lookup_table->count; | |
1779 | ||
1780 | PP_ASSERT_WITH_CODE(0 != lookup_table->count, | |
1781 | "Lookup table is empty", return -EINVAL); | |
1782 | ||
1783 | /* Sorting voltages */ | |
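| /* (simple insertion sort, ascending by us_vdd) */ | |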
1784 | for (i = 0; i < table_size - 1; i++) { | |
1785 | for (j = i + 1; j > 0; j--) { | |
1786 | if (lookup_table->entries[j].us_vdd < | |
1787 | lookup_table->entries[j - 1].us_vdd) { | |
1788 | tmp_voltage_lookup_record = lookup_table->entries[j - 1]; | |
1789 | lookup_table->entries[j - 1] = lookup_table->entries[j]; | |
1790 | lookup_table->entries[j] = tmp_voltage_lookup_record; | |
1791 | } | |
1792 | } | |
1793 | } | |
1794 | ||
1795 | return 0; | |
1796 | } | |
1797 | ||
1798 | static int smu7_complete_dependency_tables(struct pp_hwmgr *hwmgr) | |
1799 | { | |
1800 | int result = 0; | |
1801 | int tmp_result; | |
1802 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
1803 | struct phm_ppt_v1_information *table_info = | |
1804 | (struct phm_ppt_v1_information *)(hwmgr->pptable); | |
1805 | ||
1806 | if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) { | |
1807 | tmp_result = smu7_patch_lookup_table_with_leakage(hwmgr, | |
1808 | table_info->vddgfx_lookup_table, &(data->vddcgfx_leakage)); | |
1809 | if (tmp_result != 0) | |
1810 | result = tmp_result; | |
1811 | ||
1812 | smu7_patch_ppt_v1_with_vdd_leakage(hwmgr, | |
1813 | &table_info->max_clock_voltage_on_dc.vddgfx, &(data->vddcgfx_leakage)); | |
1814 | } else { | |
1815 | ||
1816 | tmp_result = smu7_patch_lookup_table_with_leakage(hwmgr, | |
1817 | table_info->vddc_lookup_table, &(data->vddc_leakage)); | |
1818 | if (tmp_result) | |
1819 | result = tmp_result; | |
1820 | ||
1821 | tmp_result = smu7_patch_clock_voltage_limits_with_vddc_leakage(hwmgr, | |
1822 | &(data->vddc_leakage), &table_info->max_clock_voltage_on_dc.vddc); | |
1823 | if (tmp_result) | |
1824 | result = tmp_result; | |
1825 | } | |
1826 | ||
1827 | tmp_result = smu7_patch_voltage_dependency_tables_with_lookup_table(hwmgr); | |
1828 | if (tmp_result) | |
1829 | result = tmp_result; | |
1830 | ||
1831 | tmp_result = smu7_calc_voltage_dependency_tables(hwmgr); | |
1832 | if (tmp_result) | |
1833 | result = tmp_result; | |
1834 | ||
1835 | tmp_result = smu7_calc_mm_voltage_dependency_table(hwmgr); | |
1836 | if (tmp_result) | |
1837 | result = tmp_result; | |
1838 | ||
1839 | tmp_result = smu7_sort_lookup_table(hwmgr, table_info->vddgfx_lookup_table); | |
1840 | if (tmp_result) | |
1841 | result = tmp_result; | |
1842 | ||
1843 | tmp_result = smu7_sort_lookup_table(hwmgr, table_info->vddc_lookup_table); | |
1844 | if (tmp_result) | |
1845 | result = tmp_result; | |
1846 | ||
1847 | return result; | |
1848 | } | |
1849 | ||
1850 | static int smu7_set_private_data_based_on_pptable_v1(struct pp_hwmgr *hwmgr) | |
1851 | { | |
1852 | struct phm_ppt_v1_information *table_info = | |
1853 | (struct phm_ppt_v1_information *)(hwmgr->pptable); | |
1854 | ||
1855 | struct phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table = | |
1856 | table_info->vdd_dep_on_sclk; | |
1857 | struct phm_ppt_v1_clock_voltage_dependency_table *allowed_mclk_vdd_table = | |
1858 | table_info->vdd_dep_on_mclk; | |
1859 | ||
1860 | PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table != NULL, | |
1861 | "VDD dependency on SCLK table is missing.", | |
1862 | return -EINVAL); | |
1863 | PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1, | |
1864 | "VDD dependency on SCLK table has to have is missing.", | |
1865 | return -EINVAL); | |
1866 | ||
1867 | PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table != NULL, | |
1868 | "VDD dependency on MCLK table is missing", | |
1869 | return -EINVAL); | |
1870 | PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1, | |
1871 | "VDD dependency on MCLK table has to have is missing.", | |
1872 | return -EINVAL); | |
1873 | ||
1874 | table_info->max_clock_voltage_on_ac.sclk = | |
1875 | allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].clk; | |
1876 | table_info->max_clock_voltage_on_ac.mclk = | |
1877 | allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].clk; | |
1878 | table_info->max_clock_voltage_on_ac.vddc = | |
1879 | allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc; | |
1880 | table_info->max_clock_voltage_on_ac.vddci = | |
1881 | allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].vddci; | |
1882 | ||
1883 | hwmgr->dyn_state.max_clock_voltage_on_ac.sclk = table_info->max_clock_voltage_on_ac.sclk; | |
1884 | hwmgr->dyn_state.max_clock_voltage_on_ac.mclk = table_info->max_clock_voltage_on_ac.mclk; | |
1885 | hwmgr->dyn_state.max_clock_voltage_on_ac.vddc = table_info->max_clock_voltage_on_ac.vddc; | |
1886 | hwmgr->dyn_state.max_clock_voltage_on_ac.vddci = table_info->max_clock_voltage_on_ac.vddci; | |
1887 | ||
1888 | return 0; | |
1889 | } | |
1890 | ||
f8a4c11b | 1891 | static int smu7_patch_voltage_workaround(struct pp_hwmgr *hwmgr) |
599a7e9f RZ |
1892 | { |
1893 | struct phm_ppt_v1_information *table_info = | |
1894 | (struct phm_ppt_v1_information *)(hwmgr->pptable); | |
1895 | struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table; | |
1896 | struct phm_ppt_v1_voltage_lookup_table *lookup_table; | |
1897 | uint32_t i; | |
1898 | uint32_t hw_revision, sub_vendor_id, sub_sys_id; | |
1899 | struct cgs_system_info sys_info = {0}; | |
1900 | ||
1901 | if (table_info != NULL) { | |
1902 | dep_mclk_table = table_info->vdd_dep_on_mclk; | |
1903 | lookup_table = table_info->vddc_lookup_table; | |
1904 | } else | |
1905 | return 0; | |
1906 | ||
1907 | sys_info.size = sizeof(struct cgs_system_info); | |
1908 | ||
1909 | sys_info.info_id = CGS_SYSTEM_INFO_PCIE_REV; | |
1910 | cgs_query_system_info(hwmgr->device, &sys_info); | |
1911 | hw_revision = (uint32_t)sys_info.value; | |
1912 | ||
1913 | sys_info.info_id = CGS_SYSTEM_INFO_PCIE_SUB_SYS_ID; | |
1914 | cgs_query_system_info(hwmgr->device, &sys_info); | |
1915 | sub_sys_id = (uint32_t)sys_info.value; | |
1916 | ||
1917 | sys_info.info_id = CGS_SYSTEM_INFO_PCIE_SUB_SYS_VENDOR_ID; | |
1918 | cgs_query_system_info(hwmgr->device, &sys_info); | |
1919 | sub_vendor_id = (uint32_t)sys_info.value; | |
1920 | ||
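| /* Board-specific workaround: on certain Polaris10 rev C7 boards the | |
| * highest MCLK state must run at 1.0 V VDDC or more, so retarget its | |
| * vddInd to the first real lookup entry at or above 1000 mV. */ | |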
1921 | if (hwmgr->chip_id == CHIP_POLARIS10 && hw_revision == 0xC7 && | |
1922 | ((sub_sys_id == 0xb37 && sub_vendor_id == 0x1002) || | |
1923 | (sub_sys_id == 0x4a8 && sub_vendor_id == 0x1043) || | |
1924 | (sub_sys_id == 0x9480 && sub_vendor_id == 0x1682))) { | |
1925 | if (lookup_table->entries[dep_mclk_table->entries[dep_mclk_table->count-1].vddInd].us_vdd >= 1000) | |
1926 | return 0; | |
1927 | ||
1928 | for (i = 0; i < lookup_table->count; i++) { | |
1929 | if (lookup_table->entries[i].us_vdd < 0xff01 && lookup_table->entries[i].us_vdd >= 1000) { | |
1930 | dep_mclk_table->entries[dep_mclk_table->count-1].vddInd = (uint8_t) i; | |
1931 | return 0; | |
1932 | } | |
1933 | } | |
1934 | } | |
1935 | return 0; | |
1936 | } | |
1937 | ||
1938 | static int smu7_thermal_parameter_init(struct pp_hwmgr *hwmgr) | |
1939 | { | |
1940 | struct pp_atomctrl_gpio_pin_assignment gpio_pin_assignment; | |
1941 | uint32_t temp_reg; | |
1942 | struct phm_ppt_v1_information *table_info = | |
1943 | (struct phm_ppt_v1_information *)(hwmgr->pptable); | |
1944 | ||
1945 | ||
1946 | if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_PCC_GPIO_PINID, &gpio_pin_assignment)) { | |
1947 | temp_reg = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL); | |
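| /* Map the VDDC PCC GPIO (likely peak-current control) onto the | |
| * CNB_PWRMGT_CNTL field that corresponds to its assigned bit. */ | |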
1948 | switch (gpio_pin_assignment.uc_gpio_pin_bit_shift) { | |
1949 | case 0: | |
1950 | temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x1); | |
1951 | break; | |
1952 | case 1: | |
1953 | temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x2); | |
1954 | break; | |
1955 | case 2: | |
1956 | temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW, 0x1); | |
1957 | break; | |
1958 | case 3: | |
1959 | temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, FORCE_NB_PS1, 0x1); | |
1960 | break; | |
1961 | case 4: | |
1962 | temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, DPM_ENABLED, 0x1); | |
1963 | break; | |
1964 | default: | |
1965 | PP_ASSERT_WITH_CODE(0, | |
1966 | "Failed to setup PCC HW register! Wrong GPIO assigned for VDDC_PCC_GPIO_PINID!", | |
1967 | ); | |
1968 | break; | |
1969 | } | |
1970 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL, temp_reg); | |
1971 | } | |
1972 | ||
1973 | if (table_info == NULL) | |
1974 | return 0; | |
1975 | ||
1976 | if (table_info->cac_dtp_table->usDefaultTargetOperatingTemp != 0 && | |
1977 | hwmgr->thermal_controller.advanceFanControlParameters.ucFanControlMode) { | |
1978 | hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMinLimit = | |
1979 | (uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit; | |
1980 | ||
1981 | hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMaxLimit = | |
1982 | (uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM; | |
1983 | ||
1984 | hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMStep = 1; | |
1985 | ||
1986 | hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMaxLimit = 100; | |
1987 | ||
1988 | hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMinLimit = | |
1989 | (uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit; | |
1990 | ||
1991 | hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMStep = 1; | |
1992 | ||
1993 | table_info->cac_dtp_table->usDefaultTargetOperatingTemp = (table_info->cac_dtp_table->usDefaultTargetOperatingTemp >= 50) ? | |
1994 | (table_info->cac_dtp_table->usDefaultTargetOperatingTemp - 50) : 0; | |
1995 | ||
1996 | table_info->cac_dtp_table->usOperatingTempMaxLimit = table_info->cac_dtp_table->usDefaultTargetOperatingTemp; | |
1997 | table_info->cac_dtp_table->usOperatingTempStep = 1; | |
1998 | table_info->cac_dtp_table->usOperatingTempHyst = 1; | |
1999 | ||
2000 | hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanPWM = | |
2001 | hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM; | |
2002 | ||
2003 | hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM = | |
2004 | hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanRPM; | |
2005 | ||
2006 | hwmgr->dyn_state.cac_dtp_table->usOperatingTempMinLimit = | |
2007 | table_info->cac_dtp_table->usOperatingTempMinLimit; | |
2008 | ||
2009 | hwmgr->dyn_state.cac_dtp_table->usOperatingTempMaxLimit = | |
2010 | table_info->cac_dtp_table->usOperatingTempMaxLimit; | |
2011 | ||
2012 | hwmgr->dyn_state.cac_dtp_table->usDefaultTargetOperatingTemp = | |
2013 | table_info->cac_dtp_table->usDefaultTargetOperatingTemp; | |
2014 | ||
2015 | hwmgr->dyn_state.cac_dtp_table->usOperatingTempStep = | |
2016 | table_info->cac_dtp_table->usOperatingTempStep; | |
2017 | ||
2018 | hwmgr->dyn_state.cac_dtp_table->usTargetOperatingTemp = | |
2019 | table_info->cac_dtp_table->usTargetOperatingTemp; | |
cf54d6d9 RZ |
2020 | if (hwmgr->feature_mask & PP_OD_FUZZY_FAN_CONTROL_MASK) |
2021 | phm_cap_set(hwmgr->platform_descriptor.platformCaps, | |
2022 | PHM_PlatformCaps_ODFuzzyFanControlSupport); | |
599a7e9f RZ |
2023 | } |
2024 | ||
2025 | return 0; | |
2026 | } | |
2027 | ||
2028 | /** | |
2029 | * Change virtual leakage voltage to actual value. | |
2030 | * | |
2031 | * @param hwmgr the address of the powerplay hardware manager. | |
2032 | * @param voltage pointer to the voltage value to patch | |
2033 | * @param leakage_table pointer to the leakage table | |
2034 | */ | |
2035 | static void smu7_patch_ppt_v0_with_vdd_leakage(struct pp_hwmgr *hwmgr, | |
2036 | uint32_t *voltage, struct smu7_leakage_voltage *leakage_table) | |
2037 | { | |
2038 | uint32_t index; | |
2039 | ||
2040 | /* search for leakage voltage ID 0xff01 ~ 0xff08 */ | |
2041 | for (index = 0; index < leakage_table->count; index++) { | |
2042 | /* if this voltage matches a leakage voltage ID */ | |
2043 | /* patch with actual leakage voltage */ | |
2044 | if (leakage_table->leakage_id[index] == *voltage) { | |
2045 | *voltage = leakage_table->actual_voltage[index]; | |
2046 | break; | |
2047 | } | |
2048 | } | |
2049 | ||
2050 | if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0) | |
b5c11b8e | 2051 | pr_err("Voltage value looks like a leakage ID but was not patched\n"); | |
599a7e9f RZ |
2052 | } |
2053 | ||
2054 | ||
2055 | static int smu7_patch_vddc(struct pp_hwmgr *hwmgr, | |
2056 | struct phm_clock_voltage_dependency_table *tab) | |
2057 | { | |
2058 | uint16_t i; | |
2059 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
2060 | ||
2061 | if (tab) | |
2062 | for (i = 0; i < tab->count; i++) | |
2063 | smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v, | |
2064 | &data->vddc_leakage); | |
2065 | ||
2066 | return 0; | |
2067 | } | |
2068 | ||
2069 | static int smu7_patch_vddci(struct pp_hwmgr *hwmgr, | |
2070 | struct phm_clock_voltage_dependency_table *tab) | |
2071 | { | |
2072 | uint16_t i; | |
2073 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
2074 | ||
2075 | if (tab) | |
2076 | for (i = 0; i < tab->count; i++) | |
2077 | smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v, | |
2078 | &data->vddci_leakage); | |
2079 | ||
2080 | return 0; | |
2081 | } | |
2082 | ||
2083 | static int smu7_patch_vce_vddc(struct pp_hwmgr *hwmgr, | |
2084 | struct phm_vce_clock_voltage_dependency_table *tab) | |
2085 | { | |
2086 | uint16_t i; | |
2087 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
2088 | ||
2089 | if (tab) | |
2090 | for (i = 0; i < tab->count; i++) | |
2091 | smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v, | |
2092 | &data->vddc_leakage); | |
2093 | ||
2094 | return 0; | |
2095 | } | |
2096 | ||
2097 | ||
2098 | static int smu7_patch_uvd_vddc(struct pp_hwmgr *hwmgr, | |
2099 | struct phm_uvd_clock_voltage_dependency_table *tab) | |
2100 | { | |
2101 | uint16_t i; | |
2102 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
2103 | ||
2104 | if (tab) | |
2105 | for (i = 0; i < tab->count; i++) | |
2106 | smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v, | |
2107 | &data->vddc_leakage); | |
2108 | ||
2109 | return 0; | |
2110 | } | |
2111 | ||
2112 | static int smu7_patch_vddc_shed_limit(struct pp_hwmgr *hwmgr, | |
2113 | struct phm_phase_shedding_limits_table *tab) | |
2114 | { | |
2115 | uint16_t i; | |
2116 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
2117 | ||
2118 | if (tab) | |
2119 | for (i = 0; i < tab->count; i++) | |
2120 | smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].Voltage, | |
2121 | &data->vddc_leakage); | |
2122 | ||
2123 | return 0; | |
2124 | } | |
2125 | ||
2126 | static int smu7_patch_samu_vddc(struct pp_hwmgr *hwmgr, | |
2127 | struct phm_samu_clock_voltage_dependency_table *tab) | |
2128 | { | |
2129 | uint16_t i; | |
2130 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
2131 | ||
2132 | if (tab) | |
2133 | for (i = 0; i < tab->count; i++) | |
2134 | smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v, | |
2135 | &data->vddc_leakage); | |
2136 | ||
2137 | return 0; | |
2138 | } | |
2139 | ||
2140 | static int smu7_patch_acp_vddc(struct pp_hwmgr *hwmgr, | |
2141 | struct phm_acp_clock_voltage_dependency_table *tab) | |
2142 | { | |
2143 | uint16_t i; | |
2144 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
2145 | ||
2146 | if (tab) | |
2147 | for (i = 0; i < tab->count; i++) | |
2148 | smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v, | |
2149 | &data->vddc_leakage); | |
2150 | ||
2151 | return 0; | |
2152 | } | |
2153 | ||
2154 | static int smu7_patch_limits_vddc(struct pp_hwmgr *hwmgr, | |
77f7f71f | 2155 | struct phm_clock_and_voltage_limits *tab) |
599a7e9f | 2156 | { |
77f7f71f | 2157 | uint32_t vddc, vddci; |
599a7e9f RZ |
2158 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
2159 | ||
2160 | if (tab) { | |
a29d1260 | 2161 | vddc = tab->vddc; |
77f7f71f AD |
2162 | smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddc, |
2163 | &data->vddc_leakage); | |
2164 | tab->vddc = vddc; | |
a29d1260 | 2165 | vddci = tab->vddci; |
77f7f71f AD |
2166 | smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddci, |
2167 | &data->vddci_leakage); | |
2168 | tab->vddci = vddci; | |
599a7e9f RZ |
2169 | } |
2170 | ||
2171 | return 0; | |
2172 | } | |
2173 | ||
2174 | static int smu7_patch_cac_vddc(struct pp_hwmgr *hwmgr, struct phm_cac_leakage_table *tab) | |
2175 | { | |
2176 | uint32_t i; | |
2177 | uint32_t vddc; | |
2178 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
2179 | ||
2180 | if (tab) { | |
2181 | for (i = 0; i < tab->count; i++) { | |
2182 | vddc = (uint32_t)(tab->entries[i].Vddc); | |
2183 | smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddc, &data->vddc_leakage); | |
2184 | tab->entries[i].Vddc = (uint16_t)vddc; | |
2185 | } | |
2186 | } | |
2187 | ||
2188 | return 0; | |
2189 | } | |
2190 | ||
2191 | static int smu7_patch_dependency_tables_with_leakage(struct pp_hwmgr *hwmgr) | |
2192 | { | |
2193 | int tmp; | |
2194 | ||
2195 | tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dependency_on_sclk); | |
2196 | if (tmp) | |
2197 | return -EINVAL; | |
2198 | ||
2199 | tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dependency_on_mclk); | |
2200 | if (tmp) | |
2201 | return -EINVAL; | |
2202 | ||
2203 | tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dep_on_dal_pwrl); | |
2204 | if (tmp) | |
2205 | return -EINVAL; | |
2206 | ||
2207 | tmp = smu7_patch_vddci(hwmgr, hwmgr->dyn_state.vddci_dependency_on_mclk); | |
2208 | if (tmp) | |
2209 | return -EINVAL; | |
2210 | ||
2211 | tmp = smu7_patch_vce_vddc(hwmgr, hwmgr->dyn_state.vce_clock_voltage_dependency_table); | |
2212 | if (tmp) | |
2213 | return -EINVAL; | |
2214 | ||
2215 | tmp = smu7_patch_uvd_vddc(hwmgr, hwmgr->dyn_state.uvd_clock_voltage_dependency_table); | |
2216 | if (tmp) | |
2217 | return -EINVAL; | |
2218 | ||
2219 | tmp = smu7_patch_samu_vddc(hwmgr, hwmgr->dyn_state.samu_clock_voltage_dependency_table); | |
2220 | if (tmp) | |
2221 | return -EINVAL; | |
2222 | ||
2223 | tmp = smu7_patch_acp_vddc(hwmgr, hwmgr->dyn_state.acp_clock_voltage_dependency_table); | |
2224 | if (tmp) | |
2225 | return -EINVAL; | |
2226 | ||
2227 | tmp = smu7_patch_vddc_shed_limit(hwmgr, hwmgr->dyn_state.vddc_phase_shed_limits_table); | |
2228 | if (tmp) | |
2229 | return -EINVAL; | |
2230 | ||
2231 | tmp = smu7_patch_limits_vddc(hwmgr, &hwmgr->dyn_state.max_clock_voltage_on_ac); | |
2232 | if (tmp) | |
2233 | return -EINVAL; | |
2234 | ||
2235 | tmp = smu7_patch_limits_vddc(hwmgr, &hwmgr->dyn_state.max_clock_voltage_on_dc); | |
2236 | if (tmp) | |
2237 | return -EINVAL; | |
2238 | ||
2239 | tmp = smu7_patch_cac_vddc(hwmgr, hwmgr->dyn_state.cac_leakage_table); | |
2240 | if (tmp) | |
2241 | return -EINVAL; | |
2242 | ||
2243 | return 0; | |
2244 | } | |
2245 | ||
2246 | ||
2247 | static int smu7_set_private_data_based_on_pptable_v0(struct pp_hwmgr *hwmgr) | |
2248 | { | |
2249 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
2250 | ||
2251 | struct phm_clock_voltage_dependency_table *allowed_sclk_vddc_table = hwmgr->dyn_state.vddc_dependency_on_sclk; | |
2252 | struct phm_clock_voltage_dependency_table *allowed_mclk_vddc_table = hwmgr->dyn_state.vddc_dependency_on_mclk; | |
2253 | struct phm_clock_voltage_dependency_table *allowed_mclk_vddci_table = hwmgr->dyn_state.vddci_dependency_on_mclk; | |
2254 | ||
2255 | PP_ASSERT_WITH_CODE(allowed_sclk_vddc_table != NULL, | |
2256 | "VDDC dependency on SCLK table is missing. This table is mandatory\n", return -EINVAL); | |
2257 | PP_ASSERT_WITH_CODE(allowed_sclk_vddc_table->count >= 1, | |
2258 | "VDDC dependency on SCLK table has to have is missing. This table is mandatory\n", return -EINVAL); | |
2259 | ||
2260 | PP_ASSERT_WITH_CODE(allowed_mclk_vddc_table != NULL, | |
2261 | "VDDC dependency on MCLK table is missing. This table is mandatory\n", return -EINVAL); | |
2262 | PP_ASSERT_WITH_CODE(allowed_mclk_vddc_table->count >= 1, | |
2263 | "VDD dependency on MCLK table has to have is missing. This table is mandatory\n", return -EINVAL); | |
2264 | ||
2265 | data->min_vddc_in_pptable = (uint16_t)allowed_sclk_vddc_table->entries[0].v; | |
2266 | data->max_vddc_in_pptable = (uint16_t)allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v; | |
2267 | ||
2268 | hwmgr->dyn_state.max_clock_voltage_on_ac.sclk = | |
2269 | allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk; | |
2270 | hwmgr->dyn_state.max_clock_voltage_on_ac.mclk = | |
2271 | allowed_mclk_vddc_table->entries[allowed_mclk_vddc_table->count - 1].clk; | |
2272 | hwmgr->dyn_state.max_clock_voltage_on_ac.vddc = | |
2273 | allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v; | |
2274 | ||
2275 | if (allowed_mclk_vddci_table != NULL && allowed_mclk_vddci_table->count >= 1) { | |
2276 | data->min_vddci_in_pptable = (uint16_t)allowed_mclk_vddci_table->entries[0].v; | |
2277 | data->max_vddci_in_pptable = (uint16_t)allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v; | |
2278 | } | |
2279 | ||
2280 | if (hwmgr->dyn_state.vddci_dependency_on_mclk != NULL && hwmgr->dyn_state.vddci_dependency_on_mclk->count > 1) | |
2281 | hwmgr->dyn_state.max_clock_voltage_on_ac.vddci = hwmgr->dyn_state.vddci_dependency_on_mclk->entries[hwmgr->dyn_state.vddci_dependency_on_mclk->count - 1].v; | |
2282 | ||
2283 | return 0; | |
2284 | } | |
2285 | ||
a0aa7046 RZ |
2286 | static int smu7_hwmgr_backend_fini(struct pp_hwmgr *hwmgr) |
2287 | { | |
2288 | if (NULL != hwmgr->dyn_state.vddc_dep_on_dal_pwrl) { | |
2289 | kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl); | |
2290 | hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL; | |
2291 | } | |
2292 | pp_smu7_thermal_fini(hwmgr); | |
2293 | if (NULL != hwmgr->backend) { | |
2294 | kfree(hwmgr->backend); | |
2295 | hwmgr->backend = NULL; | |
2296 | } | |
2297 | ||
2298 | return 0; | |
2299 | } | |
2300 | ||
f8a4c11b | 2301 | static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr) |
599a7e9f RZ |
2302 | { |
2303 | struct smu7_hwmgr *data; | |
2304 | int result; | |
2305 | ||
2306 | data = kzalloc(sizeof(struct smu7_hwmgr), GFP_KERNEL); | |
2307 | if (data == NULL) | |
2308 | return -ENOMEM; | |
2309 | ||
2310 | hwmgr->backend = data; | |
a0aa7046 | 2311 | pp_smu7_thermal_initialize(hwmgr); |
599a7e9f RZ |
2312 | |
2313 | smu7_patch_voltage_workaround(hwmgr); | |
2314 | smu7_init_dpm_defaults(hwmgr); | |
2315 | ||
2316 | /* Get leakage voltage based on leakage ID. */ | |
2317 | result = smu7_get_evv_voltages(hwmgr); | |
2318 | ||
2319 | if (result) { | |
b5c11b8e | 2320 | pr_info("Get EVV Voltage Failed. Abort Driver loading!\n"); |
599a7e9f RZ |
2321 | return -EINVAL; |
2322 | } | |
2323 | ||
2324 | if (hwmgr->pp_table_version == PP_TABLE_V1) { | |
2325 | smu7_complete_dependency_tables(hwmgr); | |
2326 | smu7_set_private_data_based_on_pptable_v1(hwmgr); | |
2327 | } else if (hwmgr->pp_table_version == PP_TABLE_V0) { | |
2328 | smu7_patch_dependency_tables_with_leakage(hwmgr); | |
2329 | smu7_set_private_data_based_on_pptable_v0(hwmgr); | |
2330 | } | |
2331 | ||
2332 | /* Initialize Dynamic State Adjustment Rule Settings */ | |
2333 | result = phm_initializa_dynamic_state_adjustment_rule_settings(hwmgr); | |
2334 | ||
2335 | if (0 == result) { | |
2336 | struct cgs_system_info sys_info = {0}; | |
2337 | ||
2338 | data->is_tlu_enabled = false; | |
2339 | ||
2340 | hwmgr->platform_descriptor.hardwareActivityPerformanceLevels = | |
2341 | SMU7_MAX_HARDWARE_POWERLEVELS; | |
2342 | hwmgr->platform_descriptor.hardwarePerformanceLevels = 2; | |
2343 | hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50; | |
2344 | ||
2345 | sys_info.size = sizeof(struct cgs_system_info); | |
2346 | sys_info.info_id = CGS_SYSTEM_INFO_PCIE_GEN_INFO; | |
2347 | result = cgs_query_system_info(hwmgr->device, &sys_info); | |
2348 | if (result) | |
2349 | data->pcie_gen_cap = AMDGPU_DEFAULT_PCIE_GEN_MASK; | |
2350 | else | |
2351 | data->pcie_gen_cap = (uint32_t)sys_info.value; | |
2352 | if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) | |
2353 | data->pcie_spc_cap = 20; | |
2354 | sys_info.size = sizeof(struct cgs_system_info); | |
2355 | sys_info.info_id = CGS_SYSTEM_INFO_PCIE_MLW; | |
2356 | result = cgs_query_system_info(hwmgr->device, &sys_info); | |
2357 | if (result) | |
2358 | data->pcie_lane_cap = AMDGPU_DEFAULT_PCIE_MLW_MASK; | |
2359 | else | |
2360 | data->pcie_lane_cap = (uint32_t)sys_info.value; | |
2361 | ||
2362 | hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */ | |
2363 | /* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. */ | |
2364 | hwmgr->platform_descriptor.clockStep.engineClock = 500; | |
2365 | hwmgr->platform_descriptor.clockStep.memoryClock = 500; | |
2366 | smu7_thermal_parameter_init(hwmgr); | |
2367 | } else { | |
2368 | /* Ignore the return value here; we are cleaning up after a failed init. */ | |
a0aa7046 | 2369 | smu7_hwmgr_backend_fini(hwmgr); |
599a7e9f RZ |
2370 | } |
2371 | ||
2372 | return 0; | |
2373 | } | |
2374 | ||
2375 | static int smu7_force_dpm_highest(struct pp_hwmgr *hwmgr) | |
2376 | { | |
2377 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
2378 | uint32_t level, tmp; | |
2379 | ||
2380 | if (!data->pcie_dpm_key_disabled) { | |
2381 | if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) { | |
2382 | level = 0; | |
2383 | tmp = data->dpm_level_enable_mask.pcie_dpm_enable_mask; | |
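| /* Find the index of the highest set bit, i.e. the highest enabled | |
| * DPM level; the same scan is repeated for SCLK and MCLK below. */ | |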
2384 | while (tmp >>= 1) | |
2385 | level++; | |
2386 | ||
2387 | if (level) | |
2388 | smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, | |
2389 | PPSMC_MSG_PCIeDPM_ForceLevel, level); | |
2390 | } | |
2391 | } | |
2392 | ||
2393 | if (!data->sclk_dpm_key_disabled) { | |
2394 | if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) { | |
2395 | level = 0; | |
2396 | tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask; | |
2397 | while (tmp >>= 1) | |
2398 | level++; | |
2399 | ||
2400 | if (level) | |
2401 | smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, | |
2402 | PPSMC_MSG_SCLKDPM_SetEnabledMask, | |
2403 | (1 << level)); | |
2404 | } | |
2405 | } | |
2406 | ||
2407 | if (!data->mclk_dpm_key_disabled) { | |
2408 | if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) { | |
2409 | level = 0; | |
2410 | tmp = data->dpm_level_enable_mask.mclk_dpm_enable_mask; | |
2411 | while (tmp >>= 1) | |
2412 | level++; | |
2413 | ||
2414 | if (level) | |
2415 | smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, | |
2416 | PPSMC_MSG_MCLKDPM_SetEnabledMask, | |
2417 | (1 << level)); | |
2418 | } | |
2419 | } | |
2420 | ||
2421 | return 0; | |
2422 | } | |
2423 | ||
2424 | static int smu7_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr) | |
2425 | { | |
2426 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
2427 | ||
2428 | if (hwmgr->pp_table_version == PP_TABLE_V1) | |
2429 | phm_apply_dal_min_voltage_request(hwmgr); | |
2430 | /* TODO: handle v0 (Iceland and CI) */ | |
2431 | ||
2432 | if (!data->sclk_dpm_key_disabled) { | |
2433 | if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) | |
2434 | smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, | |
2435 | PPSMC_MSG_SCLKDPM_SetEnabledMask, | |
2436 | data->dpm_level_enable_mask.sclk_dpm_enable_mask); | |
2437 | } | |
2438 | ||
2439 | if (!data->mclk_dpm_key_disabled) { | |
2440 | if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) | |
2441 | smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, | |
2442 | PPSMC_MSG_MCLKDPM_SetEnabledMask, | |
2443 | data->dpm_level_enable_mask.mclk_dpm_enable_mask); | |
2444 | } | |
2445 | ||
2446 | return 0; | |
2447 | } | |
2448 | ||
2449 | static int smu7_unforce_dpm_levels(struct pp_hwmgr *hwmgr) | |
2450 | { | |
2451 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
2452 | ||
2453 | if (!smum_is_dpm_running(hwmgr)) | |
2454 | return -EINVAL; | |
2455 | ||
2456 | if (!data->pcie_dpm_key_disabled) { | |
2457 | smum_send_msg_to_smc(hwmgr->smumgr, | |
2458 | PPSMC_MSG_PCIeDPM_UnForceLevel); | |
2459 | } | |
2460 | ||
2461 | return smu7_upload_dpm_level_enable_mask(hwmgr); | |
2462 | } | |
2463 | ||
2464 | static int smu7_force_dpm_lowest(struct pp_hwmgr *hwmgr) | |
2465 | { | |
2466 | struct smu7_hwmgr *data = | |
2467 | (struct smu7_hwmgr *)(hwmgr->backend); | |
2468 | uint32_t level; | |
2469 | ||
2470 | if (!data->sclk_dpm_key_disabled) | |
2471 | if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) { | |
2472 | level = phm_get_lowest_enabled_level(hwmgr, | |
2473 | data->dpm_level_enable_mask.sclk_dpm_enable_mask); | |
2474 | smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, | |
2475 | PPSMC_MSG_SCLKDPM_SetEnabledMask, | |
2476 | (1 << level)); | |
2477 | ||
2478 | } | |
2479 | ||
2480 | if (!data->mclk_dpm_key_disabled) { | |
2481 | if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) { | |
2482 | level = phm_get_lowest_enabled_level(hwmgr, | |
2483 | data->dpm_level_enable_mask.mclk_dpm_enable_mask); | |
2484 | smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, | |
2485 | PPSMC_MSG_MCLKDPM_SetEnabledMask, | |
2486 | (1 << level)); | |
2487 | } | |
2488 | } | |
2489 | ||
2490 | if (!data->pcie_dpm_key_disabled) { | |
2491 | if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) { | |
2492 | level = phm_get_lowest_enabled_level(hwmgr, | |
2493 | data->dpm_level_enable_mask.pcie_dpm_enable_mask); | |
2494 | smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, | |
2495 | PPSMC_MSG_PCIeDPM_ForceLevel, | |
2496 | (level)); | |
2497 | } | |
2498 | } | |
2499 | ||
2500 | return 0; | |
570272d2 RZ |
2501 | } |
2502 | ||
2503 | static int smu7_get_profiling_clk(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level, | |
2504 | uint32_t *sclk_mask, uint32_t *mclk_mask, uint32_t *pcie_mask) | |
2505 | { | |
2506 | uint32_t percentage; | |
2507 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
2508 | struct smu7_dpm_table *golden_dpm_table = &data->golden_dpm_table; | |
2509 | int32_t tmp_mclk; | |
2510 | int32_t tmp_sclk; | |
2511 | int32_t count; | |
2512 | ||
2513 | if (golden_dpm_table->mclk_table.count < 1) | |
2514 | return -EINVAL; | |
2515 | ||
2516 | percentage = 100 * golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count - 1].value / | |
2517 | golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 1].value; | |
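| /* Golden max SCLK as a percentage of golden max MCLK; used below to | |
| * derive a matching profiling SCLK from the chosen MCLK. */ | |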
599a7e9f | 2518 | |
570272d2 RZ |
2519 | if (golden_dpm_table->mclk_table.count == 1) { |
2520 | percentage = 70; | |
2521 | tmp_mclk = golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 1].value; | |
2522 | *mclk_mask = golden_dpm_table->mclk_table.count - 1; | |
2523 | } else { | |
2524 | tmp_mclk = golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 2].value; | |
2525 | *mclk_mask = golden_dpm_table->mclk_table.count - 2; | |
2526 | } | |
2527 | ||
2528 | tmp_sclk = tmp_mclk * percentage / 100; | |
2529 | ||
2530 | if (hwmgr->pp_table_version == PP_TABLE_V0) { | |
2531 | for (count = hwmgr->dyn_state.vddc_dependency_on_sclk->count-1; | |
2532 | count >= 0; count--) { | |
2533 | if (tmp_sclk >= hwmgr->dyn_state.vddc_dependency_on_sclk->entries[count].clk) { | |
2534 | tmp_sclk = hwmgr->dyn_state.vddc_dependency_on_sclk->entries[count].clk; | |
2535 | *sclk_mask = count; | |
2536 | break; | |
2537 | } | |
2538 | } | |
2539 | if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) | |
2540 | *sclk_mask = 0; | |
2541 | ||
2542 | if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) | |
2543 | *sclk_mask = hwmgr->dyn_state.vddc_dependency_on_sclk->count-1; | |
2544 | } else if (hwmgr->pp_table_version == PP_TABLE_V1) { | |
2545 | struct phm_ppt_v1_information *table_info = | |
2546 | (struct phm_ppt_v1_information *)(hwmgr->pptable); | |
2547 | ||
2548 | for (count = table_info->vdd_dep_on_sclk->count-1; count >= 0; count--) { | |
2549 | if (tmp_sclk >= table_info->vdd_dep_on_sclk->entries[count].clk) { | |
2550 | tmp_sclk = table_info->vdd_dep_on_sclk->entries[count].clk; | |
2551 | *sclk_mask = count; | |
2552 | break; | |
2553 | } | |
2554 | } | |
2555 | if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) | |
2556 | *sclk_mask = 0; | |
2557 | ||
2558 | if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) | |
2559 | *sclk_mask = table_info->vdd_dep_on_sclk->count - 1; | |
2560 | } | |
2561 | ||
2562 | if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) | |
2563 | *mclk_mask = 0; | |
2564 | else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) | |
2565 | *mclk_mask = golden_dpm_table->mclk_table.count - 1; | |
2566 | ||
2567 | *pcie_mask = data->dpm_table.pcie_speed_table.count - 1; | |
2568 | return 0; | |
599a7e9f | 2569 | } |
570272d2 | 2570 | |
599a7e9f RZ |
2571 | static int smu7_force_dpm_level(struct pp_hwmgr *hwmgr, |
2572 | enum amd_dpm_forced_level level) | |
2573 | { | |
2574 | int ret = 0; | |
570272d2 RZ |
2575 | uint32_t sclk_mask = 0; |
2576 | uint32_t mclk_mask = 0; | |
2577 | uint32_t pcie_mask = 0; | |
2578 | uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD | | |
2579 | AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK | | |
2580 | AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK | | |
2581 | AMD_DPM_FORCED_LEVEL_PROFILE_PEAK; | |
2582 | ||
2583 | if (level == hwmgr->dpm_level) | |
2584 | return ret; | |
2585 | ||
2586 | if (!(hwmgr->dpm_level & profile_mode_mask)) { | |
2587 | /* enter profile mode, save current level, disable gfx cg */ | |
2588 | if (level & profile_mode_mask) { | |
2589 | hwmgr->saved_dpm_level = hwmgr->dpm_level; | |
2590 | cgs_set_clockgating_state(hwmgr->device, | |
2591 | AMD_IP_BLOCK_TYPE_GFX, | |
2592 | AMD_CG_STATE_UNGATE); | |
2593 | } | |
2594 | } else { | |
2595 | /* exit profile mode, restore level, enable gfx cg */ | |
2596 | if (!(level & profile_mode_mask)) { | |
2597 | if (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT) | |
2598 | level = hwmgr->saved_dpm_level; | |
2599 | cgs_set_clockgating_state(hwmgr->device, | |
2600 | AMD_IP_BLOCK_TYPE_GFX, | |
2601 | AMD_CG_STATE_GATE); | |
2602 | } | |
2603 | } | |
599a7e9f RZ |
2604 | |
2605 | switch (level) { | |
2606 | case AMD_DPM_FORCED_LEVEL_HIGH: | |
2607 | ret = smu7_force_dpm_highest(hwmgr); | |
2608 | if (ret) | |
2609 | return ret; | |
570272d2 | 2610 | hwmgr->dpm_level = level; |
599a7e9f RZ |
2611 | break; |
2612 | case AMD_DPM_FORCED_LEVEL_LOW: | |
2613 | ret = smu7_force_dpm_lowest(hwmgr); | |
2614 | if (ret) | |
2615 | return ret; | |
570272d2 | 2616 | hwmgr->dpm_level = level; |
599a7e9f RZ |
2617 | break; |
2618 | case AMD_DPM_FORCED_LEVEL_AUTO: | |
2619 | ret = smu7_unforce_dpm_levels(hwmgr); | |
2620 | if (ret) | |
2621 | return ret; | |
570272d2 | 2622 | hwmgr->dpm_level = level; |
599a7e9f | 2623 | break; |
570272d2 RZ |
2624 | case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: |
2625 | case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: | |
2626 | case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK: | |
2627 | case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: | |
2628 | ret = smu7_get_profiling_clk(hwmgr, level, &sclk_mask, &mclk_mask, &pcie_mask); | |
2629 | if (ret) | |
2630 | return ret; | |
2631 | hwmgr->dpm_level = level; | |
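| /* The masks hold level indices; shift each into a one-hot enable | |
| * mask for the forced level. */ | |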
2632 | smu7_force_clock_level(hwmgr, PP_SCLK, 1<<sclk_mask); | |
2633 | smu7_force_clock_level(hwmgr, PP_MCLK, 1<<mclk_mask); | |
2634 | smu7_force_clock_level(hwmgr, PP_PCIE, 1<<pcie_mask); | |
5ada90d5 | 2635 | |
570272d2 | 2636 | break; |
cb256cc3 RZ |
2637 | case AMD_DPM_FORCED_LEVEL_MANUAL: |
2638 | hwmgr->dpm_level = level; | |
2639 | break; | |
570272d2 | 2640 | case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT: |
599a7e9f RZ |
2641 | default: |
2642 | break; | |
2643 | } | |
2644 | ||
5ada90d5 | 2645 | if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->saved_dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) |
570272d2 | 2646 | smu7_fan_ctrl_set_fan_speed_percent(hwmgr, 100); |
5ada90d5 | 2647 | else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->saved_dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) |
570272d2 | 2648 | smu7_fan_ctrl_reset_fan_speed_to_default(hwmgr); |
599a7e9f | 2649 | |
570272d2 | 2650 | return 0; |
599a7e9f RZ |
2651 | } |
2652 | ||
2653 | static int smu7_get_power_state_size(struct pp_hwmgr *hwmgr) | |
2654 | { | |
2655 | return sizeof(struct smu7_power_state); | |
2656 | } | |
2657 | ||
09be4a52 AD |
2658 | static int smu7_vblank_too_short(struct pp_hwmgr *hwmgr, |
2659 | uint32_t vblank_time_us) | |
2660 | { | |
2661 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
2662 | uint32_t switch_limit_us; | |
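| /* Worst-case MCLK switch latency in microseconds; the switch must | |
| * fit within the display vblank to avoid visible corruption. */ | |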
2663 | ||
2664 | switch (hwmgr->chip_id) { | |
2665 | case CHIP_POLARIS10: | |
2666 | case CHIP_POLARIS11: | |
2667 | case CHIP_POLARIS12: | |
2668 | switch_limit_us = data->is_memory_gddr5 ? 190 : 150; | |
2669 | break; | |
2670 | default: | |
2671 | switch_limit_us = data->is_memory_gddr5 ? 450 : 150; | |
2672 | break; | |
2673 | } | |
2674 | ||
2675 | return vblank_time_us < switch_limit_us; | |
2679 | } | |
599a7e9f RZ |
2680 | |
2681 | static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, | |
2682 | struct pp_power_state *request_ps, | |
2683 | const struct pp_power_state *current_ps) | |
2684 | { | |
2685 | ||
2686 | struct smu7_power_state *smu7_ps = | |
2687 | cast_phw_smu7_power_state(&request_ps->hardware); | |
2688 | uint32_t sclk; | |
2689 | uint32_t mclk; | |
2690 | struct PP_Clocks minimum_clocks = {0}; | |
2691 | bool disable_mclk_switching; | |
2692 | bool disable_mclk_switching_for_frame_lock; | |
2693 | struct cgs_display_info info = {0}; | |
09be4a52 | 2694 | struct cgs_mode_info mode_info = {0}; |
599a7e9f RZ |
2695 | const struct phm_clock_and_voltage_limits *max_limits; |
2696 | uint32_t i; | |
2697 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
2698 | struct phm_ppt_v1_information *table_info = | |
2699 | (struct phm_ppt_v1_information *)(hwmgr->pptable); | |
2700 | int32_t count; | |
2701 | int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0; | |
2702 | ||
09be4a52 | 2703 | info.mode_info = &mode_info; |
599a7e9f RZ |
2704 | data->battery_state = (PP_StateUILabel_Battery == |
2705 | request_ps->classification.ui_label); | |
2706 | ||
2707 | PP_ASSERT_WITH_CODE(smu7_ps->performance_level_count == 2, | |
2708 | "VI should always have 2 performance levels", | |
2709 | ); | |
2710 | ||
2711 | max_limits = (PP_PowerSource_AC == hwmgr->power_source) ? | |
2712 | &(hwmgr->dyn_state.max_clock_voltage_on_ac) : | |
2713 | &(hwmgr->dyn_state.max_clock_voltage_on_dc); | |
2714 | ||
2715 | /* Cap clock DPM tables at DC MAX if it is in DC. */ | |
2716 | if (PP_PowerSource_DC == hwmgr->power_source) { | |
2717 | for (i = 0; i < smu7_ps->performance_level_count; i++) { | |
2718 | if (smu7_ps->performance_levels[i].memory_clock > max_limits->mclk) | |
2719 | smu7_ps->performance_levels[i].memory_clock = max_limits->mclk; | |
2720 | if (smu7_ps->performance_levels[i].engine_clock > max_limits->sclk) | |
2721 | smu7_ps->performance_levels[i].engine_clock = max_limits->sclk; | |
2722 | } | |
2723 | } | |
2724 | ||
2725 | smu7_ps->vce_clks.evclk = hwmgr->vce_arbiter.evclk; | |
2726 | smu7_ps->vce_clks.ecclk = hwmgr->vce_arbiter.ecclk; | |
2727 | ||
2728 | cgs_get_active_displays_info(hwmgr->device, &info); | |
2729 | ||
599a7e9f RZ |
2730 | minimum_clocks.engineClock = hwmgr->display_config.min_core_set_clock; |
2731 | minimum_clocks.memoryClock = hwmgr->display_config.min_mem_set_clock; | |
2732 | ||
2733 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, | |
2734 | PHM_PlatformCaps_StablePState)) { | |
2735 | max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac); | |
2736 | stable_pstate_sclk = (max_limits->sclk * 75) / 100; | |
2737 | ||
2738 | for (count = table_info->vdd_dep_on_sclk->count - 1; | |
2739 | count >= 0; count--) { | |
2740 | if (stable_pstate_sclk >= | |
2741 | table_info->vdd_dep_on_sclk->entries[count].clk) { | |
2742 | stable_pstate_sclk = | |
2743 | table_info->vdd_dep_on_sclk->entries[count].clk; | |
2744 | break; | |
2745 | } | |
2746 | } | |
2747 | ||
2748 | if (count < 0) | |
2749 | stable_pstate_sclk = table_info->vdd_dep_on_sclk->entries[0].clk; | |
2750 | ||
2751 | stable_pstate_mclk = max_limits->mclk; | |
2752 | ||
2753 | minimum_clocks.engineClock = stable_pstate_sclk; | |
2754 | minimum_clocks.memoryClock = stable_pstate_mclk; | |
2755 | } | |
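/* Example of the snap-down above (illustrative numbers): with
 * max_limits->sclk = 100000 the target is 75000; the loop walks the
 * vdd_dep_on_sclk table from the top and picks the highest entry at or
 * below 75000, falling back to the lowest entry if none qualifies.
 */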
2756 | ||
2757 | if (minimum_clocks.engineClock < hwmgr->gfx_arbiter.sclk) | |
2758 | minimum_clocks.engineClock = hwmgr->gfx_arbiter.sclk; | |
2759 | ||
2760 | if (minimum_clocks.memoryClock < hwmgr->gfx_arbiter.mclk) | |
2761 | minimum_clocks.memoryClock = hwmgr->gfx_arbiter.mclk; | |
2762 | ||
2763 | smu7_ps->sclk_threshold = hwmgr->gfx_arbiter.sclk_threshold; | |
2764 | ||
2765 | if (0 != hwmgr->gfx_arbiter.sclk_over_drive) { | |
2766 | PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.sclk_over_drive <= | |
2767 | hwmgr->platform_descriptor.overdriveLimit.engineClock), | |
2768 | "Overdrive sclk exceeds limit", | |
2769 | hwmgr->gfx_arbiter.sclk_over_drive = | |
2770 | hwmgr->platform_descriptor.overdriveLimit.engineClock); | |
2771 | ||
2772 | if (hwmgr->gfx_arbiter.sclk_over_drive >= hwmgr->gfx_arbiter.sclk) | |
2773 | smu7_ps->performance_levels[1].engine_clock = | |
2774 | hwmgr->gfx_arbiter.sclk_over_drive; | |
2775 | } | |
2776 | ||
2777 | if (0 != hwmgr->gfx_arbiter.mclk_over_drive) { | |
2778 | PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.mclk_over_drive <= | |
2779 | hwmgr->platform_descriptor.overdriveLimit.memoryClock), | |
2780 | "Overdrive mclk exceeds limit", | |
2781 | hwmgr->gfx_arbiter.mclk_over_drive = | |
2782 | hwmgr->platform_descriptor.overdriveLimit.memoryClock); | |
2783 | ||
2784 | if (hwmgr->gfx_arbiter.mclk_over_drive >= hwmgr->gfx_arbiter.mclk) | |
2785 | smu7_ps->performance_levels[1].memory_clock = | |
2786 | hwmgr->gfx_arbiter.mclk_over_drive; | |
2787 | } | |
2788 | ||
2789 | disable_mclk_switching_for_frame_lock = phm_cap_enabled( | |
2790 | hwmgr->platform_descriptor.platformCaps, | |
2791 | PHM_PlatformCaps_DisableMclkSwitchingForFrameLock); | |
2792 | ||
2793 | ||
09be4a52 AD |
2794 | disable_mclk_switching = ((1 < info.display_count) || |
2795 | disable_mclk_switching_for_frame_lock || | |
2275a3a2 AD |
2796 | smu7_vblank_too_short(hwmgr, mode_info.vblank_time_us) || |
2797 | (mode_info.refresh_rate > 120)); | |
599a7e9f RZ |
2798 | |
2799 | sclk = smu7_ps->performance_levels[0].engine_clock; | |
2800 | mclk = smu7_ps->performance_levels[0].memory_clock; | |
2801 | ||
2802 | if (disable_mclk_switching) | |
2803 | mclk = smu7_ps->performance_levels | |
2804 | [smu7_ps->performance_level_count - 1].memory_clock; | |
2805 | ||
2806 | if (sclk < minimum_clocks.engineClock) | |
2807 | sclk = (minimum_clocks.engineClock > max_limits->sclk) ? | |
2808 | max_limits->sclk : minimum_clocks.engineClock; | |
2809 | ||
2810 | if (mclk < minimum_clocks.memoryClock) | |
2811 | mclk = (minimum_clocks.memoryClock > max_limits->mclk) ? | |
2812 | max_limits->mclk : minimum_clocks.memoryClock; | |
2813 | ||
2814 | smu7_ps->performance_levels[0].engine_clock = sclk; | |
2815 | smu7_ps->performance_levels[0].memory_clock = mclk; | |
2816 | ||
2817 | smu7_ps->performance_levels[1].engine_clock = | |
2818 | (smu7_ps->performance_levels[1].engine_clock >= | |
2819 | smu7_ps->performance_levels[0].engine_clock) ? | |
2820 | smu7_ps->performance_levels[1].engine_clock : | |
2821 | smu7_ps->performance_levels[0].engine_clock; | |
2822 | ||
2823 | if (disable_mclk_switching) { | |
2824 | if (mclk < smu7_ps->performance_levels[1].memory_clock) | |
2825 | mclk = smu7_ps->performance_levels[1].memory_clock; | |
2826 | ||
2827 | smu7_ps->performance_levels[0].memory_clock = mclk; | |
2828 | smu7_ps->performance_levels[1].memory_clock = mclk; | |
2829 | } else { | |
2830 | if (smu7_ps->performance_levels[1].memory_clock < | |
2831 | smu7_ps->performance_levels[0].memory_clock) | |
2832 | smu7_ps->performance_levels[1].memory_clock = | |
2833 | smu7_ps->performance_levels[0].memory_clock; | |
2834 | } | |
2835 | ||
2836 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, | |
2837 | PHM_PlatformCaps_StablePState)) { | |
2838 | for (i = 0; i < smu7_ps->performance_level_count; i++) { | |
2839 | smu7_ps->performance_levels[i].engine_clock = stable_pstate_sclk; | |
2840 | smu7_ps->performance_levels[i].memory_clock = stable_pstate_mclk; | |
2841 | smu7_ps->performance_levels[i].pcie_gen = data->pcie_gen_performance.max; | |
2842 | smu7_ps->performance_levels[i].pcie_lane = data->pcie_lane_performance.max; | |
2843 | } | |
2844 | } | |
2845 | return 0; | |
2846 | } | |
2847 | ||
2848 | ||
2849 | static int smu7_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low) | |
2850 | { | |
2851 | struct pp_power_state *ps; | |
2852 | struct smu7_power_state *smu7_ps; | |
2853 | ||
2854 | if (hwmgr == NULL) | |
2855 | return -EINVAL; | |
2856 | ||
2857 | ps = hwmgr->request_ps; | |
2858 | ||
2859 | if (ps == NULL) | |
2860 | return -EINVAL; | |
2861 | ||
2862 | smu7_ps = cast_phw_smu7_power_state(&ps->hardware); | |
2863 | ||
2864 | if (low) | |
2865 | return smu7_ps->performance_levels[0].memory_clock; | |
2866 | else | |
2867 | return smu7_ps->performance_levels | |
2868 | [smu7_ps->performance_level_count-1].memory_clock; | |
2869 | } | |
2870 | ||
2871 | static int smu7_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low) | |
2872 | { | |
2873 | struct pp_power_state *ps; | |
2874 | struct smu7_power_state *smu7_ps; | |
2875 | ||
2876 | if (hwmgr == NULL) | |
2877 | return -EINVAL; | |
2878 | ||
2879 | ps = hwmgr->request_ps; | |
2880 | ||
2881 | if (ps == NULL) | |
2882 | return -EINVAL; | |
2883 | ||
2884 | smu7_ps = cast_phw_smu7_power_state(&ps->hardware); | |
2885 | ||
2886 | if (low) | |
2887 | return smu7_ps->performance_levels[0].engine_clock; | |
2888 | else | |
2889 | return smu7_ps->performance_levels | |
2890 | [smu7_ps->performance_level_count-1].engine_clock; | |
2891 | } | |
2892 | ||
2893 | static int smu7_dpm_patch_boot_state(struct pp_hwmgr *hwmgr, | |
2894 | struct pp_hw_power_state *hw_ps) | |
2895 | { | |
2896 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
2897 | struct smu7_power_state *ps = (struct smu7_power_state *)hw_ps; | |
2898 | ATOM_FIRMWARE_INFO_V2_2 *fw_info; | |
2899 | uint16_t size; | |
2900 | uint8_t frev, crev; | |
2901 | int index = GetIndexIntoMasterTable(DATA, FirmwareInfo); | |
2902 | ||
2903 | /* First retrieve the Boot clocks and VDDC from the firmware info table. | |
2904 | * We assume here that fw_info is unchanged if this call fails. | |
2905 | */ | |
2906 | fw_info = (ATOM_FIRMWARE_INFO_V2_2 *)cgs_atom_get_data_table( | |
2907 | hwmgr->device, index, | |
2908 | &size, &frev, &crev); | |
2909 | if (!fw_info) | |
2910 | /* During a test, there is no firmware info table. */ | |
2911 | return 0; | |
2912 | ||
2913 | /* Patch the state. */ | |
2914 | data->vbios_boot_state.sclk_bootup_value = | |
2915 | le32_to_cpu(fw_info->ulDefaultEngineClock); | |
2916 | data->vbios_boot_state.mclk_bootup_value = | |
2917 | le32_to_cpu(fw_info->ulDefaultMemoryClock); | |
2918 | data->vbios_boot_state.mvdd_bootup_value = | |
2919 | le16_to_cpu(fw_info->usBootUpMVDDCVoltage); | |
2920 | data->vbios_boot_state.vddc_bootup_value = | |
2921 | le16_to_cpu(fw_info->usBootUpVDDCVoltage); | |
2922 | data->vbios_boot_state.vddci_bootup_value = | |
2923 | le16_to_cpu(fw_info->usBootUpVDDCIVoltage); | |
2924 | data->vbios_boot_state.pcie_gen_bootup_value = | |
2925 | smu7_get_current_pcie_speed(hwmgr); | |
2926 | ||
2927 | data->vbios_boot_state.pcie_lane_bootup_value = | |
2928 | (uint16_t)smu7_get_current_pcie_lane_number(hwmgr); | |
2929 | ||
2930 | /* set boot power state */ | |
2931 | ps->performance_levels[0].memory_clock = data->vbios_boot_state.mclk_bootup_value; | |
2932 | ps->performance_levels[0].engine_clock = data->vbios_boot_state.sclk_bootup_value; | |
2933 | ps->performance_levels[0].pcie_gen = data->vbios_boot_state.pcie_gen_bootup_value; | |
2934 | ps->performance_levels[0].pcie_lane = data->vbios_boot_state.pcie_lane_bootup_value; | |
2935 | ||
2936 | return 0; | |
2937 | } | |
2938 | ||
2939 | static int smu7_get_number_of_powerplay_table_entries(struct pp_hwmgr *hwmgr) | |
2940 | { | |
2941 | int result; | |
2942 | unsigned long ret = 0; | |
2943 | ||
2944 | if (hwmgr->pp_table_version == PP_TABLE_V0) { | |
2945 | result = pp_tables_get_num_of_entries(hwmgr, &ret); | |
2946 | return result ? 0 : ret; | |
2947 | } else if (hwmgr->pp_table_version == PP_TABLE_V1) { | |
2948 | result = get_number_of_powerplay_table_entries_v1_0(hwmgr); | |
2949 | return result; | |
2950 | } | |
2951 | return 0; | |
2952 | } | |
2953 | ||
2954 | static int smu7_get_pp_table_entry_callback_func_v1(struct pp_hwmgr *hwmgr, | |
2955 | void *state, struct pp_power_state *power_state, | |
2956 | void *pp_table, uint32_t classification_flag) | |
2957 | { | |
2958 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
2959 | struct smu7_power_state *smu7_power_state = | |
2960 | (struct smu7_power_state *)(&(power_state->hardware)); | |
2961 | struct smu7_performance_level *performance_level; | |
2962 | ATOM_Tonga_State *state_entry = (ATOM_Tonga_State *)state; | |
2963 | ATOM_Tonga_POWERPLAYTABLE *powerplay_table = | |
2964 | (ATOM_Tonga_POWERPLAYTABLE *)pp_table; | |
2965 | PPTable_Generic_SubTable_Header *sclk_dep_table = | |
2966 | (PPTable_Generic_SubTable_Header *) | |
2967 | (((unsigned long)powerplay_table) + | |
2968 | le16_to_cpu(powerplay_table->usSclkDependencyTableOffset)); | |
2969 | ||
2970 | ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table = | |
2971 | (ATOM_Tonga_MCLK_Dependency_Table *) | |
2972 | (((unsigned long)powerplay_table) + | |
2973 | le16_to_cpu(powerplay_table->usMclkDependencyTableOffset)); | |
2974 | ||
2975 | /* The following fields are not initialized here: id, orderedList, allStatesList */ | |
2976 | power_state->classification.ui_label = | |
2977 | (le16_to_cpu(state_entry->usClassification) & | |
2978 | ATOM_PPLIB_CLASSIFICATION_UI_MASK) >> | |
2979 | ATOM_PPLIB_CLASSIFICATION_UI_SHIFT; | |
2980 | power_state->classification.flags = classification_flag; | |
2981 | /* NOTE: There is a classification2 flag in BIOS that is not being used right now */ | |
2982 | ||
2983 | power_state->classification.temporary_state = false; | |
2984 | power_state->classification.to_be_deleted = false; | |
2985 | ||
2986 | power_state->validation.disallowOnDC = | |
2987 | (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) & | |
2988 | ATOM_Tonga_DISALLOW_ON_DC)); | |
2989 | ||
2990 | power_state->pcie.lanes = 0; | |
2991 | ||
2992 | power_state->display.disableFrameModulation = false; | |
2993 | power_state->display.limitRefreshrate = false; | |
2994 | power_state->display.enableVariBright = | |
2995 | (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) & | |
2996 | ATOM_Tonga_ENABLE_VARIBRIGHT)); | |
2997 | ||
2998 | power_state->validation.supportedPowerLevels = 0; | |
2999 | power_state->uvd_clocks.VCLK = 0; | |
3000 | power_state->uvd_clocks.DCLK = 0; | |
3001 | power_state->temperatures.min = 0; | |
3002 | power_state->temperatures.max = 0; | |
3003 | ||
3004 | performance_level = &(smu7_power_state->performance_levels | |
3005 | [smu7_power_state->performance_level_count++]); | |
3006 | ||
3007 | PP_ASSERT_WITH_CODE( | |
3008 | (smu7_power_state->performance_level_count < smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_GRAPHICS)), | |
3009 | "Performance levels exceeds SMC limit!", | |
3010 | return -EINVAL); | |
3011 | ||
3012 | PP_ASSERT_WITH_CODE( | |
3013 | (smu7_power_state->performance_level_count <= | |
3014 | hwmgr->platform_descriptor.hardwareActivityPerformanceLevels), | |
3015 | "Performance levels exceeds Driver limit!", | |
3016 | return -EINVAL); | |
3017 | ||
3018 | /* Performance levels are arranged from low to high. */ | |
3019 | performance_level->memory_clock = mclk_dep_table->entries | |
3020 | [state_entry->ucMemoryClockIndexLow].ulMclk; | |
3021 | if (sclk_dep_table->ucRevId == 0) | |
3022 | performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries | |
3023 | [state_entry->ucEngineClockIndexLow].ulSclk; | |
3024 | else if (sclk_dep_table->ucRevId == 1) | |
3025 | performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries | |
3026 | [state_entry->ucEngineClockIndexLow].ulSclk; | |
3027 | performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap, | |
3028 | state_entry->ucPCIEGenLow); | |
3029 | performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap, | |
3030 | state_entry->ucPCIELaneLow); | |
3031 | ||
3032 | performance_level = &(smu7_power_state->performance_levels | |
3033 | [smu7_power_state->performance_level_count++]); | |
3034 | performance_level->memory_clock = mclk_dep_table->entries | |
3035 | [state_entry->ucMemoryClockIndexHigh].ulMclk; | |
3036 | ||
3037 | if (sclk_dep_table->ucRevId == 0) | |
3038 | performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries | |
3039 | [state_entry->ucEngineClockIndexHigh].ulSclk; | |
3040 | else if (sclk_dep_table->ucRevId == 1) | |
3041 | performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries | |
3042 | [state_entry->ucEngineClockIndexHigh].ulSclk; | |
3043 | ||
3044 | performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap, | |
3045 | state_entry->ucPCIEGenHigh); | |
3046 | performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap, | |
3047 | state_entry->ucPCIELaneHigh); | |
3048 | ||
3049 | return 0; | |
3050 | } | |
3051 | ||
3052 | static int smu7_get_pp_table_entry_v1(struct pp_hwmgr *hwmgr, | |
3053 | unsigned long entry_index, struct pp_power_state *state) | |
3054 | { | |
3055 | int result; | |
3056 | struct smu7_power_state *ps; | |
3057 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
3058 | struct phm_ppt_v1_information *table_info = | |
3059 | (struct phm_ppt_v1_information *)(hwmgr->pptable); | |
3060 | struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table = | |
3061 | table_info->vdd_dep_on_mclk; | |
3062 | ||
3063 | state->hardware.magic = PHM_VIslands_Magic; | |
3064 | ||
3065 | ps = (struct smu7_power_state *)(&state->hardware); | |
3066 | ||
3067 | result = get_powerplay_table_entry_v1_0(hwmgr, entry_index, state, | |
3068 | smu7_get_pp_table_entry_callback_func_v1); | |
3069 | ||
3070 | /* This is the earliest point at which we have both the dependency table | |
3071 | * and the VBIOS boot state, since PP_Tables_GetPowerPlayTableEntry | |
3072 | * retrieves the VBIOS boot state. If there is only one VDDCI/MCLK | |
3073 | * level, check that it matches the VBIOS boot state. */ |
3074 | if (dep_mclk_table != NULL && dep_mclk_table->count == 1) { | |
3075 | if (dep_mclk_table->entries[0].clk != | |
3076 | data->vbios_boot_state.mclk_bootup_value) | |
b5c11b8e | 3077 | pr_err("Single MCLK entry VDDCI/MCLK dependency table " |
599a7e9f RZ |
3078 | "does not match VBIOS boot MCLK level"); |
3079 | if (dep_mclk_table->entries[0].vddci != | |
3080 | data->vbios_boot_state.vddci_bootup_value) | |
b5c11b8e | 3081 | pr_err("Single VDDCI entry VDDCI/MCLK dependency table " |
599a7e9f RZ |
3082 | "does not match VBIOS boot VDDCI level"); |
3083 | } | |
3084 | ||
3085 | /* set DC compatible flag if this state supports DC */ | |
3086 | if (!state->validation.disallowOnDC) | |
3087 | ps->dc_compatible = true; | |
3088 | ||
3089 | if (state->classification.flags & PP_StateClassificationFlag_ACPI) | |
3090 | data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen; | |
3091 | ||
3092 | ps->uvd_clks.vclk = state->uvd_clocks.VCLK; | |
3093 | ps->uvd_clks.dclk = state->uvd_clocks.DCLK; | |
3094 | ||
3095 | if (!result) { | |
3096 | uint32_t i; | |
3097 | ||
3098 | switch (state->classification.ui_label) { | |
3099 | case PP_StateUILabel_Performance: | |
3100 | data->use_pcie_performance_levels = true; | |
3101 | for (i = 0; i < ps->performance_level_count; i++) { | |
3102 | if (data->pcie_gen_performance.max < | |
3103 | ps->performance_levels[i].pcie_gen) | |
3104 | data->pcie_gen_performance.max = | |
3105 | ps->performance_levels[i].pcie_gen; | |
3106 | ||
3107 | if (data->pcie_gen_performance.min > | |
3108 | ps->performance_levels[i].pcie_gen) | |
3109 | data->pcie_gen_performance.min = | |
3110 | ps->performance_levels[i].pcie_gen; | |
3111 | ||
3112 | if (data->pcie_lane_performance.max < | |
3113 | ps->performance_levels[i].pcie_lane) | |
3114 | data->pcie_lane_performance.max = | |
3115 | ps->performance_levels[i].pcie_lane; | |
3116 | if (data->pcie_lane_performance.min > | |
3117 | ps->performance_levels[i].pcie_lane) | |
3118 | data->pcie_lane_performance.min = | |
3119 | ps->performance_levels[i].pcie_lane; | |
3120 | } | |
3121 | break; | |
3122 | case PP_StateUILabel_Battery: | |
3123 | data->use_pcie_power_saving_levels = true; | |
3124 | ||
3125 | for (i = 0; i < ps->performance_level_count; i++) { | |
3126 | if (data->pcie_gen_power_saving.max < | |
3127 | ps->performance_levels[i].pcie_gen) | |
3128 | data->pcie_gen_power_saving.max = | |
3129 | ps->performance_levels[i].pcie_gen; | |
3130 | ||
3131 | if (data->pcie_gen_power_saving.min > | |
3132 | ps->performance_levels[i].pcie_gen) | |
3133 | data->pcie_gen_power_saving.min = | |
3134 | ps->performance_levels[i].pcie_gen; | |
3135 | ||
3136 | if (data->pcie_lane_power_saving.max < | |
3137 | ps->performance_levels[i].pcie_lane) | |
3138 | data->pcie_lane_power_saving.max = | |
3139 | ps->performance_levels[i].pcie_lane; | |
3140 | ||
3141 | if (data->pcie_lane_power_saving.min > | |
3142 | ps->performance_levels[i].pcie_lane) | |
3143 | data->pcie_lane_power_saving.min = | |
3144 | ps->performance_levels[i].pcie_lane; | |
3145 | } | |
3146 | break; | |
3147 | default: | |
3148 | break; | |
3149 | } | |
3150 | } | |
3151 | return 0; | |
3152 | } | |
3153 | ||
3154 | static int smu7_get_pp_table_entry_callback_func_v0(struct pp_hwmgr *hwmgr, | |
3155 | struct pp_hw_power_state *power_state, | |
3156 | unsigned int index, const void *clock_info) | |
3157 | { | |
3158 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
3159 | struct smu7_power_state *ps = cast_phw_smu7_power_state(power_state); | |
3160 | const ATOM_PPLIB_CI_CLOCK_INFO *visland_clk_info = clock_info; | |
3161 | struct smu7_performance_level *performance_level; | |
3162 | uint32_t engine_clock, memory_clock; | |
3163 | uint16_t pcie_gen_from_bios; | |
3164 | ||
3165 | engine_clock = visland_clk_info->ucEngineClockHigh << 16 | visland_clk_info->usEngineClockLow; | |
3166 | memory_clock = visland_clk_info->ucMemoryClockHigh << 16 | visland_clk_info->usMemoryClockLow; | |
3167 | ||
3168 | if (!(data->mc_micro_code_feature & DISABLE_MC_LOADMICROCODE) && memory_clock > data->highest_mclk) | |
3169 | data->highest_mclk = memory_clock; | |
3170 | ||
599a7e9f RZ |
3171 | PP_ASSERT_WITH_CODE( |
3172 | (ps->performance_level_count < smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_GRAPHICS)), | |
3173 | "Performance levels exceeds SMC limit!", | |
3174 | return -EINVAL); | |
3175 | ||
3176 | PP_ASSERT_WITH_CODE( | |
da7800a8 | 3177 | (ps->performance_level_count < |
599a7e9f | 3178 | hwmgr->platform_descriptor.hardwareActivityPerformanceLevels), |
da7800a8 RZ |
3179 | "Performance levels exceeds Driver limit, Skip!", |
3180 | return 0); | |
3181 | ||
3182 | performance_level = &(ps->performance_levels | |
3183 | [ps->performance_level_count++]); | |
599a7e9f RZ |
3184 | |
3185 | /* Performance levels are arranged from low to high. */ | |
3186 | performance_level->memory_clock = memory_clock; | |
3187 | performance_level->engine_clock = engine_clock; | |
3188 | ||
3189 | pcie_gen_from_bios = visland_clk_info->ucPCIEGen; | |
3190 | ||
3191 | performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap, pcie_gen_from_bios); | |
3192 | performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap, visland_clk_info->usPCIELane); | |
3193 | ||
3194 | return 0; | |
3195 | } | |
3196 | ||
3197 | static int smu7_get_pp_table_entry_v0(struct pp_hwmgr *hwmgr, | |
3198 | unsigned long entry_index, struct pp_power_state *state) | |
3199 | { | |
3200 | int result; | |
3201 | struct smu7_power_state *ps; | |
3202 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
3203 | struct phm_clock_voltage_dependency_table *dep_mclk_table = | |
3204 | hwmgr->dyn_state.vddci_dependency_on_mclk; | |
3205 | ||
3206 | memset(&state->hardware, 0x00, sizeof(struct pp_hw_power_state)); | |
3207 | ||
3208 | state->hardware.magic = PHM_VIslands_Magic; | |
3209 | ||
3210 | ps = (struct smu7_power_state *)(&state->hardware); | |
3211 | ||
3212 | result = pp_tables_get_entry(hwmgr, entry_index, state, | |
3213 | smu7_get_pp_table_entry_callback_func_v0); | |
3214 | ||
3215 | /* | |
3216 | * This is the earliest point at which we have both the dependency | |
3217 | * table and the VBIOS boot state, since | |
3218 | * PP_Tables_GetPowerPlayTableEntry retrieves the VBIOS boot | |
3219 | * state. If there is only one VDDCI/MCLK level, check that it | |
3220 | * matches the VBIOS boot state. | |
3221 | */ | |
3222 | if (dep_mclk_table != NULL && dep_mclk_table->count == 1) { | |
3223 | if (dep_mclk_table->entries[0].clk != | |
3224 | data->vbios_boot_state.mclk_bootup_value) | |
b5c11b8e | 3225 | pr_err("Single MCLK entry VDDCI/MCLK dependency table " |
599a7e9f RZ |
3226 | "does not match VBIOS boot MCLK level"); |
3227 | if (dep_mclk_table->entries[0].v != | |
3228 | data->vbios_boot_state.vddci_bootup_value) | |
b5c11b8e | 3229 | pr_err("Single VDDCI entry VDDCI/MCLK dependency table " |
599a7e9f RZ |
3230 | "does not match VBIOS boot VDDCI level"); |
3231 | } | |
3232 | ||
3233 | /* set DC compatible flag if this state supports DC */ | |
3234 | if (!state->validation.disallowOnDC) | |
3235 | ps->dc_compatible = true; | |
3236 | ||
3237 | if (state->classification.flags & PP_StateClassificationFlag_ACPI) | |
3238 | data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen; | |
3239 | ||
3240 | ps->uvd_clks.vclk = state->uvd_clocks.VCLK; | |
3241 | ps->uvd_clks.dclk = state->uvd_clocks.DCLK; | |
3242 | ||
3243 | if (!result) { | |
3244 | uint32_t i; | |
3245 | ||
3246 | switch (state->classification.ui_label) { | |
3247 | case PP_StateUILabel_Performance: | |
3248 | data->use_pcie_performance_levels = true; | |
3249 | ||
3250 | for (i = 0; i < ps->performance_level_count; i++) { | |
3251 | if (data->pcie_gen_performance.max < | |
3252 | ps->performance_levels[i].pcie_gen) | |
3253 | data->pcie_gen_performance.max = | |
3254 | ps->performance_levels[i].pcie_gen; | |
3255 | ||
3256 | if (data->pcie_gen_performance.min > | |
3257 | ps->performance_levels[i].pcie_gen) | |
3258 | data->pcie_gen_performance.min = | |
3259 | ps->performance_levels[i].pcie_gen; | |
3260 | ||
3261 | if (data->pcie_lane_performance.max < | |
3262 | ps->performance_levels[i].pcie_lane) | |
3263 | data->pcie_lane_performance.max = | |
3264 | ps->performance_levels[i].pcie_lane; | |
3265 | ||
3266 | if (data->pcie_lane_performance.min > | |
3267 | ps->performance_levels[i].pcie_lane) | |
3268 | data->pcie_lane_performance.min = | |
3269 | ps->performance_levels[i].pcie_lane; | |
3270 | } | |
3271 | break; | |
3272 | case PP_StateUILabel_Battery: | |
3273 | data->use_pcie_power_saving_levels = true; | |
3274 | ||
3275 | for (i = 0; i < ps->performance_level_count; i++) { | |
3276 | if (data->pcie_gen_power_saving.max < | |
3277 | ps->performance_levels[i].pcie_gen) | |
3278 | data->pcie_gen_power_saving.max = | |
3279 | ps->performance_levels[i].pcie_gen; | |
3280 | ||
3281 | if (data->pcie_gen_power_saving.min > | |
3282 | ps->performance_levels[i].pcie_gen) | |
3283 | data->pcie_gen_power_saving.min = | |
3284 | ps->performance_levels[i].pcie_gen; | |
3285 | ||
3286 | if (data->pcie_lane_power_saving.max < | |
3287 | ps->performance_levels[i].pcie_lane) | |
3288 | data->pcie_lane_power_saving.max = | |
3289 | ps->performance_levels[i].pcie_lane; | |
3290 | ||
3291 | if (data->pcie_lane_power_saving.min > | |
3292 | ps->performance_levels[i].pcie_lane) | |
3293 | data->pcie_lane_power_saving.min = | |
3294 | ps->performance_levels[i].pcie_lane; | |
3295 | } | |
3296 | break; | |
3297 | default: | |
3298 | break; | |
3299 | } | |
3300 | } | |
3301 | return 0; | |
3302 | } | |
3303 | ||
3304 | static int smu7_get_pp_table_entry(struct pp_hwmgr *hwmgr, | |
3305 | unsigned long entry_index, struct pp_power_state *state) | |
3306 | { | |
3307 | if (hwmgr->pp_table_version == PP_TABLE_V0) | |
3308 | return smu7_get_pp_table_entry_v0(hwmgr, entry_index, state); | |
3309 | else if (hwmgr->pp_table_version == PP_TABLE_V1) | |
3310 | return smu7_get_pp_table_entry_v1(hwmgr, entry_index, state); | |
3311 | ||
3312 | return 0; | |
3313 | } | |
3314 | ||
2245b60f EH |
3315 | static int smu7_get_gpu_power(struct pp_hwmgr *hwmgr, |
3316 | struct pp_gpu_power *query) | |
3317 | { | |
3318 | PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr->smumgr, | |
3319 | PPSMC_MSG_PmStatusLogStart), | |
3320 | "Failed to start pm status log!", | |
3321 | return -1); | |
3322 | ||
a7c7bc4c | 3323 | msleep_interruptible(20); |
2245b60f EH |
3324 | |
3325 | PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr->smumgr, | |
3326 | PPSMC_MSG_PmStatusLogSample), | |
3327 | "Failed to sample pm status log!", | |
3328 | return -1); | |
3329 | ||
3330 | query->vddc_power = cgs_read_ind_register(hwmgr->device, | |
3331 | CGS_IND_REG__SMC, | |
3332 | ixSMU_PM_STATUS_40); | |
3333 | query->vddci_power = cgs_read_ind_register(hwmgr->device, | |
3334 | CGS_IND_REG__SMC, | |
3335 | ixSMU_PM_STATUS_49); | |
3336 | query->max_gpu_power = cgs_read_ind_register(hwmgr->device, | |
3337 | CGS_IND_REG__SMC, | |
3338 | ixSMU_PM_STATUS_94); | |
3339 | query->average_gpu_power = cgs_read_ind_register(hwmgr->device, | |
3340 | CGS_IND_REG__SMC, | |
3341 | ixSMU_PM_STATUS_95); | |
3342 | ||
3343 | return 0; | |
3344 | } | |
3345 | ||
9f8df7d7 TSD |
3346 | static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx, |
3347 | void *value, int *size) | |
a6e36952 TSD |
3348 | { |
3349 | uint32_t sclk, mclk, activity_percent; | |
3350 | uint32_t offset; | |
3351 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
3352 | ||
9f8df7d7 TSD |
3353 | /* size must be at least 4 bytes for all sensors */ |
3354 | if (*size < 4) | |
3355 | return -EINVAL; | |
3356 | ||
a6e36952 TSD |
3357 | switch (idx) { |
3358 | case AMDGPU_PP_SENSOR_GFX_SCLK: | |
3359 | smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency); | |
3360 | sclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); | |
cd7b0c66 | 3361 | *((uint32_t *)value) = sclk; |
9f8df7d7 | 3362 | *size = 4; |
a6e36952 TSD |
3363 | return 0; |
3364 | case AMDGPU_PP_SENSOR_GFX_MCLK: | |
3365 | smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency); | |
3366 | mclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); | |
cd7b0c66 | 3367 | *((uint32_t *)value) = mclk; |
9f8df7d7 | 3368 | *size = 4; |
a6e36952 TSD |
3369 | return 0; |
3370 | case AMDGPU_PP_SENSOR_GPU_LOAD: | |
3371 | offset = data->soft_regs_start + smum_get_offsetof(hwmgr->smumgr, | |
3372 | SMU_SoftRegisters, | |
3373 | AverageGraphicsActivity); | |
3374 | ||
3375 | activity_percent = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset); | |
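/* The soft register appears to report activity with 8 fractional bits;
 * adding 0x80 before the shift rounds to the nearest integer percent,
 * i.e. (raw + 128) / 256.
 */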
3376 | activity_percent += 0x80; | |
3377 | activity_percent >>= 8; | |
cd7b0c66 | 3378 | *((uint32_t *)value) = activity_percent > 100 ? 100 : activity_percent; |
9f8df7d7 | 3379 | *size = 4; |
a6e36952 TSD |
3380 | return 0; |
3381 | case AMDGPU_PP_SENSOR_GPU_TEMP: | |
cd7b0c66 | 3382 | *((uint32_t *)value) = smu7_thermal_get_temperature(hwmgr); |
9f8df7d7 | 3383 | *size = 4; |
a6e36952 | 3384 | return 0; |
3de4ec57 | 3385 | case AMDGPU_PP_SENSOR_UVD_POWER: |
cd7b0c66 | 3386 | *((uint32_t *)value) = data->uvd_power_gated ? 0 : 1; |
9f8df7d7 | 3387 | *size = 4; |
3de4ec57 TSD |
3388 | return 0; |
3389 | case AMDGPU_PP_SENSOR_VCE_POWER: | |
cd7b0c66 | 3390 | *((uint32_t *)value) = data->vce_power_gated ? 0 : 1; |
9f8df7d7 | 3391 | *size = 4; |
3de4ec57 | 3392 | return 0; |
2245b60f | 3393 | case AMDGPU_PP_SENSOR_GPU_POWER: |
9f8df7d7 TSD |
3394 | if (*size < sizeof(struct pp_gpu_power)) |
3395 | return -EINVAL; | |
3396 | *size = sizeof(struct pp_gpu_power); | |
2245b60f | 3397 | return smu7_get_gpu_power(hwmgr, (struct pp_gpu_power *)value); |
a6e36952 TSD |
3398 | default: |
3399 | return -EINVAL; | |
3400 | } | |
3401 | } | |
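/* Usage sketch for the sensor interface above (hypothetical caller, not
 * part of this file):
 *
 *	uint32_t sclk;
 *	int size = sizeof(sclk);
 *
 *	if (!smu7_read_sensor(hwmgr, AMDGPU_PP_SENSOR_GFX_SCLK, &sclk, &size))
 *		pr_info("current sclk: %u\n", sclk);
 */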
3402 | ||
599a7e9f RZ |
3403 | static int smu7_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input) |
3404 | { | |
3405 | const struct phm_set_power_state_input *states = | |
3406 | (const struct phm_set_power_state_input *)input; | |
3407 | const struct smu7_power_state *smu7_ps = | |
3408 | cast_const_phw_smu7_power_state(states->pnew_state); | |
3409 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
3410 | struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table); | |
3411 | uint32_t sclk = smu7_ps->performance_levels | |
3412 | [smu7_ps->performance_level_count - 1].engine_clock; | |
3413 | struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table); | |
3414 | uint32_t mclk = smu7_ps->performance_levels | |
3415 | [smu7_ps->performance_level_count - 1].memory_clock; | |
3416 | struct PP_Clocks min_clocks = {0}; | |
3417 | uint32_t i; | |
3418 | struct cgs_display_info info = {0}; | |
3419 | ||
3420 | data->need_update_smu7_dpm_table = 0; | |
3421 | ||
3422 | for (i = 0; i < sclk_table->count; i++) { | |
3423 | if (sclk == sclk_table->dpm_levels[i].value) | |
3424 | break; | |
3425 | } | |
3426 | ||
3427 | if (i >= sclk_table->count) | |
3428 | data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK; | |
3429 | else { | |
3430 | /* TODO: Check SCLK in DAL's minimum clocks | |
3431 | * in case DeepSleep divider update is required. | |
3432 | */ | |
3433 | if (data->display_timing.min_clock_in_sr != min_clocks.engineClockInSR && | |
3434 | (min_clocks.engineClockInSR >= SMU7_MINIMUM_ENGINE_CLOCK || | |
3435 | data->display_timing.min_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK)) | |
3436 | data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK; | |
3437 | } | |
3438 | ||
3439 | for (i = 0; i < mclk_table->count; i++) { | |
3440 | if (mclk == mclk_table->dpm_levels[i].value) | |
3441 | break; | |
3442 | } | |
3443 | ||
3444 | if (i >= mclk_table->count) | |
3445 | data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK; | |
3446 | ||
3447 | cgs_get_active_displays_info(hwmgr->device, &info); | |
3448 | ||
3449 | if (data->display_timing.num_existing_displays != info.display_count) | |
3450 | data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK; | |
3451 | ||
3452 | return 0; | |
3453 | } | |
3454 | ||
3455 | static uint16_t smu7_get_maximum_link_speed(struct pp_hwmgr *hwmgr, | |
3456 | const struct smu7_power_state *smu7_ps) | |
3457 | { | |
3458 | uint32_t i; | |
3459 | uint32_t sclk, max_sclk = 0; | |
3460 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
3461 | struct smu7_dpm_table *dpm_table = &data->dpm_table; | |
3462 | ||
3463 | for (i = 0; i < smu7_ps->performance_level_count; i++) { | |
3464 | sclk = smu7_ps->performance_levels[i].engine_clock; | |
3465 | if (max_sclk < sclk) | |
3466 | max_sclk = sclk; | |
3467 | } | |
3468 | ||
3469 | for (i = 0; i < dpm_table->sclk_table.count; i++) { | |
3470 | if (dpm_table->sclk_table.dpm_levels[i].value == max_sclk) | |
3471 | return (uint16_t) ((i >= dpm_table->pcie_speed_table.count) ? | |
3472 | dpm_table->pcie_speed_table.dpm_levels | |
3473 | [dpm_table->pcie_speed_table.count - 1].value : | |
3474 | dpm_table->pcie_speed_table.dpm_levels[i].value); | |
3475 | } | |
3476 | ||
3477 | return 0; | |
3478 | } | |
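/* Note: the SCLK level index found above is reused as an index into the
 * PCIe speed table; since that table can have fewer entries, the lookup
 * clamps to its last entry.
 */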
3479 | ||
3480 | static int smu7_request_link_speed_change_before_state_change( | |
3481 | struct pp_hwmgr *hwmgr, const void *input) | |
3482 | { | |
3483 | const struct phm_set_power_state_input *states = | |
3484 | (const struct phm_set_power_state_input *)input; | |
3485 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
3486 | const struct smu7_power_state *smu7_nps = | |
3487 | cast_const_phw_smu7_power_state(states->pnew_state); | |
3488 | const struct smu7_power_state *polaris10_cps = | |
3489 | cast_const_phw_smu7_power_state(states->pcurrent_state); | |
3490 | ||
3491 | uint16_t target_link_speed = smu7_get_maximum_link_speed(hwmgr, smu7_nps); | |
3492 | uint16_t current_link_speed; | |
3493 | ||
3494 | if (data->force_pcie_gen == PP_PCIEGenInvalid) | |
3495 | current_link_speed = smu7_get_maximum_link_speed(hwmgr, polaris10_cps); | |
3496 | else | |
3497 | current_link_speed = data->force_pcie_gen; | |
3498 | ||
3499 | data->force_pcie_gen = PP_PCIEGenInvalid; | |
3500 | data->pspp_notify_required = false; | |
3501 | ||
3502 | if (target_link_speed > current_link_speed) { | |
3503 | switch (target_link_speed) { | |
3504 | case PP_PCIEGen3: | |
3505 | if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN3, false)) | |
3506 | break; | |
3507 | data->force_pcie_gen = PP_PCIEGen2; | |
3508 | if (current_link_speed == PP_PCIEGen2) | |
3509 | break; | |
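/* deliberate fall-through: the Gen3 request failed, so try Gen2 next */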
3510 | case PP_PCIEGen2: | |
3511 | if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN2, false)) | |
3512 | break; | |
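/* fall through: the Gen2 request also failed; record the current speed */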
3513 | default: | |
3514 | data->force_pcie_gen = smu7_get_current_pcie_speed(hwmgr); | |
3515 | break; | |
3516 | } | |
3517 | } else { | |
3518 | if (target_link_speed < current_link_speed) | |
3519 | data->pspp_notify_required = true; | |
3520 | } | |
3521 | ||
3522 | return 0; | |
3523 | } | |
3524 | ||
3525 | static int smu7_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) | |
3526 | { | |
3527 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
3528 | ||
3529 | if (0 == data->need_update_smu7_dpm_table) | |
3530 | return 0; | |
3531 | ||
3532 | if ((0 == data->sclk_dpm_key_disabled) && | |
3533 | (data->need_update_smu7_dpm_table & | |
3534 | (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) { | |
3535 | PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr), | |
3536 | "Trying to freeze SCLK DPM when DPM is disabled", | |
3537 | ); | |
3538 | PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr, | |
3539 | PPSMC_MSG_SCLKDPM_FreezeLevel), | |
3540 | "Failed to freeze SCLK DPM during FreezeSclkMclkDPM Function!", | |
3541 | return -EINVAL); | |
3542 | } | |
3543 | ||
3544 | if ((0 == data->mclk_dpm_key_disabled) && | |
3545 | (data->need_update_smu7_dpm_table & | |
3546 | DPMTABLE_OD_UPDATE_MCLK)) { | |
3547 | PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr), | |
3548 | "Trying to freeze MCLK DPM when DPM is disabled", | |
3549 | ); | |
3550 | PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr, | |
3551 | PPSMC_MSG_MCLKDPM_FreezeLevel), | |
3552 | "Failed to freeze MCLK DPM during FreezeSclkMclkDPM Function!", | |
3553 | return -EINVAL); | |
3554 | } | |
3555 | ||
3556 | return 0; | |
3557 | } | |
3558 | ||
3559 | static int smu7_populate_and_upload_sclk_mclk_dpm_levels( | |
3560 | struct pp_hwmgr *hwmgr, const void *input) | |
3561 | { | |
3562 | int result = 0; | |
3563 | const struct phm_set_power_state_input *states = | |
3564 | (const struct phm_set_power_state_input *)input; | |
3565 | const struct smu7_power_state *smu7_ps = | |
3566 | cast_const_phw_smu7_power_state(states->pnew_state); | |
3567 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
3568 | uint32_t sclk = smu7_ps->performance_levels | |
3569 | [smu7_ps->performance_level_count - 1].engine_clock; | |
3570 | uint32_t mclk = smu7_ps->performance_levels | |
3571 | [smu7_ps->performance_level_count - 1].memory_clock; | |
3572 | struct smu7_dpm_table *dpm_table = &data->dpm_table; | |
3573 | ||
3574 | struct smu7_dpm_table *golden_dpm_table = &data->golden_dpm_table; | |
3575 | uint32_t dpm_count, clock_percent; | |
3576 | uint32_t i; | |
3577 | ||
3578 | if (0 == data->need_update_smu7_dpm_table) | |
3579 | return 0; | |
3580 | ||
3581 | if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) { | |
3582 | dpm_table->sclk_table.dpm_levels | |
3583 | [dpm_table->sclk_table.count - 1].value = sclk; | |
3584 | ||
3585 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinACSupport) || | |
3586 | phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinDCSupport)) { | |
3587 | /* Need to do calculation based on the golden DPM table | |
3588 | * as the Heatmap GPU Clock axis is also based on the default values | |
3589 | */ | |
3590 | PP_ASSERT_WITH_CODE( | |
3591 | (golden_dpm_table->sclk_table.dpm_levels | |
3592 | [golden_dpm_table->sclk_table.count - 1].value != 0), | |
3593 | "Divide by 0!", | |
3594 | return -EINVAL); | |
3595 | dpm_count = dpm_table->sclk_table.count < 2 ? 0 : dpm_table->sclk_table.count - 2; | |
3596 | ||
3597 | for (i = dpm_count; i > 1; i--) { | |
3598 | if (sclk > golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value) { | |
3599 | clock_percent = | |
3600 | ((sclk | |
3601 | - golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value | |
3602 | ) * 100) | |
3603 | / golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value; | |
3604 | ||
3605 | dpm_table->sclk_table.dpm_levels[i].value = | |
3606 | golden_dpm_table->sclk_table.dpm_levels[i].value + | |
3607 | (golden_dpm_table->sclk_table.dpm_levels[i].value * | |
3608 | clock_percent)/100; | |
3609 | ||
3610 | } else if (golden_dpm_table->sclk_table.dpm_levels[dpm_table->sclk_table.count-1].value > sclk) { | |
3611 | clock_percent = | |
3612 | ((golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count - 1].value | |
3613 | - sclk) * 100) | |
3614 | / golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value; | |
3615 | ||
3616 | dpm_table->sclk_table.dpm_levels[i].value = | |
3617 | golden_dpm_table->sclk_table.dpm_levels[i].value - | |
3618 | (golden_dpm_table->sclk_table.dpm_levels[i].value * | |
3619 | clock_percent) / 100; | |
3620 | } else | |
3621 | dpm_table->sclk_table.dpm_levels[i].value = | |
3622 | golden_dpm_table->sclk_table.dpm_levels[i].value; | |
3623 | } | |
3624 | } | |
3625 | } | |
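/* Example of the scaling above (illustrative numbers): with a golden top
 * level of 100000 (10 kHz units, i.e. 1 GHz) and an overdriven sclk of
 * 110000, clock_percent = 10, so each intermediate level is raised by 10%
 * of its own golden value; the top level itself was pinned to sclk before
 * the loop. The MCLK branch below applies the same scaling to memory
 * levels.
 */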
3626 | ||
3627 | if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK) { | |
3628 | dpm_table->mclk_table.dpm_levels | |
3629 | [dpm_table->mclk_table.count - 1].value = mclk; | |
3630 | ||
3631 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinACSupport) || | |
3632 | phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinDCSupport)) { | |
3633 | ||
3634 | PP_ASSERT_WITH_CODE( | |
3635 | (golden_dpm_table->mclk_table.dpm_levels | |
3636 | [golden_dpm_table->mclk_table.count-1].value != 0), | |
3637 | "Divide by 0!", | |
3638 | return -EINVAL); | |
3639 | dpm_count = dpm_table->mclk_table.count < 2 ? 0 : dpm_table->mclk_table.count - 2; | |
3640 | for (i = dpm_count; i > 1; i--) { | |
3641 | if (golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value < mclk) { | |
3642 | clock_percent = ((mclk - | |
3643 | golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value) * 100) | |
3644 | / golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value; | |
3645 | ||
3646 | dpm_table->mclk_table.dpm_levels[i].value = | |
3647 | golden_dpm_table->mclk_table.dpm_levels[i].value + | |
3648 | (golden_dpm_table->mclk_table.dpm_levels[i].value * | |
3649 | clock_percent) / 100; | |
3650 | ||
3651 | } else if (golden_dpm_table->mclk_table.dpm_levels[dpm_table->mclk_table.count-1].value > mclk) { | |
3652 | clock_percent = ( | |
3653 | (golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value - mclk) | |
3654 | * 100) | |
3655 | / golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value; | |
3656 | ||
3657 | dpm_table->mclk_table.dpm_levels[i].value = | |
3658 | golden_dpm_table->mclk_table.dpm_levels[i].value - | |
3659 | (golden_dpm_table->mclk_table.dpm_levels[i].value * | |
3660 | clock_percent) / 100; | |
3661 | } else | |
3662 | dpm_table->mclk_table.dpm_levels[i].value = | |
3663 | golden_dpm_table->mclk_table.dpm_levels[i].value; | |
3664 | } | |
3665 | } | |
3666 | } | |
3667 | ||
3668 | if (data->need_update_smu7_dpm_table & | |
3669 | (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) { | |
3670 | result = smum_populate_all_graphic_levels(hwmgr); | |
3671 | PP_ASSERT_WITH_CODE((0 == result), | |
3672 | "Failed to populate SCLK during PopulateNewDPMClocksStates Function!", | |
3673 | return result); | |
3674 | } | |
3675 | ||
3676 | if (data->need_update_smu7_dpm_table & | |
3677 | (DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) { | |
3678 | /*populate MCLK dpm table to SMU7 */ | |
3679 | result = smum_populate_all_memory_levels(hwmgr); | |
3680 | PP_ASSERT_WITH_CODE((0 == result), | |
3681 | "Failed to populate MCLK during PopulateNewDPMClocksStates Function!", | |
3682 | return result); | |
3683 | } | |
3684 | ||
3685 | return result; | |
3686 | } | |
3687 | ||
3688 | static int smu7_trim_single_dpm_states(struct pp_hwmgr *hwmgr, | |
3689 | struct smu7_single_dpm_table *dpm_table, | |
3690 | uint32_t low_limit, uint32_t high_limit) | |
3691 | { | |
3692 | uint32_t i; | |
3693 | ||
3694 | for (i = 0; i < dpm_table->count; i++) { | |
3695 | if ((dpm_table->dpm_levels[i].value < low_limit) | |
3696 | || (dpm_table->dpm_levels[i].value > high_limit)) | |
3697 | dpm_table->dpm_levels[i].enabled = false; | |
3698 | else | |
3699 | dpm_table->dpm_levels[i].enabled = true; | |
3700 | } | |
3701 | ||
3702 | return 0; | |
3703 | } | |
3704 | ||
3705 | static int smu7_trim_dpm_states(struct pp_hwmgr *hwmgr, | |
3706 | const struct smu7_power_state *smu7_ps) | |
3707 | { | |
3708 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
3709 | uint32_t high_limit_count; | |
3710 | ||
3711 | PP_ASSERT_WITH_CODE((smu7_ps->performance_level_count >= 1), | |
3712 | "power state did not have any performance level", | |
3713 | return -EINVAL); | |
3714 | ||
3715 | high_limit_count = (1 == smu7_ps->performance_level_count) ? 0 : 1; | |
3716 | ||
3717 | smu7_trim_single_dpm_states(hwmgr, | |
3718 | &(data->dpm_table.sclk_table), | |
3719 | smu7_ps->performance_levels[0].engine_clock, | |
3720 | smu7_ps->performance_levels[high_limit_count].engine_clock); | |
3721 | ||
3722 | smu7_trim_single_dpm_states(hwmgr, | |
3723 | &(data->dpm_table.mclk_table), | |
3724 | smu7_ps->performance_levels[0].memory_clock, | |
3725 | smu7_ps->performance_levels[high_limit_count].memory_clock); | |
3726 | ||
3727 | return 0; | |
3728 | } | |
3729 | ||
3730 | static int smu7_generate_dpm_level_enable_mask( | |
3731 | struct pp_hwmgr *hwmgr, const void *input) | |
3732 | { | |
3733 | int result; | |
3734 | const struct phm_set_power_state_input *states = | |
3735 | (const struct phm_set_power_state_input *)input; | |
3736 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
3737 | const struct smu7_power_state *smu7_ps = | |
3738 | cast_const_phw_smu7_power_state(states->pnew_state); | |
3739 | ||
3740 | result = smu7_trim_dpm_states(hwmgr, smu7_ps); | |
3741 | if (result) | |
3742 | return result; | |
3743 | ||
3744 | data->dpm_level_enable_mask.sclk_dpm_enable_mask = | |
3745 | phm_get_dpm_level_enable_mask_value(&data->dpm_table.sclk_table); | |
3746 | data->dpm_level_enable_mask.mclk_dpm_enable_mask = | |
3747 | phm_get_dpm_level_enable_mask_value(&data->dpm_table.mclk_table); | |
3748 | data->dpm_level_enable_mask.pcie_dpm_enable_mask = | |
3749 | phm_get_dpm_level_enable_mask_value(&data->dpm_table.pcie_speed_table); | |
3750 | ||
3751 | return 0; | |
3752 | } | |
3753 | ||
3754 | static int smu7_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) | |
3755 | { | |
3756 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
3757 | ||
3758 | if (0 == data->need_update_smu7_dpm_table) | |
3759 | return 0; | |
3760 | ||
3761 | if ((0 == data->sclk_dpm_key_disabled) && | |
3762 | (data->need_update_smu7_dpm_table & | |
3763 | (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) { | |
3764 | ||
3765 | PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr), | |
3766 | "Trying to Unfreeze SCLK DPM when DPM is disabled", | |
3767 | ); | |
3768 | PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr, | |
3769 | PPSMC_MSG_SCLKDPM_UnfreezeLevel), | |
3770 | "Failed to unfreeze SCLK DPM during UnFreezeSclkMclkDPM Function!", | |
3771 | return -EINVAL); | |
3772 | } | |
3773 | ||
3774 | if ((0 == data->mclk_dpm_key_disabled) && | |
3775 | (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) { | |
3776 | ||
3777 | PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr), | |
3778 | "Trying to Unfreeze MCLK DPM when DPM is disabled", | |
3779 | ); | |
3780 | PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr, | |
3781 | PPSMC_MSG_MCLKDPM_UnfreezeLevel), | |
3782 | "Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!", | |
3783 | return -EINVAL); | |
3784 | } | |
3785 | ||
3786 | data->need_update_smu7_dpm_table = 0; | |
3787 | ||
3788 | return 0; | |
3789 | } | |
3790 | ||
3791 | static int smu7_notify_link_speed_change_after_state_change( | |
3792 | struct pp_hwmgr *hwmgr, const void *input) | |
3793 | { | |
3794 | const struct phm_set_power_state_input *states = | |
3795 | (const struct phm_set_power_state_input *)input; | |
3796 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
3797 | const struct smu7_power_state *smu7_ps = | |
3798 | cast_const_phw_smu7_power_state(states->pnew_state); | |
3799 | uint16_t target_link_speed = smu7_get_maximum_link_speed(hwmgr, smu7_ps); | |
3800 | uint8_t request; | |
3801 | ||
3802 | if (data->pspp_notify_required) { | |
3803 | if (target_link_speed == PP_PCIEGen3) | |
3804 | request = PCIE_PERF_REQ_GEN3; | |
3805 | else if (target_link_speed == PP_PCIEGen2) | |
3806 | request = PCIE_PERF_REQ_GEN2; | |
3807 | else | |
3808 | request = PCIE_PERF_REQ_GEN1; | |
3809 | ||
3810 | if (request == PCIE_PERF_REQ_GEN1 && | |
3811 | smu7_get_current_pcie_speed(hwmgr) > 0) | |
3812 | return 0; | |
3813 | ||
3814 | if (acpi_pcie_perf_request(hwmgr->device, request, false)) { | |
3815 | if (PP_PCIEGen2 == target_link_speed) | |
b5c11b8e | 3816 | pr_info("PSPP request to switch to Gen2 from Gen3 Failed!"); |
599a7e9f | 3817 | else |
b5c11b8e | 3818 | pr_info("PSPP request to switch to Gen1 from Gen2 Failed!"); |
599a7e9f RZ |
3819 | } |
3820 | } | |
3821 | ||
3822 | return 0; | |
3823 | } | |
3824 | ||
3825 | static int smu7_notify_smc_display(struct pp_hwmgr *hwmgr) | |
3826 | { | |
3827 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
3828 | ||
3829 | if (hwmgr->feature_mask & PP_VBI_TIME_SUPPORT_MASK) | |
3830 | smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, | |
3831 | (PPSMC_Msg)PPSMC_MSG_SetVBITimeout, data->frame_time_x2); | |
3832 | return (smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_HasDisplay) == 0) ? 0 : -EINVAL; | |
3833 | } | |
3834 | ||
3835 | static int smu7_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input) | |
3836 | { | |
3837 | int tmp_result, result = 0; | |
3838 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
3839 | ||
3840 | tmp_result = smu7_find_dpm_states_clocks_in_dpm_table(hwmgr, input); | |
3841 | PP_ASSERT_WITH_CODE((0 == tmp_result), | |
3842 | "Failed to find DPM states clocks in DPM table!", | |
3843 | result = tmp_result); | |
3844 | ||
3845 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, | |
3846 | PHM_PlatformCaps_PCIEPerformanceRequest)) { | |
3847 | tmp_result = | |
3848 | smu7_request_link_speed_change_before_state_change(hwmgr, input); | |
3849 | PP_ASSERT_WITH_CODE((0 == tmp_result), | |
3850 | "Failed to request link speed change before state change!", | |
3851 | result = tmp_result); | |
3852 | } | |
3853 | ||
3854 | tmp_result = smu7_freeze_sclk_mclk_dpm(hwmgr); | |
3855 | PP_ASSERT_WITH_CODE((0 == tmp_result), | |
3856 | "Failed to freeze SCLK MCLK DPM!", result = tmp_result); | |
3857 | ||
3858 | tmp_result = smu7_populate_and_upload_sclk_mclk_dpm_levels(hwmgr, input); | |
3859 | PP_ASSERT_WITH_CODE((0 == tmp_result), | |
3860 | "Failed to populate and upload SCLK MCLK DPM levels!", | |
3861 | result = tmp_result); | |
3862 | ||
3863 | tmp_result = smu7_generate_dpm_level_enable_mask(hwmgr, input); | |
3864 | PP_ASSERT_WITH_CODE((0 == tmp_result), | |
3865 | "Failed to generate DPM level enabled mask!", | |
3866 | result = tmp_result); | |
3867 | ||
3868 | tmp_result = smum_update_sclk_threshold(hwmgr); | |
3869 | PP_ASSERT_WITH_CODE((0 == tmp_result), | |
3870 | "Failed to update SCLK threshold!", | |
3871 | result = tmp_result); | |
3872 | ||
3873 | tmp_result = smu7_notify_smc_display(hwmgr); | |
3874 | PP_ASSERT_WITH_CODE((0 == tmp_result), | |
3875 | "Failed to notify smc display settings!", | |
3876 | result = tmp_result); | |
3877 | ||
3878 | tmp_result = smu7_unfreeze_sclk_mclk_dpm(hwmgr); | |
3879 | PP_ASSERT_WITH_CODE((0 == tmp_result), | |
3880 | "Failed to unfreeze SCLK MCLK DPM!", | |
3881 | result = tmp_result); | |
3882 | ||
3883 | tmp_result = smu7_upload_dpm_level_enable_mask(hwmgr); | |
3884 | PP_ASSERT_WITH_CODE((0 == tmp_result), | |
3885 | "Failed to upload DPM level enabled mask!", | |
3886 | result = tmp_result); | |
3887 | ||
3888 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, | |
3889 | PHM_PlatformCaps_PCIEPerformanceRequest)) { | |
3890 | tmp_result = | |
3891 | smu7_notify_link_speed_change_after_state_change(hwmgr, input); | |
3892 | PP_ASSERT_WITH_CODE((0 == tmp_result), | |
3893 | "Failed to notify link speed change after state change!", | |
3894 | result = tmp_result); | |
3895 | } | |
3896 | data->apply_optimized_settings = false; | |
3897 | return result; | |
3898 | } | |
3899 | ||
3900 | static int smu7_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_pwm) | |
3901 | { | |
3902 | hwmgr->thermal_controller. | |
3903 | advanceFanControlParameters.usMaxFanPWM = us_max_fan_pwm; | |
3904 | ||
3905 | if (phm_is_hw_access_blocked(hwmgr)) | |
3906 | return 0; | |
3907 | ||
3908 | return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, | |
3909 | PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm); | |
3910 | } | |
3911 | ||
f8a4c11b BX |
3912 | static int |
3913 | smu7_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display) | |
599a7e9f RZ |
3914 | { |
3915 | PPSMC_Msg msg = has_display ? (PPSMC_Msg)PPSMC_HasDisplay : (PPSMC_Msg)PPSMC_NoDisplay; | |
3916 | ||
3917 | return (smum_send_msg_to_smc(hwmgr->smumgr, msg) == 0) ? 0 : -1; | |
3918 | } | |
3919 | ||
f8a4c11b BX |
3920 | static int |
3921 | smu7_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr) | |
599a7e9f RZ |
3922 | { |
3923 | uint32_t num_active_displays = 0; | |
3924 | struct cgs_display_info info = {0}; | |
3925 | ||
3926 | info.mode_info = NULL; | |
3927 | cgs_get_active_displays_info(hwmgr->device, &info); | |
3928 | ||
3929 | num_active_displays = info.display_count; | |
3930 | ||
3931 | if (num_active_displays > 1 && !hwmgr->display_config.multi_monitor_in_sync) | |
3932 | smu7_notify_smc_display_change(hwmgr, false); | |
3933 | ||
3934 | return 0; | |
3935 | } | |
3936 | ||
3937 | /** | |
3938 | * Programs the display gap | |
3939 | * | |
3940 | * @param hwmgr the address of the powerplay hardware manager. | |
3941 | * @return always OK | |
3942 | */ | |
f8a4c11b | 3943 | static int smu7_program_display_gap(struct pp_hwmgr *hwmgr) |
599a7e9f RZ |
3944 | { |
3945 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
3946 | uint32_t num_active_displays = 0; | |
3947 | uint32_t display_gap = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL); | |
3948 | uint32_t display_gap2; | |
3949 | uint32_t pre_vbi_time_in_us; | |
3950 | uint32_t frame_time_in_us; | |
3951 | uint32_t ref_clock; | |
3952 | uint32_t refresh_rate = 0; | |
3953 | struct cgs_display_info info = {0}; | |
3954 | struct cgs_mode_info mode_info; | |
3955 | ||
3956 | info.mode_info = &mode_info; | |
3957 | ||
3958 | cgs_get_active_displays_info(hwmgr->device, &info); | |
3959 | num_active_displays = info.display_count; | |
3960 | ||
3961 | display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL, DISP_GAP, (num_active_displays > 0) ? DISPLAY_GAP_VBLANK_OR_WM : DISPLAY_GAP_IGNORE); | |
3962 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL, display_gap); | |
3963 | ||
3964 | ref_clock = mode_info.ref_clock; | |
3965 | refresh_rate = mode_info.refresh_rate; | |
3966 | ||
3967 | if (0 == refresh_rate) | |
3968 | refresh_rate = 60; | |
3969 | ||
3970 | frame_time_in_us = 1000000 / refresh_rate; | |
3971 | ||
3972 | pre_vbi_time_in_us = frame_time_in_us - 200 - mode_info.vblank_time_us; | |
3973 | data->frame_time_x2 = frame_time_in_us * 2 / 100; | |
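/* frame_time_x2 holds twice the frame time in 100 us units: at 60 Hz,
 * frame_time_in_us = 16666, so frame_time_x2 = 333.
 */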
3974 | ||
3975 | display_gap2 = pre_vbi_time_in_us * (ref_clock / 100); | |
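/* This converts the pre-vblank interval from microseconds to reference
 * clock ticks; ref_clock is in 10 kHz units here (as elsewhere in
 * powerplay), so ref_clock / 100 is ticks per microsecond.
 */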
3976 | ||
3977 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL2, display_gap2); | |
3978 | ||
3979 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, | |
3980 | data->soft_regs_start + smum_get_offsetof(hwmgr->smumgr, | |
3981 | SMU_SoftRegisters, | |
3982 | PreVBlankGap), 0x64); | |
3983 | ||
3984 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, | |
3985 | data->soft_regs_start + smum_get_offsetof(hwmgr->smumgr, | |
3986 | SMU_SoftRegisters, | |
3987 | VBlankTimeout), | |
3988 | (frame_time_in_us - pre_vbi_time_in_us)); | |
3989 | ||
3990 | return 0; | |
3991 | } | |
3992 | ||
f8a4c11b | 3993 | static int smu7_display_configuration_changed_task(struct pp_hwmgr *hwmgr) |
599a7e9f RZ |
3994 | { |
3995 | return smu7_program_display_gap(hwmgr); | |
3996 | } | |
3997 | ||
3998 | /** | |
3999 | * Set maximum target operating fan output RPM | |
4000 | * | |
4001 | * @param hwmgr: the address of the powerplay hardware manager. | |
4002 | * @param us_max_fan_rpm: max operating fan RPM value. | |
4003 | * @return The response that came from the SMC. | |
4004 | */ | |
4005 | static int smu7_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_rpm) | |
4006 | { | |
4007 | hwmgr->thermal_controller. | |
4008 | advanceFanControlParameters.usMaxFanRPM = us_max_fan_rpm; | |
4009 | ||
4010 | if (phm_is_hw_access_blocked(hwmgr)) | |
4011 | return 0; | |
4012 | ||
4013 | return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, | |
4014 | PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm); | |
4015 | } | |
4016 | ||
f8a4c11b | 4017 | static int smu7_register_internal_thermal_interrupt(struct pp_hwmgr *hwmgr, |
599a7e9f RZ |
4018 | const void *thermal_interrupt_info) |
4019 | { | |
4020 | return 0; | |
4021 | } | |
4022 | ||
f8a4c11b BX |
4023 | static bool |
4024 | smu7_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr) | |
599a7e9f RZ |
4025 | { |
4026 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
4027 | bool is_update_required = false; | |
4028 | struct cgs_display_info info = {0, 0, NULL}; | |
4029 | ||
4030 | cgs_get_active_displays_info(hwmgr->device, &info); | |
4031 | ||
4032 | if (data->display_timing.num_existing_displays != info.display_count) | |
4033 | is_update_required = true; | |
4034 | ||
4035 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) { | |
4036 | if (data->display_timing.min_clock_in_sr != hwmgr->display_config.min_core_set_clock_in_sr && | |
4037 | (data->display_timing.min_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK || | |
4038 | hwmgr->display_config.min_core_set_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK)) | |
4039 | is_update_required = true; | |
4040 | } | |
4041 | return is_update_required; | |
4042 | } | |
4043 | ||
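/*
 * The deep-sleep clause above requests an SMC update only when the
 * self-refresh minimum clock actually changed and at least one side of
 * the change is at or above SMU7_MINIMUM_ENGINE_CLOCK; changes that stay
 * entirely below the minimum engine clock are treated as no-ops.
 */
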
4044 | static inline bool smu7_are_power_levels_equal(const struct smu7_performance_level *pl1, | |
4045 | const struct smu7_performance_level *pl2) | |
4046 | { | |
4047 | return ((pl1->memory_clock == pl2->memory_clock) && | |
4048 | (pl1->engine_clock == pl2->engine_clock) && | |
4049 | (pl1->pcie_gen == pl2->pcie_gen) && | |
4050 | (pl1->pcie_lane == pl2->pcie_lane)); | |
4051 | } | |
4052 | ||
f8a4c11b BX |
4053 | static int smu7_check_states_equal(struct pp_hwmgr *hwmgr, |
4054 | const struct pp_hw_power_state *pstate1, | |
4055 | const struct pp_hw_power_state *pstate2, bool *equal) | |
599a7e9f | 4056 | { |
9faa6b02 RZ |
4057 | const struct smu7_power_state *psa; |
4058 | const struct smu7_power_state *psb; | |
599a7e9f RZ |
4059 | int i; |
4060 | ||
4061 | if (pstate1 == NULL || pstate2 == NULL || equal == NULL) | |
4062 | return -EINVAL; | |
4063 | ||
9faa6b02 RZ |
4064 | psa = cast_const_phw_smu7_power_state(pstate1); |
4065 | psb = cast_const_phw_smu7_power_state(pstate2); | |
599a7e9f RZ |
4066 | /* If the two states don't even have the same number of performance levels, they cannot be the same state. */ | |
4067 | if (psa->performance_level_count != psb->performance_level_count) { | |
4068 | *equal = false; | |
4069 | return 0; | |
4070 | } | |
4071 | ||
4072 | for (i = 0; i < psa->performance_level_count; i++) { | |
4073 | if (!smu7_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) { | |
4074 | /* If we have found even one performance level pair that is different, the states are different. */ | |
4075 | *equal = false; | |
4076 | return 0; | |
4077 | } | |
4078 | } | |
4079 | ||
4080 | /* If all performance levels match, break the tie on the UVD/VCE clocks and the SCLK threshold. */ | |
4081 | *equal = ((psa->uvd_clks.vclk == psb->uvd_clks.vclk) && (psa->uvd_clks.dclk == psb->uvd_clks.dclk)); | |
4082 | *equal &= ((psa->vce_clks.evclk == psb->vce_clks.evclk) && (psa->vce_clks.ecclk == psb->vce_clks.ecclk)); | |
4083 | *equal &= (psa->sclk_threshold == psb->sclk_threshold); | |
4084 | ||
4085 | return 0; | |
4086 | } | |
4087 | ||
f8a4c11b | 4088 | static int smu7_upload_mc_firmware(struct pp_hwmgr *hwmgr) |
599a7e9f RZ |
4089 | { |
4090 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
4091 | ||
4092 | uint32_t vbios_version; | |
4093 | uint32_t tmp; | |
4094 | ||
4095 | /* Read MC indirect register offset 0x9F bits [3:0] to see | |
4096 | * if VBIOS has already loaded a full version of MC ucode | |
4097 | * or not. | |
4098 | */ | |
4099 | ||
4100 | smu7_get_mc_microcode_version(hwmgr); | |
4101 | vbios_version = hwmgr->microcode_version_info.MC & 0xf; | |
4102 | ||
4103 | data->need_long_memory_training = false; | |
4104 | ||
4105 | cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX, | |
4106 | ixMC_IO_DEBUG_UP_13); | |
4107 | tmp = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA); | |
4108 | ||
4109 | if (tmp & (1 << 23)) { | |
4110 | data->mem_latency_high = MEM_LATENCY_HIGH; | |
4111 | data->mem_latency_low = MEM_LATENCY_LOW; | |
4112 | } else { | |
4113 | data->mem_latency_high = 330; | |
4114 | data->mem_latency_low = 330; | |
4115 | } | |
4116 | ||
4117 | return 0; | |
4118 | } | |
4119 | ||
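/*
 * The probe above reads ixMC_IO_DEBUG_UP_13 through the MC_SEQ_IO_DEBUG
 * index/data register pair; bit 23 selects between the nominal latency
 * pair (MEM_LATENCY_HIGH/MEM_LATENCY_LOW = 45/35) and a conservative
 * 330/330 fallback. The hardware meaning of bit 23 is not documented
 * here, so treat this as a description of the code, not of the silicon.
 */
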
4120 | static int smu7_read_clock_registers(struct pp_hwmgr *hwmgr) | |
4121 | { | |
4122 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
4123 | ||
4124 | data->clock_registers.vCG_SPLL_FUNC_CNTL = | |
4125 | cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL); | |
4126 | data->clock_registers.vCG_SPLL_FUNC_CNTL_2 = | |
4127 | cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_2); | |
4128 | data->clock_registers.vCG_SPLL_FUNC_CNTL_3 = | |
4129 | cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_3); | |
4130 | data->clock_registers.vCG_SPLL_FUNC_CNTL_4 = | |
4131 | cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_4); | |
4132 | data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM = | |
4133 | cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM); | |
4134 | data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2 = | |
4135 | cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM_2); | |
4136 | data->clock_registers.vDLL_CNTL = | |
4137 | cgs_read_register(hwmgr->device, mmDLL_CNTL); | |
4138 | data->clock_registers.vMCLK_PWRMGT_CNTL = | |
4139 | cgs_read_register(hwmgr->device, mmMCLK_PWRMGT_CNTL); | |
4140 | data->clock_registers.vMPLL_AD_FUNC_CNTL = | |
4141 | cgs_read_register(hwmgr->device, mmMPLL_AD_FUNC_CNTL); | |
4142 | data->clock_registers.vMPLL_DQ_FUNC_CNTL = | |
4143 | cgs_read_register(hwmgr->device, mmMPLL_DQ_FUNC_CNTL); | |
4144 | data->clock_registers.vMPLL_FUNC_CNTL = | |
4145 | cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL); | |
4146 | data->clock_registers.vMPLL_FUNC_CNTL_1 = | |
4147 | cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_1); | |
4148 | data->clock_registers.vMPLL_FUNC_CNTL_2 = | |
4149 | cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_2); | |
4150 | data->clock_registers.vMPLL_SS1 = | |
4151 | cgs_read_register(hwmgr->device, mmMPLL_SS1); | |
4152 | data->clock_registers.vMPLL_SS2 = | |
4153 | cgs_read_register(hwmgr->device, mmMPLL_SS2); | |
4154 | return 0; | |
4155 | ||
4156 | } | |
4157 | ||
4158 | /** | |
4159 | * Find out if memory is GDDR5. | |
4160 | * | |
4161 | * @param hwmgr the address of the powerplay hardware manager. | |
4162 | * @return always 0 | |
4163 | */ | |
4164 | static int smu7_get_memory_type(struct pp_hwmgr *hwmgr) | |
4165 | { | |
4166 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
4167 | uint32_t temp; | |
4168 | ||
4169 | temp = cgs_read_register(hwmgr->device, mmMC_SEQ_MISC0); | |
4170 | ||
4171 | data->is_memory_gddr5 = (MC_SEQ_MISC0_GDDR5_VALUE == | |
4172 | ((temp & MC_SEQ_MISC0_GDDR5_MASK) >> | |
4173 | MC_SEQ_MISC0_GDDR5_SHIFT)); | |
4174 | ||
4175 | return 0; | |
4176 | } | |
4177 | ||
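/*
 * Decode example for the check above (register value illustrative): with
 * MC_SEQ_MISC0 reading 0x5xxxxxxx,
 *   (temp & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT
 *     = (0x50000000 & 0xf0000000) >> 28 = 0x5 = MC_SEQ_MISC0_GDDR5_VALUE,
 * so is_memory_gddr5 is set.
 */
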
4178 | /** | |
4179 | * Enables static (ACPI) power management by the SMC | |
4180 | * | |
4181 | * @param hwmgr the address of the powerplay hardware manager. | |
4182 | * @return always 0 | |
4183 | */ | |
4184 | static int smu7_enable_acpi_power_management(struct pp_hwmgr *hwmgr) | |
4185 | { | |
4186 | PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, | |
4187 | GENERAL_PWRMGT, STATIC_PM_EN, 1); | |
4188 | ||
4189 | return 0; | |
4190 | } | |
4191 | ||
4192 | /** | |
4193 | * Initialize power-gating states for the UVD, VCE and SAMU engines | |
4194 | * | |
4195 | * @param hwmgr the address of the powerplay hardware manager. | |
4196 | * @return always 0 | |
4197 | */ | |
4198 | static int smu7_init_power_gate_state(struct pp_hwmgr *hwmgr) | |
4199 | { | |
4200 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
4201 | ||
4202 | data->uvd_power_gated = false; | |
4203 | data->vce_power_gated = false; | |
4204 | data->samu_power_gated = false; | |
4205 | ||
4206 | return 0; | |
4207 | } | |
4208 | ||
4209 | static int smu7_init_sclk_threshold(struct pp_hwmgr *hwmgr) | |
4210 | { | |
4211 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
4212 | ||
4213 | data->low_sclk_interrupt_threshold = 0; | |
4214 | return 0; | |
4215 | } | |
4216 | ||
f8a4c11b | 4217 | static int smu7_setup_asic_task(struct pp_hwmgr *hwmgr) |
599a7e9f RZ |
4218 | { |
4219 | int tmp_result, result = 0; | |
4220 | ||
4221 | smu7_upload_mc_firmware(hwmgr); | |
4222 | ||
4223 | tmp_result = smu7_read_clock_registers(hwmgr); | |
4224 | PP_ASSERT_WITH_CODE((0 == tmp_result), | |
4225 | "Failed to read clock registers!", result = tmp_result); | |
4226 | ||
4227 | tmp_result = smu7_get_memory_type(hwmgr); | |
4228 | PP_ASSERT_WITH_CODE((0 == tmp_result), | |
4229 | "Failed to get memory type!", result = tmp_result); | |
4230 | ||
4231 | tmp_result = smu7_enable_acpi_power_management(hwmgr); | |
4232 | PP_ASSERT_WITH_CODE((0 == tmp_result), | |
4233 | "Failed to enable ACPI power management!", result = tmp_result); | |
4234 | ||
4235 | tmp_result = smu7_init_power_gate_state(hwmgr); | |
4236 | PP_ASSERT_WITH_CODE((0 == tmp_result), | |
4237 | "Failed to init power gate state!", result = tmp_result); | |
4238 | ||
4239 | tmp_result = smu7_get_mc_microcode_version(hwmgr); | |
4240 | PP_ASSERT_WITH_CODE((0 == tmp_result), | |
4241 | "Failed to get MC microcode version!", result = tmp_result); | |
4242 | ||
4243 | tmp_result = smu7_init_sclk_threshold(hwmgr); | |
4244 | PP_ASSERT_WITH_CODE((0 == tmp_result), | |
4245 | "Failed to init sclk threshold!", result = tmp_result); | |
4246 | ||
4247 | return result; | |
4248 | } | |
4249 | ||
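/*
 * Note on the pattern above: PP_ASSERT_WITH_CODE() logs the message and
 * runs its code argument (here "result = tmp_result") when the condition
 * fails, but does not return, so every setup step is still attempted and
 * the status of the last failing step is what gets returned.
 */
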
4250 | static int smu7_force_clock_level(struct pp_hwmgr *hwmgr, | |
4251 | enum pp_clock_type type, uint32_t mask) | |
4252 | { | |
4253 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
4254 | ||
570272d2 RZ |
4255 | if (hwmgr->dpm_level & (AMD_DPM_FORCED_LEVEL_AUTO | |
4256 | AMD_DPM_FORCED_LEVEL_LOW | | |
4257 | AMD_DPM_FORCED_LEVEL_HIGH)) | |
599a7e9f RZ |
4258 | return -EINVAL; |
4259 | ||
4260 | switch (type) { | |
4261 | case PP_SCLK: | |
4262 | if (!data->sclk_dpm_key_disabled) | |
4263 | smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, | |
4264 | PPSMC_MSG_SCLKDPM_SetEnabledMask, | |
4265 | data->dpm_level_enable_mask.sclk_dpm_enable_mask & mask); | |
4266 | break; | |
4267 | case PP_MCLK: | |
4268 | if (!data->mclk_dpm_key_disabled) | |
4269 | smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, | |
4270 | PPSMC_MSG_MCLKDPM_SetEnabledMask, | |
4271 | data->dpm_level_enable_mask.mclk_dpm_enable_mask & mask); | |
4272 | break; | |
4273 | case PP_PCIE: | |
4274 | { | |
4275 | uint32_t tmp = mask & data->dpm_level_enable_mask.pcie_dpm_enable_mask; | |
4276 | uint32_t level = 0; | |
4277 | ||
4278 | while (tmp >>= 1) | |
4279 | level++; | |
4280 | ||
4281 | if (!data->pcie_dpm_key_disabled) | |
4282 | smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, | |
4283 | PPSMC_MSG_PCIeDPM_ForceLevel, | |
4284 | level); | |
4285 | break; | |
4286 | } | |
4287 | default: | |
4288 | break; | |
4289 | } | |
4290 | ||
4291 | return 0; | |
4292 | } | |
4293 | ||
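/*
 * The PP_PCIE branch above converts the requested bit mask into the
 * index of its highest set bit, since PCIe DPM is forced to a single
 * level rather than to a set of levels. Example (mask illustrative):
 *   tmp = 0b0100 -> two shifts until zero -> level = 2,
 * so PPSMC_MSG_PCIeDPM_ForceLevel is sent with parameter 2.
 */
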
4294 | static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr, | |
4295 | enum pp_clock_type type, char *buf) | |
4296 | { | |
4297 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
4298 | struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table); | |
4299 | struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table); | |
4300 | struct smu7_single_dpm_table *pcie_table = &(data->dpm_table.pcie_speed_table); | |
4301 | int i, now, size = 0; | |
4302 | uint32_t clock, pcie_speed; | |
4303 | ||
4304 | switch (type) { | |
4305 | case PP_SCLK: | |
4306 | smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency); | |
4307 | clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); | |
4308 | ||
4309 | for (i = 0; i < sclk_table->count; i++) { | |
4310 | if (clock > sclk_table->dpm_levels[i].value) | |
4311 | continue; | |
4312 | break; | |
4313 | } | |
4314 | now = i; | |
4315 | ||
4316 | for (i = 0; i < sclk_table->count; i++) | |
4317 | size += sprintf(buf + size, "%d: %uMHz %s\n", | |
4318 | i, sclk_table->dpm_levels[i].value / 100, | |
4319 | (i == now) ? "*" : ""); | |
4320 | break; | |
4321 | case PP_MCLK: | |
4322 | smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency); | |
4323 | clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); | |
4324 | ||
4325 | for (i = 0; i < mclk_table->count; i++) { | |
4326 | if (clock > mclk_table->dpm_levels[i].value) | |
4327 | continue; | |
4328 | break; | |
4329 | } | |
4330 | now = i; | |
4331 | ||
4332 | for (i = 0; i < mclk_table->count; i++) | |
4333 | size += sprintf(buf + size, "%d: %uMHz %s\n", | |
4334 | i, mclk_table->dpm_levels[i].value / 100, | |
4335 | (i == now) ? "*" : ""); | |
4336 | break; | |
4337 | case PP_PCIE: | |
4338 | pcie_speed = smu7_get_current_pcie_speed(hwmgr); | |
4339 | for (i = 0; i < pcie_table->count; i++) { | |
4340 | if (pcie_speed != pcie_table->dpm_levels[i].value) | |
4341 | continue; | |
4342 | break; | |
4343 | } | |
4344 | now = i; | |
4345 | ||
4346 | for (i = 0; i < pcie_table->count; i++) | |
4347 | size += sprintf(buf + size, "%d: %s %s\n", i, | |
4348 | (pcie_table->dpm_levels[i].value == 0) ? "2.5GT/s, x8" : | |
4349 | (pcie_table->dpm_levels[i].value == 1) ? "5.0GT/s, x16" : | |
4350 | (pcie_table->dpm_levels[i].value == 2) ? "8.0GT/s, x16" : "", | |
4351 | (i == now) ? "*" : ""); | |
4352 | break; | |
4353 | default: | |
4354 | break; | |
4355 | } | |
4356 | return size; | |
4357 | } | |
4358 | ||
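/*
 * Example of the buffer produced above for PP_SCLK (levels illustrative;
 * dpm_levels[].value is in 10 kHz units, hence the division by 100):
 *   0: 300MHz
 *   1: 608MHz *
 *   2: 1120MHz
 * The '*' marks the first level at or above the SCLK reported by the SMC.
 */
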
4359 | static int smu7_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode) | |
4360 | { | |
2fde9ab2 | 4361 | int result = 0; |
599a7e9f | 4362 | |
2fde9ab2 RZ |
4363 | switch (mode) { |
4364 | case AMD_FAN_CTRL_NONE: | |
4365 | result = smu7_fan_ctrl_set_fan_speed_percent(hwmgr, 100); | |
4366 | break; | |
4367 | case AMD_FAN_CTRL_MANUAL: | |
4368 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, | |
4369 | PHM_PlatformCaps_MicrocodeFanControl)) | |
4370 | result = smu7_fan_ctrl_stop_smc_fan_control(hwmgr); | |
4371 | break; | |
4372 | case AMD_FAN_CTRL_AUTO: | |
4373 | result = smu7_fan_ctrl_set_static_mode(hwmgr, mode); | |
4374 | if (!result) | |
4375 | result = smu7_fan_ctrl_start_smc_fan_control(hwmgr); | |
4376 | break; | |
4377 | default: | |
4378 | break; | |
4379 | } | |
4380 | return result; | |
599a7e9f RZ |
4381 | } |
4382 | ||
4383 | static int smu7_get_fan_control_mode(struct pp_hwmgr *hwmgr) | |
4384 | { | |
2fde9ab2 | 4385 | return hwmgr->fan_ctrl_enabled ? AMD_FAN_CTRL_AUTO : AMD_FAN_CTRL_MANUAL; |
599a7e9f RZ |
4386 | } |
4387 | ||
4388 | static int smu7_get_sclk_od(struct pp_hwmgr *hwmgr) | |
4389 | { | |
4390 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
4391 | struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table); | |
4392 | struct smu7_single_dpm_table *golden_sclk_table = | |
4393 | &(data->golden_dpm_table.sclk_table); | |
4394 | int value; | |
4395 | ||
4396 | value = (sclk_table->dpm_levels[sclk_table->count - 1].value - | |
4397 | golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) * | |
4398 | 100 / | |
4399 | golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value; | |
4400 | ||
4401 | return value; | |
4402 | } | |
4403 | ||
4404 | static int smu7_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value) | |
4405 | { | |
4406 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
4407 | struct smu7_single_dpm_table *golden_sclk_table = | |
4408 | &(data->golden_dpm_table.sclk_table); | |
4409 | struct pp_power_state *ps; | |
4410 | struct smu7_power_state *smu7_ps; | |
4411 | ||
4412 | if (value > 20) | |
4413 | value = 20; | |
4414 | ||
4415 | ps = hwmgr->request_ps; | |
4416 | ||
4417 | if (ps == NULL) | |
4418 | return -EINVAL; | |
4419 | ||
4420 | smu7_ps = cast_phw_smu7_power_state(&ps->hardware); | |
4421 | ||
4422 | smu7_ps->performance_levels[smu7_ps->performance_level_count - 1].engine_clock = | |
4423 | golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value * | |
4424 | value / 100 + | |
4425 | golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value; | |
4426 | ||
4427 | return 0; | |
4428 | } | |
4429 | ||
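/*
 * Overdrive round trip for the two helpers above (golden value
 * illustrative): with a golden top SCLK of 100000 (1000 MHz),
 * smu7_set_sclk_od(hwmgr, 10) requests
 *   100000 * 10 / 100 + 100000 = 110000 (1100 MHz)
 * for the highest performance level, and smu7_get_sclk_od() then reads
 *   (110000 - 100000) * 100 / 100000 = 10
 * back. Requests above 20 are clamped, i.e. at most a 20% overclock.
 * The MCLK pair below follows the same pattern.
 */
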
4430 | static int smu7_get_mclk_od(struct pp_hwmgr *hwmgr) | |
4431 | { | |
4432 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
4433 | struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table); | |
4434 | struct smu7_single_dpm_table *golden_mclk_table = | |
4435 | &(data->golden_dpm_table.mclk_table); | |
4436 | int value; | |
4437 | ||
4438 | value = (mclk_table->dpm_levels[mclk_table->count - 1].value - | |
4439 | golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) * | |
4440 | 100 / | |
4441 | golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value; | |
4442 | ||
4443 | return value; | |
4444 | } | |
4445 | ||
4446 | static int smu7_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value) | |
4447 | { | |
4448 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
4449 | struct smu7_single_dpm_table *golden_mclk_table = | |
4450 | &(data->golden_dpm_table.mclk_table); | |
4451 | struct pp_power_state *ps; | |
4452 | struct smu7_power_state *smu7_ps; | |
4453 | ||
4454 | if (value > 20) | |
4455 | value = 20; | |
4456 | ||
4457 | ps = hwmgr->request_ps; | |
4458 | ||
4459 | if (ps == NULL) | |
4460 | return -EINVAL; | |
4461 | ||
4462 | smu7_ps = cast_phw_smu7_power_state(&ps->hardware); | |
4463 | ||
4464 | smu7_ps->performance_levels[smu7_ps->performance_level_count - 1].memory_clock = | |
4465 | golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value * | |
4466 | value / 100 + | |
4467 | golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value; | |
4468 | ||
4469 | return 0; | |
4470 | } | |
4471 | ||
4472 | ||
4473 | static int smu7_get_sclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks) | |
4474 | { | |
4475 | struct phm_ppt_v1_information *table_info = | |
4476 | (struct phm_ppt_v1_information *)hwmgr->pptable; | |
954e6bee RZ |
4477 | struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table = NULL; |
4478 | struct phm_clock_voltage_dependency_table *sclk_table; | |
599a7e9f RZ |
4479 | int i; |
4480 | ||
954e6bee RZ |
4481 | if (hwmgr->pp_table_version == PP_TABLE_V1) { |
4482 | if (table_info == NULL || table_info->vdd_dep_on_sclk == NULL) | |
4483 | return -EINVAL; | |
4484 | dep_sclk_table = table_info->vdd_dep_on_sclk; | |
4d8d44c6 | 4485 | for (i = 0; i < dep_sclk_table->count; i++) |
954e6bee | 4486 | clocks->clock[i] = dep_sclk_table->entries[i].clk; |
4d8d44c6 | 4487 | clocks->count = dep_sclk_table->count; |
954e6bee RZ |
4488 | } else if (hwmgr->pp_table_version == PP_TABLE_V0) { |
4489 | sclk_table = hwmgr->dyn_state.vddc_dependency_on_sclk; | |
4d8d44c6 | 4490 | for (i = 0; i < sclk_table->count; i++) |
954e6bee | 4491 | clocks->clock[i] = sclk_table->entries[i].clk; |
4d8d44c6 | 4492 | clocks->count = sclk_table->count; |
599a7e9f | 4493 | } |
954e6bee | 4494 | |
599a7e9f RZ |
4495 | return 0; |
4496 | } | |
4497 | ||
4498 | static uint32_t smu7_get_mem_latency(struct pp_hwmgr *hwmgr, uint32_t clk) | |
4499 | { | |
4500 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
4501 | ||
4502 | if (clk >= MEM_FREQ_LOW_LATENCY && clk < MEM_FREQ_HIGH_LATENCY) | |
4503 | return data->mem_latency_high; | |
4504 | else if (clk >= MEM_FREQ_HIGH_LATENCY) | |
4505 | return data->mem_latency_low; | |
4506 | else | |
4507 | return MEM_LATENCY_ERR; | |
4508 | } | |
4509 | ||
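/*
 * Latency bands used above (clk in the driver's 10 kHz units):
 *   clk <  MEM_FREQ_LOW_LATENCY  (25000 = 250 MHz) -> MEM_LATENCY_ERR
 *   clk <  MEM_FREQ_HIGH_LATENCY (80000 = 800 MHz) -> mem_latency_high
 *   clk >= MEM_FREQ_HIGH_LATENCY                   -> mem_latency_low
 * i.e. slower memory clocks are charged the higher latency figure.
 */
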
4510 | static int smu7_get_mclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks) | |
4511 | { | |
4512 | struct phm_ppt_v1_information *table_info = | |
4513 | (struct phm_ppt_v1_information *)hwmgr->pptable; | |
4514 | struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table; | |
4515 | int i; | |
954e6bee | 4516 | struct phm_clock_voltage_dependency_table *mclk_table; |
599a7e9f | 4517 | |
954e6bee RZ |
4518 | if (hwmgr->pp_table_version == PP_TABLE_V1) { |
4519 | if (table_info == NULL) | |
4520 | return -EINVAL; | |
4521 | dep_mclk_table = table_info->vdd_dep_on_mclk; | |
4522 | for (i = 0; i < dep_mclk_table->count; i++) { | |
4523 | clocks->clock[i] = dep_mclk_table->entries[i].clk; | |
4524 | clocks->latency[i] = smu7_get_mem_latency(hwmgr, | |
599a7e9f | 4525 | dep_mclk_table->entries[i].clk); |
954e6bee | 4526 | } |
4d8d44c6 | 4527 | clocks->count = dep_mclk_table->count; |
954e6bee RZ |
4528 | } else if (hwmgr->pp_table_version == PP_TABLE_V0) { |
4529 | mclk_table = hwmgr->dyn_state.vddc_dependency_on_mclk; | |
4d8d44c6 | 4530 | for (i = 0; i < mclk_table->count; i++) |
954e6bee | 4531 | clocks->clock[i] = mclk_table->entries[i].clk; |
4d8d44c6 | 4532 | clocks->count = mclk_table->count; |
599a7e9f RZ |
4533 | } |
4534 | return 0; | |
4535 | } | |
4536 | ||
4537 | static int smu7_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type, | |
4538 | struct amd_pp_clocks *clocks) | |
4539 | { | |
4540 | switch (type) { | |
4541 | case amd_pp_sys_clock: | |
4542 | smu7_get_sclks(hwmgr, clocks); | |
4543 | break; | |
4544 | case amd_pp_mem_clock: | |
4545 | smu7_get_mclks(hwmgr, clocks); | |
4546 | break; | |
4547 | default: | |
4548 | return -EINVAL; | |
4549 | } | |
4550 | ||
4551 | return 0; | |
4552 | } | |
4553 | ||
ff3953d4 EH |
4554 | static void smu7_find_min_clock_masks(struct pp_hwmgr *hwmgr, |
4555 | uint32_t *sclk_mask, uint32_t *mclk_mask, | |
4556 | uint32_t min_sclk, uint32_t min_mclk) | |
4557 | { | |
4558 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
4559 | struct smu7_dpm_table *dpm_table = &(data->dpm_table); | |
4560 | uint32_t i; | |
4561 | ||
4562 | for (i = 0; i < dpm_table->sclk_table.count; i++) { | |
4563 | if (dpm_table->sclk_table.dpm_levels[i].enabled && | |
4564 | dpm_table->sclk_table.dpm_levels[i].value >= min_sclk) | |
4565 | *sclk_mask |= 1 << i; | |
4566 | } | |
4567 | ||
4568 | for (i = 0; i < dpm_table->mclk_table.count; i++) { | |
4569 | if (dpm_table->mclk_table.dpm_levels[i].enabled && | |
4570 | dpm_table->mclk_table.dpm_levels[i].value >= min_mclk) | |
4571 | *mclk_mask |= 1 << i; | |
4572 | } | |
4573 | } | |
4574 | ||
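/*
 * Mask-building example for the helper above (table illustrative): with
 * enabled SCLK levels {30000, 60000, 90000} and min_sclk = 60000, levels
 * 1 and 2 qualify, so *sclk_mask |= 0b110. The caller then ANDs this
 * with the DPM enable mask before sending it to the SMC.
 */
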
4575 | static int smu7_set_power_profile_state(struct pp_hwmgr *hwmgr, | |
4576 | struct amd_pp_profile *request) | |
4577 | { | |
4578 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
4579 | int tmp_result, result = 0; | |
4580 | uint32_t sclk_mask = 0, mclk_mask = 0; | |
4581 | ||
923d26db EH |
4582 | if (hwmgr->chip_id == CHIP_FIJI) { |
4583 | if (request->type == AMD_PP_GFX_PROFILE) | |
4584 | smu7_enable_power_containment(hwmgr); | |
4585 | else if (request->type == AMD_PP_COMPUTE_PROFILE) | |
4586 | smu7_disable_power_containment(hwmgr); | |
4587 | } | |
4588 | ||
ff3953d4 EH |
4589 | if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_AUTO) |
4590 | return -EINVAL; | |
4591 | ||
4592 | tmp_result = smu7_freeze_sclk_mclk_dpm(hwmgr); | |
4593 | PP_ASSERT_WITH_CODE(!tmp_result, | |
4594 | "Failed to freeze SCLK MCLK DPM!", | |
4595 | result = tmp_result); | |
4596 | ||
4597 | tmp_result = smum_populate_requested_graphic_levels(hwmgr, request); | |
4598 | PP_ASSERT_WITH_CODE(!tmp_result, | |
4599 | "Failed to populate requested graphic levels!", | |
4600 | result = tmp_result); | |
4601 | ||
4602 | tmp_result = smu7_unfreeze_sclk_mclk_dpm(hwmgr); | |
4603 | PP_ASSERT_WITH_CODE(!tmp_result, | |
4604 | "Failed to unfreeze SCLK MCLK DPM!", | |
4605 | result = tmp_result); | |
4606 | ||
4607 | smu7_find_min_clock_masks(hwmgr, &sclk_mask, &mclk_mask, | |
4608 | request->min_sclk, request->min_mclk); | |
4609 | ||
4610 | if (sclk_mask) { | |
4611 | if (!data->sclk_dpm_key_disabled) | |
4612 | smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, | |
4613 | PPSMC_MSG_SCLKDPM_SetEnabledMask, | |
4614 | data->dpm_level_enable_mask. | |
4615 | sclk_dpm_enable_mask & | |
4616 | sclk_mask); | |
4617 | } | |
4618 | ||
4619 | if (mclk_mask) { | |
4620 | if (!data->mclk_dpm_key_disabled) | |
4621 | smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, | |
4622 | PPSMC_MSG_MCLKDPM_SetEnabledMask, | |
4623 | data->dpm_level_enable_mask. | |
4624 | mclk_dpm_enable_mask & | |
4625 | mclk_mask); | |
4626 | } | |
4627 | ||
4628 | return result; | |
4629 | } | |
4630 | ||
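/*
 * Sequence note for the function above: SCLK/MCLK DPM is frozen while
 * the requested graphics levels are re-populated, so the SMC cannot
 * switch levels against a half-written table; DPM is then unfrozen and
 * the minimum-clock masks derived from the profile are applied.
 */
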
f9c993ce EH |
4631 | static int smu7_avfs_control(struct pp_hwmgr *hwmgr, bool enable) |
4632 | { | |
4633 | if (enable) { | |
4634 | if (!PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, | |
4635 | CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON)) | |
4636 | PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc( | |
4637 | hwmgr->smumgr, PPSMC_MSG_EnableAvfs), | |
4638 | "Failed to enable AVFS!", | |
4639 | return -EINVAL); | |
4640 | } else if (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, | |
4641 | CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON)) | |
4642 | PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc( | |
4643 | hwmgr->smumgr, PPSMC_MSG_DisableAvfs), | |
4644 | "Failed to disable AVFS!", | |
4645 | return -EINVAL); | |
4646 | ||
4647 | return 0; | |
4648 | } | |
4649 | ||
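/*
 * smu7_avfs_control() checks FEATURE_STATUS.AVS_ON first so that
 * PPSMC_MSG_EnableAvfs / PPSMC_MSG_DisableAvfs is only sent when the
 * AVFS state actually needs to change, avoiding redundant SMC traffic.
 */
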
a1c1a1de | 4650 | static const struct pp_hwmgr_func smu7_hwmgr_funcs = { |
599a7e9f | 4651 | .backend_init = &smu7_hwmgr_backend_init, |
a0aa7046 | 4652 | .backend_fini = &smu7_hwmgr_backend_fini, |
599a7e9f RZ |
4653 | .asic_setup = &smu7_setup_asic_task, |
4654 | .dynamic_state_management_enable = &smu7_enable_dpm_tasks, | |
4655 | .apply_state_adjust_rules = smu7_apply_state_adjust_rules, | |
4656 | .force_dpm_level = &smu7_force_dpm_level, | |
4657 | .power_state_set = smu7_set_power_state_tasks, | |
4658 | .get_power_state_size = smu7_get_power_state_size, | |
4659 | .get_mclk = smu7_dpm_get_mclk, | |
4660 | .get_sclk = smu7_dpm_get_sclk, | |
4661 | .patch_boot_state = smu7_dpm_patch_boot_state, | |
4662 | .get_pp_table_entry = smu7_get_pp_table_entry, | |
4663 | .get_num_of_pp_table_entries = smu7_get_number_of_powerplay_table_entries, | |
599a7e9f RZ |
4664 | .powerdown_uvd = smu7_powerdown_uvd, |
4665 | .powergate_uvd = smu7_powergate_uvd, | |
4666 | .powergate_vce = smu7_powergate_vce, | |
4667 | .disable_clock_power_gating = smu7_disable_clock_power_gating, | |
4668 | .update_clock_gatings = smu7_update_clock_gatings, | |
4669 | .notify_smc_display_config_after_ps_adjustment = smu7_notify_smc_display_config_after_ps_adjustment, | |
4670 | .display_config_changed = smu7_display_configuration_changed_task, | |
4671 | .set_max_fan_pwm_output = smu7_set_max_fan_pwm_output, | |
4672 | .set_max_fan_rpm_output = smu7_set_max_fan_rpm_output, | |
4673 | .get_temperature = smu7_thermal_get_temperature, | |
4674 | .stop_thermal_controller = smu7_thermal_stop_thermal_controller, | |
4675 | .get_fan_speed_info = smu7_fan_ctrl_get_fan_speed_info, | |
4676 | .get_fan_speed_percent = smu7_fan_ctrl_get_fan_speed_percent, | |
4677 | .set_fan_speed_percent = smu7_fan_ctrl_set_fan_speed_percent, | |
4678 | .reset_fan_speed_to_default = smu7_fan_ctrl_reset_fan_speed_to_default, | |
4679 | .get_fan_speed_rpm = smu7_fan_ctrl_get_fan_speed_rpm, | |
4680 | .set_fan_speed_rpm = smu7_fan_ctrl_set_fan_speed_rpm, | |
4681 | .uninitialize_thermal_controller = smu7_thermal_ctrl_uninitialize_thermal_controller, | |
4682 | .register_internal_thermal_interrupt = smu7_register_internal_thermal_interrupt, | |
4683 | .check_smc_update_required_for_display_configuration = smu7_check_smc_update_required_for_display_configuration, | |
4684 | .check_states_equal = smu7_check_states_equal, | |
4685 | .set_fan_control_mode = smu7_set_fan_control_mode, | |
4686 | .get_fan_control_mode = smu7_get_fan_control_mode, | |
4687 | .force_clock_level = smu7_force_clock_level, | |
4688 | .print_clock_levels = smu7_print_clock_levels, | |
4689 | .enable_per_cu_power_gating = smu7_enable_per_cu_power_gating, | |
4690 | .get_sclk_od = smu7_get_sclk_od, | |
4691 | .set_sclk_od = smu7_set_sclk_od, | |
4692 | .get_mclk_od = smu7_get_mclk_od, | |
4693 | .set_mclk_od = smu7_set_mclk_od, | |
4694 | .get_clock_by_type = smu7_get_clock_by_type, | |
a6e36952 | 4695 | .read_sensor = smu7_read_sensor, |
f28a9b65 | 4696 | .dynamic_state_management_disable = smu7_disable_dpm_tasks, |
ff3953d4 | 4697 | .set_power_profile_state = smu7_set_power_profile_state, |
f9c993ce | 4698 | .avfs_control = smu7_avfs_control, |
1dfc41d4 | 4699 | .disable_smc_firmware_ctf = smu7_thermal_disable_alert, |
599a7e9f RZ |
4700 | }; |
4701 | ||
4702 | uint8_t smu7_get_sleep_divider_id_from_clock(uint32_t clock, | |
4703 | uint32_t clock_insr) | |
4704 | { | |
4705 | uint8_t i; | |
4706 | uint32_t temp; | |
4707 | uint32_t min = max(clock_insr, (uint32_t)SMU7_MINIMUM_ENGINE_CLOCK); | |
4708 | ||
4709 | PP_ASSERT_WITH_CODE((clock >= min), "Engine clock can't satisfy stutter requirement!", return 0); | |
4710 | for (i = SMU7_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) { | |
4711 | temp = clock >> i; | |
4712 | ||
4713 | if (temp >= min || i == 0) | |
4714 | break; | |
4715 | } | |
4716 | return i; | |
4717 | } | |
4718 | ||
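/*
 * Divider example for the function above (clock values illustrative,
 * assuming clock_insr <= SMU7_MINIMUM_ENGINE_CLOCK = 2500 and
 * SMU7_MAX_DEEPSLEEP_DIVIDER_ID = 5): for clock = 30000,
 *   30000 >> 5 =  937 <  2500
 *   30000 >> 4 = 1875 <  2500
 *   30000 >> 3 = 3750 >= 2500  -> returns divider id 3 (divide by 8),
 * the deepest sleep divider that still satisfies the minimum clock.
 */
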
a5b580e1 | 4719 | int smu7_init_function_pointers(struct pp_hwmgr *hwmgr) |
599a7e9f RZ |
4720 | { |
4721 | int ret = 0; | |
4722 | ||
4723 | hwmgr->hwmgr_func = &smu7_hwmgr_funcs; | |
4724 | if (hwmgr->pp_table_version == PP_TABLE_V0) | |
4725 | hwmgr->pptable_func = &pptable_funcs; | |
4726 | else if (hwmgr->pp_table_version == PP_TABLE_V1) | |
4727 | hwmgr->pptable_func = &pptable_v1_0_funcs; | |
4728 | ||
599a7e9f RZ |
4729 | return ret; |
4730 | } | |
4731 |