]>
Commit | Line | Data |
---|---|---|
f7c1ed34 ML |
1 | /* |
2 | * Copyright 2018 Advanced Micro Devices, Inc. | |
3 | * | |
4 | * Permission is hereby granted, free of charge, to any person obtaining a | |
5 | * copy of this software and associated documentation files (the "Software"), | |
6 | * to deal in the Software without restriction, including without limitation | |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | |
8 | * and/or sell copies of the Software, and to permit persons to whom the | |
9 | * Software is furnished to do so, subject to the following conditions: | |
10 | * | |
11 | * The above copyright notice and this permission notice shall be included in | |
12 | * all copies or substantial portions of the Software. | |
13 | * | |
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | |
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | |
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | |
20 | * OTHER DEALINGS IN THE SOFTWARE. | |
21 | * | |
22 | * Authors: AMD | |
23 | */ | |
24 | #include <linux/string.h> | |
25 | #include <linux/acpi.h> | |
26 | ||
fcd70cd3 | 27 | #include <drm/drm_probe_helper.h> |
f7c1ed34 ML |
28 | #include <drm/amdgpu_drm.h> |
29 | #include "dm_services.h" | |
30 | #include "amdgpu.h" | |
31 | #include "amdgpu_dm.h" | |
32 | #include "amdgpu_dm_irq.h" | |
33 | #include "amdgpu_pm.h" | |
34 | #include "dm_pp_smu.h" | |
94ed6d0c | 35 | #include "amdgpu_smu.h" |
f7c1ed34 ML |
36 | |
37 | ||
38 | bool dm_pp_apply_display_requirements( | |
39 | const struct dc_context *ctx, | |
40 | const struct dm_pp_display_configuration *pp_display_cfg) | |
41 | { | |
42 | struct amdgpu_device *adev = ctx->driver_context; | |
94ed6d0c | 43 | struct smu_context *smu = &adev->smu; |
d4d5eace | 44 | int i; |
f7c1ed34 ML |
45 | |
46 | if (adev->pm.dpm_enabled) { | |
47 | ||
48 | memset(&adev->pm.pm_display_cfg, 0, | |
49 | sizeof(adev->pm.pm_display_cfg)); | |
50 | ||
51 | adev->pm.pm_display_cfg.cpu_cc6_disable = | |
52 | pp_display_cfg->cpu_cc6_disable; | |
53 | ||
54 | adev->pm.pm_display_cfg.cpu_pstate_disable = | |
55 | pp_display_cfg->cpu_pstate_disable; | |
56 | ||
57 | adev->pm.pm_display_cfg.cpu_pstate_separation_time = | |
58 | pp_display_cfg->cpu_pstate_separation_time; | |
59 | ||
60 | adev->pm.pm_display_cfg.nb_pstate_switch_disable = | |
61 | pp_display_cfg->nb_pstate_switch_disable; | |
62 | ||
63 | adev->pm.pm_display_cfg.num_display = | |
64 | pp_display_cfg->display_count; | |
65 | adev->pm.pm_display_cfg.num_path_including_non_display = | |
66 | pp_display_cfg->display_count; | |
67 | ||
68 | adev->pm.pm_display_cfg.min_core_set_clock = | |
69 | pp_display_cfg->min_engine_clock_khz/10; | |
70 | adev->pm.pm_display_cfg.min_core_set_clock_in_sr = | |
71 | pp_display_cfg->min_engine_clock_deep_sleep_khz/10; | |
72 | adev->pm.pm_display_cfg.min_mem_set_clock = | |
73 | pp_display_cfg->min_memory_clock_khz/10; | |
74 | ||
3180fb67 | 75 | adev->pm.pm_display_cfg.min_dcef_deep_sleep_set_clk = |
76 | pp_display_cfg->min_engine_clock_deep_sleep_khz/10; | |
77 | adev->pm.pm_display_cfg.min_dcef_set_clk = | |
78 | pp_display_cfg->min_dcfclock_khz/10; | |
79 | ||
f7c1ed34 ML |
80 | adev->pm.pm_display_cfg.multi_monitor_in_sync = |
81 | pp_display_cfg->all_displays_in_sync; | |
82 | adev->pm.pm_display_cfg.min_vblank_time = | |
83 | pp_display_cfg->avail_mclk_switch_time_us; | |
84 | ||
85 | adev->pm.pm_display_cfg.display_clk = | |
86 | pp_display_cfg->disp_clk_khz/10; | |
87 | ||
88 | adev->pm.pm_display_cfg.dce_tolerable_mclk_in_active_latency = | |
89 | pp_display_cfg->avail_mclk_switch_time_in_disp_active_us; | |
90 | ||
91 | adev->pm.pm_display_cfg.crtc_index = pp_display_cfg->crtc_index; | |
92 | adev->pm.pm_display_cfg.line_time_in_us = | |
93 | pp_display_cfg->line_time_in_us; | |
94 | ||
95 | adev->pm.pm_display_cfg.vrefresh = pp_display_cfg->disp_configs[0].v_refresh; | |
96 | adev->pm.pm_display_cfg.crossfire_display_index = -1; | |
97 | adev->pm.pm_display_cfg.min_bus_bandwidth = 0; | |
98 | ||
d4d5eace | 99 | for (i = 0; i < pp_display_cfg->display_count; i++) { |
100 | const struct dm_pp_single_disp_config *dc_cfg = | |
101 | &pp_display_cfg->disp_configs[i]; | |
102 | adev->pm.pm_display_cfg.displays[i].controller_id = dc_cfg->pipe_idx + 1; | |
103 | } | |
104 | ||
6f059c64 | 105 | if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->display_configuration_change) |
f7c1ed34 ML |
106 | adev->powerplay.pp_funcs->display_configuration_change( |
107 | adev->powerplay.pp_handle, | |
108 | &adev->pm.pm_display_cfg); | |
94ed6d0c HR |
109 | else |
110 | smu_display_configuration_change(smu, | |
111 | &adev->pm.pm_display_cfg); | |
40d0ebd9 RZ |
112 | |
113 | amdgpu_pm_compute_clocks(adev); | |
f7c1ed34 ML |
114 | } |
115 | ||
116 | return true; | |
117 | } | |
118 | ||
119 | static void get_default_clock_levels( | |
120 | enum dm_pp_clock_type clk_type, | |
121 | struct dm_pp_clock_levels *clks) | |
122 | { | |
123 | uint32_t disp_clks_in_khz[6] = { | |
124 | 300000, 400000, 496560, 626090, 685720, 757900 }; | |
125 | uint32_t sclks_in_khz[6] = { | |
126 | 300000, 360000, 423530, 514290, 626090, 720000 }; | |
127 | uint32_t mclks_in_khz[2] = { 333000, 800000 }; | |
128 | ||
129 | switch (clk_type) { | |
130 | case DM_PP_CLOCK_TYPE_DISPLAY_CLK: | |
131 | clks->num_levels = 6; | |
132 | memmove(clks->clocks_in_khz, disp_clks_in_khz, | |
133 | sizeof(disp_clks_in_khz)); | |
134 | break; | |
135 | case DM_PP_CLOCK_TYPE_ENGINE_CLK: | |
136 | clks->num_levels = 6; | |
137 | memmove(clks->clocks_in_khz, sclks_in_khz, | |
138 | sizeof(sclks_in_khz)); | |
139 | break; | |
140 | case DM_PP_CLOCK_TYPE_MEMORY_CLK: | |
141 | clks->num_levels = 2; | |
142 | memmove(clks->clocks_in_khz, mclks_in_khz, | |
143 | sizeof(mclks_in_khz)); | |
144 | break; | |
145 | default: | |
146 | clks->num_levels = 0; | |
147 | break; | |
148 | } | |
149 | } | |
150 | ||
a43913ea KW |
151 | static enum smu_clk_type dc_to_smu_clock_type( |
152 | enum dm_pp_clock_type dm_pp_clk_type) | |
153 | { | |
d9ec5cfd LL |
154 | enum smu_clk_type smu_clk_type = SMU_CLK_COUNT; |
155 | ||
156 | switch (dm_pp_clk_type) { | |
157 | case DM_PP_CLOCK_TYPE_DISPLAY_CLK: | |
158 | smu_clk_type = SMU_DISPCLK; | |
159 | break; | |
160 | case DM_PP_CLOCK_TYPE_ENGINE_CLK: | |
161 | smu_clk_type = SMU_GFXCLK; | |
162 | break; | |
163 | case DM_PP_CLOCK_TYPE_MEMORY_CLK: | |
164 | smu_clk_type = SMU_MCLK; | |
165 | break; | |
166 | case DM_PP_CLOCK_TYPE_DCEFCLK: | |
167 | smu_clk_type = SMU_DCEFCLK; | |
168 | break; | |
169 | case DM_PP_CLOCK_TYPE_SOCCLK: | |
170 | smu_clk_type = SMU_SOCCLK; | |
171 | break; | |
172 | default: | |
173 | DRM_ERROR("DM_PPLIB: invalid clock type: %d!\n", | |
174 | dm_pp_clk_type); | |
175 | break; | |
176 | } | |
177 | ||
178 | return smu_clk_type; | |
a43913ea KW |
179 | } |
180 | ||
f7c1ed34 ML |
181 | static enum amd_pp_clock_type dc_to_pp_clock_type( |
182 | enum dm_pp_clock_type dm_pp_clk_type) | |
183 | { | |
184 | enum amd_pp_clock_type amd_pp_clk_type = 0; | |
185 | ||
186 | switch (dm_pp_clk_type) { | |
187 | case DM_PP_CLOCK_TYPE_DISPLAY_CLK: | |
188 | amd_pp_clk_type = amd_pp_disp_clock; | |
189 | break; | |
190 | case DM_PP_CLOCK_TYPE_ENGINE_CLK: | |
191 | amd_pp_clk_type = amd_pp_sys_clock; | |
192 | break; | |
193 | case DM_PP_CLOCK_TYPE_MEMORY_CLK: | |
194 | amd_pp_clk_type = amd_pp_mem_clock; | |
195 | break; | |
196 | case DM_PP_CLOCK_TYPE_DCEFCLK: | |
197 | amd_pp_clk_type = amd_pp_dcef_clock; | |
198 | break; | |
199 | case DM_PP_CLOCK_TYPE_DCFCLK: | |
200 | amd_pp_clk_type = amd_pp_dcf_clock; | |
201 | break; | |
202 | case DM_PP_CLOCK_TYPE_PIXELCLK: | |
203 | amd_pp_clk_type = amd_pp_pixel_clock; | |
204 | break; | |
205 | case DM_PP_CLOCK_TYPE_FCLK: | |
206 | amd_pp_clk_type = amd_pp_f_clock; | |
207 | break; | |
208 | case DM_PP_CLOCK_TYPE_DISPLAYPHYCLK: | |
66917e56 | 209 | amd_pp_clk_type = amd_pp_phy_clock; |
210 | break; | |
211 | case DM_PP_CLOCK_TYPE_DPPCLK: | |
f7c1ed34 ML |
212 | amd_pp_clk_type = amd_pp_dpp_clock; |
213 | break; | |
214 | default: | |
215 | DRM_ERROR("DM_PPLIB: invalid clock type: %d!\n", | |
216 | dm_pp_clk_type); | |
217 | break; | |
218 | } | |
219 | ||
220 | return amd_pp_clk_type; | |
221 | } | |
222 | ||
c2c09ed5 ML |
223 | static enum dm_pp_clocks_state pp_to_dc_powerlevel_state( |
224 | enum PP_DAL_POWERLEVEL max_clocks_state) | |
225 | { | |
226 | switch (max_clocks_state) { | |
227 | case PP_DAL_POWERLEVEL_0: | |
228 | return DM_PP_CLOCKS_DPM_STATE_LEVEL_0; | |
229 | case PP_DAL_POWERLEVEL_1: | |
230 | return DM_PP_CLOCKS_DPM_STATE_LEVEL_1; | |
231 | case PP_DAL_POWERLEVEL_2: | |
232 | return DM_PP_CLOCKS_DPM_STATE_LEVEL_2; | |
233 | case PP_DAL_POWERLEVEL_3: | |
234 | return DM_PP_CLOCKS_DPM_STATE_LEVEL_3; | |
235 | case PP_DAL_POWERLEVEL_4: | |
236 | return DM_PP_CLOCKS_DPM_STATE_LEVEL_4; | |
237 | case PP_DAL_POWERLEVEL_5: | |
238 | return DM_PP_CLOCKS_DPM_STATE_LEVEL_5; | |
239 | case PP_DAL_POWERLEVEL_6: | |
240 | return DM_PP_CLOCKS_DPM_STATE_LEVEL_6; | |
241 | case PP_DAL_POWERLEVEL_7: | |
242 | return DM_PP_CLOCKS_DPM_STATE_LEVEL_7; | |
243 | default: | |
244 | DRM_ERROR("DM_PPLIB: invalid powerlevel state: %d!\n", | |
245 | max_clocks_state); | |
246 | return DM_PP_CLOCKS_STATE_INVALID; | |
247 | } | |
248 | } | |
249 | ||
f7c1ed34 ML |
250 | static void pp_to_dc_clock_levels( |
251 | const struct amd_pp_clocks *pp_clks, | |
252 | struct dm_pp_clock_levels *dc_clks, | |
253 | enum dm_pp_clock_type dc_clk_type) | |
254 | { | |
255 | uint32_t i; | |
256 | ||
257 | if (pp_clks->count > DM_PP_MAX_CLOCK_LEVELS) { | |
258 | DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n", | |
259 | DC_DECODE_PP_CLOCK_TYPE(dc_clk_type), | |
260 | pp_clks->count, | |
261 | DM_PP_MAX_CLOCK_LEVELS); | |
262 | ||
263 | dc_clks->num_levels = DM_PP_MAX_CLOCK_LEVELS; | |
264 | } else | |
265 | dc_clks->num_levels = pp_clks->count; | |
266 | ||
267 | DRM_INFO("DM_PPLIB: values for %s clock\n", | |
268 | DC_DECODE_PP_CLOCK_TYPE(dc_clk_type)); | |
269 | ||
270 | for (i = 0; i < dc_clks->num_levels; i++) { | |
271 | DRM_INFO("DM_PPLIB:\t %d\n", pp_clks->clock[i]); | |
23ec3d14 | 272 | dc_clks->clocks_in_khz[i] = pp_clks->clock[i]; |
f7c1ed34 ML |
273 | } |
274 | } | |
275 | ||
276 | static void pp_to_dc_clock_levels_with_latency( | |
277 | const struct pp_clock_levels_with_latency *pp_clks, | |
278 | struct dm_pp_clock_levels_with_latency *clk_level_info, | |
279 | enum dm_pp_clock_type dc_clk_type) | |
280 | { | |
281 | uint32_t i; | |
282 | ||
283 | if (pp_clks->num_levels > DM_PP_MAX_CLOCK_LEVELS) { | |
284 | DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n", | |
285 | DC_DECODE_PP_CLOCK_TYPE(dc_clk_type), | |
286 | pp_clks->num_levels, | |
287 | DM_PP_MAX_CLOCK_LEVELS); | |
288 | ||
289 | clk_level_info->num_levels = DM_PP_MAX_CLOCK_LEVELS; | |
290 | } else | |
291 | clk_level_info->num_levels = pp_clks->num_levels; | |
292 | ||
293 | DRM_DEBUG("DM_PPLIB: values for %s clock\n", | |
294 | DC_DECODE_PP_CLOCK_TYPE(dc_clk_type)); | |
295 | ||
296 | for (i = 0; i < clk_level_info->num_levels; i++) { | |
23ec3d14 RZ |
297 | DRM_DEBUG("DM_PPLIB:\t %d in kHz\n", pp_clks->data[i].clocks_in_khz); |
298 | clk_level_info->data[i].clocks_in_khz = pp_clks->data[i].clocks_in_khz; | |
f7c1ed34 ML |
299 | clk_level_info->data[i].latency_in_us = pp_clks->data[i].latency_in_us; |
300 | } | |
301 | } | |
302 | ||
303 | static void pp_to_dc_clock_levels_with_voltage( | |
304 | const struct pp_clock_levels_with_voltage *pp_clks, | |
305 | struct dm_pp_clock_levels_with_voltage *clk_level_info, | |
306 | enum dm_pp_clock_type dc_clk_type) | |
307 | { | |
308 | uint32_t i; | |
309 | ||
310 | if (pp_clks->num_levels > DM_PP_MAX_CLOCK_LEVELS) { | |
311 | DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n", | |
312 | DC_DECODE_PP_CLOCK_TYPE(dc_clk_type), | |
313 | pp_clks->num_levels, | |
314 | DM_PP_MAX_CLOCK_LEVELS); | |
315 | ||
316 | clk_level_info->num_levels = DM_PP_MAX_CLOCK_LEVELS; | |
317 | } else | |
318 | clk_level_info->num_levels = pp_clks->num_levels; | |
319 | ||
320 | DRM_INFO("DM_PPLIB: values for %s clock\n", | |
321 | DC_DECODE_PP_CLOCK_TYPE(dc_clk_type)); | |
322 | ||
323 | for (i = 0; i < clk_level_info->num_levels; i++) { | |
5f0f531c PM |
324 | DRM_INFO("DM_PPLIB:\t %d in kHz, %d in mV\n", pp_clks->data[i].clocks_in_khz, |
325 | pp_clks->data[i].voltage_in_mv); | |
23ec3d14 | 326 | clk_level_info->data[i].clocks_in_khz = pp_clks->data[i].clocks_in_khz; |
f7c1ed34 ML |
327 | clk_level_info->data[i].voltage_in_mv = pp_clks->data[i].voltage_in_mv; |
328 | } | |
329 | } | |
330 | ||
331 | bool dm_pp_get_clock_levels_by_type( | |
332 | const struct dc_context *ctx, | |
333 | enum dm_pp_clock_type clk_type, | |
334 | struct dm_pp_clock_levels *dc_clks) | |
335 | { | |
336 | struct amdgpu_device *adev = ctx->driver_context; | |
337 | void *pp_handle = adev->powerplay.pp_handle; | |
338 | struct amd_pp_clocks pp_clks = { 0 }; | |
339 | struct amd_pp_simple_clock_info validation_clks = { 0 }; | |
340 | uint32_t i; | |
341 | ||
6f059c64 | 342 | if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_clock_by_type) { |
f7c1ed34 ML |
343 | if (adev->powerplay.pp_funcs->get_clock_by_type(pp_handle, |
344 | dc_to_pp_clock_type(clk_type), &pp_clks)) { | |
1ed2425f AD |
345 | /* Error in pplib. Provide default values. */ |
346 | get_default_clock_levels(clk_type, dc_clks); | |
b3ea88fe HR |
347 | return true; |
348 | } | |
349 | } else if (adev->smu.funcs && adev->smu.funcs->get_clock_by_type) { | |
350 | if (smu_get_clock_by_type(&adev->smu, | |
d196bbbc | 351 | dc_to_pp_clock_type(clk_type), |
b3ea88fe | 352 | &pp_clks)) { |
f7c1ed34 ML |
353 | get_default_clock_levels(clk_type, dc_clks); |
354 | return true; | |
355 | } | |
356 | } | |
357 | ||
358 | pp_to_dc_clock_levels(&pp_clks, dc_clks, clk_type); | |
359 | ||
6f059c64 | 360 | if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_display_mode_validation_clocks) { |
f7c1ed34 ML |
361 | if (adev->powerplay.pp_funcs->get_display_mode_validation_clocks( |
362 | pp_handle, &validation_clks)) { | |
363 | /* Error in pplib. Provide default values. */ | |
364 | DRM_INFO("DM_PPLIB: Warning: using default validation clocks!\n"); | |
365 | validation_clks.engine_max_clock = 72000; | |
366 | validation_clks.memory_max_clock = 80000; | |
367 | validation_clks.level = 0; | |
368 | } | |
6ec82684 HR |
369 | } else if (adev->smu.funcs && adev->smu.funcs->get_max_high_clocks) { |
370 | if (smu_get_max_high_clocks(&adev->smu, &validation_clks)) { | |
371 | DRM_INFO("DM_PPLIB: Warning: using default validation clocks!\n"); | |
372 | validation_clks.engine_max_clock = 72000; | |
373 | validation_clks.memory_max_clock = 80000; | |
374 | validation_clks.level = 0; | |
375 | } | |
f7c1ed34 ML |
376 | } |
377 | ||
378 | DRM_INFO("DM_PPLIB: Validation clocks:\n"); | |
379 | DRM_INFO("DM_PPLIB: engine_max_clock: %d\n", | |
380 | validation_clks.engine_max_clock); | |
381 | DRM_INFO("DM_PPLIB: memory_max_clock: %d\n", | |
382 | validation_clks.memory_max_clock); | |
383 | DRM_INFO("DM_PPLIB: level : %d\n", | |
384 | validation_clks.level); | |
385 | ||
386 | /* Translate 10 kHz to kHz. */ | |
387 | validation_clks.engine_max_clock *= 10; | |
388 | validation_clks.memory_max_clock *= 10; | |
389 | ||
390 | /* Determine the highest non-boosted level from the Validation Clocks */ | |
391 | if (clk_type == DM_PP_CLOCK_TYPE_ENGINE_CLK) { | |
392 | for (i = 0; i < dc_clks->num_levels; i++) { | |
393 | if (dc_clks->clocks_in_khz[i] > validation_clks.engine_max_clock) { | |
394 | /* This clock is higher the validation clock. | |
395 | * Than means the previous one is the highest | |
396 | * non-boosted one. */ | |
397 | DRM_INFO("DM_PPLIB: reducing engine clock level from %d to %d\n", | |
398 | dc_clks->num_levels, i); | |
399 | dc_clks->num_levels = i > 0 ? i : 1; | |
400 | break; | |
401 | } | |
402 | } | |
403 | } else if (clk_type == DM_PP_CLOCK_TYPE_MEMORY_CLK) { | |
404 | for (i = 0; i < dc_clks->num_levels; i++) { | |
405 | if (dc_clks->clocks_in_khz[i] > validation_clks.memory_max_clock) { | |
406 | DRM_INFO("DM_PPLIB: reducing memory clock level from %d to %d\n", | |
407 | dc_clks->num_levels, i); | |
408 | dc_clks->num_levels = i > 0 ? i : 1; | |
409 | break; | |
410 | } | |
411 | } | |
412 | } | |
413 | ||
414 | return true; | |
415 | } | |
416 | ||
417 | bool dm_pp_get_clock_levels_by_type_with_latency( | |
418 | const struct dc_context *ctx, | |
419 | enum dm_pp_clock_type clk_type, | |
420 | struct dm_pp_clock_levels_with_latency *clk_level_info) | |
421 | { | |
422 | struct amdgpu_device *adev = ctx->driver_context; | |
423 | void *pp_handle = adev->powerplay.pp_handle; | |
424 | struct pp_clock_levels_with_latency pp_clks = { 0 }; | |
425 | const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; | |
e5e4e223 HR |
426 | int ret; |
427 | ||
428 | if (pp_funcs && pp_funcs->get_clock_by_type_with_latency) { | |
429 | ret = pp_funcs->get_clock_by_type_with_latency(pp_handle, | |
430 | dc_to_pp_clock_type(clk_type), | |
431 | &pp_clks); | |
432 | if (ret) | |
433 | return false; | |
434 | } else if (adev->smu.ppt_funcs && adev->smu.ppt_funcs->get_clock_by_type_with_latency) { | |
435 | if (smu_get_clock_by_type_with_latency(&adev->smu, | |
d196bbbc | 436 | dc_to_smu_clock_type(clk_type), |
e5e4e223 HR |
437 | &pp_clks)) |
438 | return false; | |
439 | } | |
f7c1ed34 | 440 | |
f7c1ed34 ML |
441 | |
442 | pp_to_dc_clock_levels_with_latency(&pp_clks, clk_level_info, clk_type); | |
443 | ||
444 | return true; | |
445 | } | |
446 | ||
447 | bool dm_pp_get_clock_levels_by_type_with_voltage( | |
448 | const struct dc_context *ctx, | |
449 | enum dm_pp_clock_type clk_type, | |
450 | struct dm_pp_clock_levels_with_voltage *clk_level_info) | |
451 | { | |
452 | struct amdgpu_device *adev = ctx->driver_context; | |
453 | void *pp_handle = adev->powerplay.pp_handle; | |
454 | struct pp_clock_levels_with_voltage pp_clk_info = {0}; | |
455 | const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; | |
1e33d4d4 | 456 | int ret; |
f7c1ed34 | 457 | |
1e33d4d4 HR |
458 | if (pp_funcs && pp_funcs->get_clock_by_type_with_voltage) { |
459 | ret = pp_funcs->get_clock_by_type_with_voltage(pp_handle, | |
460 | dc_to_pp_clock_type(clk_type), | |
461 | &pp_clk_info); | |
462 | if (ret) | |
463 | return false; | |
464 | } else if (adev->smu.ppt_funcs && adev->smu.ppt_funcs->get_clock_by_type_with_voltage) { | |
465 | if (smu_get_clock_by_type_with_voltage(&adev->smu, | |
466 | dc_to_pp_clock_type(clk_type), | |
467 | &pp_clk_info)) | |
468 | return false; | |
469 | } | |
f7c1ed34 ML |
470 | |
471 | pp_to_dc_clock_levels_with_voltage(&pp_clk_info, clk_level_info, clk_type); | |
472 | ||
473 | return true; | |
474 | } | |
475 | ||
/* Not implemented yet; callers see 'false' (no watermark change applied). */
bool dm_pp_notify_wm_clock_changes(
	const struct dc_context *ctx,
	struct dm_pp_wm_sets_with_clock_ranges *wm_with_clock_ranges)
{
	/* TODO: to be implemented */
	return false;
}
483 | ||
/* Not implemented yet; callers see 'false' (request not applied). */
bool dm_pp_apply_power_level_change_request(
	const struct dc_context *ctx,
	struct dm_pp_power_level_change_request *level_change_req)
{
	/* TODO: to be implemented */
	return false;
}
491 | ||
492 | bool dm_pp_apply_clock_for_voltage_request( | |
493 | const struct dc_context *ctx, | |
494 | struct dm_pp_clock_for_voltage_req *clock_for_voltage_req) | |
495 | { | |
496 | struct amdgpu_device *adev = ctx->driver_context; | |
497 | struct pp_display_clock_request pp_clock_request = {0}; | |
498 | int ret = 0; | |
499 | ||
500 | pp_clock_request.clock_type = dc_to_pp_clock_type(clock_for_voltage_req->clk_type); | |
501 | pp_clock_request.clock_freq_in_khz = clock_for_voltage_req->clocks_in_khz; | |
502 | ||
503 | if (!pp_clock_request.clock_type) | |
504 | return false; | |
505 | ||
6f059c64 | 506 | if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->display_clock_voltage_request) |
f7c1ed34 ML |
507 | ret = adev->powerplay.pp_funcs->display_clock_voltage_request( |
508 | adev->powerplay.pp_handle, | |
509 | &pp_clock_request); | |
04885368 HR |
510 | else if (adev->smu.funcs && |
511 | adev->smu.funcs->display_clock_voltage_request) | |
512 | ret = smu_display_clock_voltage_request(&adev->smu, | |
513 | &pp_clock_request); | |
f7c1ed34 ML |
514 | if (ret) |
515 | return false; | |
516 | return true; | |
517 | } | |
518 | ||
519 | bool dm_pp_get_static_clocks( | |
520 | const struct dc_context *ctx, | |
521 | struct dm_pp_static_clock_info *static_clk_info) | |
522 | { | |
523 | struct amdgpu_device *adev = ctx->driver_context; | |
524 | struct amd_pp_clock_info pp_clk_info = {0}; | |
525 | int ret = 0; | |
526 | ||
6f059c64 | 527 | if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_current_clocks) |
f7c1ed34 ML |
528 | ret = adev->powerplay.pp_funcs->get_current_clocks( |
529 | adev->powerplay.pp_handle, | |
530 | &pp_clk_info); | |
5e2d3881 HR |
531 | else if (adev->smu.funcs) |
532 | ret = smu_get_current_clocks(&adev->smu, &pp_clk_info); | |
b24fa266 AD |
533 | else |
534 | return false; | |
f7c1ed34 ML |
535 | if (ret) |
536 | return false; | |
537 | ||
c2c09ed5 | 538 | static_clk_info->max_clocks_state = pp_to_dc_powerlevel_state(pp_clk_info.max_clocks_state); |
3dbd823e RZ |
539 | static_clk_info->max_mclk_khz = pp_clk_info.max_memory_clock * 10; |
540 | static_clk_info->max_sclk_khz = pp_clk_info.max_engine_clock * 10; | |
f7c1ed34 ML |
541 | |
542 | return true; | |
543 | } | |
544 | ||
f7c1ed34 ML |
545 | void pp_rv_set_wm_ranges(struct pp_smu *pp, |
546 | struct pp_smu_wm_range_sets *ranges) | |
547 | { | |
265f5ba6 | 548 | const struct dc_context *ctx = pp->dm; |
b0a634ac RZ |
549 | struct amdgpu_device *adev = ctx->driver_context; |
550 | void *pp_handle = adev->powerplay.pp_handle; | |
551 | const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; | |
552 | struct dm_pp_wm_sets_with_clock_ranges_soc15 wm_with_clock_ranges; | |
553 | struct dm_pp_clock_range_for_dmif_wm_set_soc15 *wm_dce_clocks = wm_with_clock_ranges.wm_dmif_clocks_ranges; | |
554 | struct dm_pp_clock_range_for_mcif_wm_set_soc15 *wm_soc_clocks = wm_with_clock_ranges.wm_mcif_clocks_ranges; | |
555 | int32_t i; | |
f7c1ed34 | 556 | |
b0a634ac RZ |
557 | wm_with_clock_ranges.num_wm_dmif_sets = ranges->num_reader_wm_sets; |
558 | wm_with_clock_ranges.num_wm_mcif_sets = ranges->num_writer_wm_sets; | |
f7c1ed34 | 559 | |
b0a634ac | 560 | for (i = 0; i < wm_with_clock_ranges.num_wm_dmif_sets; i++) { |
f7c1ed34 | 561 | if (ranges->reader_wm_sets[i].wm_inst > 3) |
b0a634ac | 562 | wm_dce_clocks[i].wm_set_id = WM_SET_A; |
f7c1ed34 | 563 | else |
b0a634ac | 564 | wm_dce_clocks[i].wm_set_id = |
f7c1ed34 | 565 | ranges->reader_wm_sets[i].wm_inst; |
b0a634ac | 566 | wm_dce_clocks[i].wm_max_dcfclk_clk_in_khz = |
ba7b267a | 567 | ranges->reader_wm_sets[i].max_drain_clk_mhz * 1000; |
b0a634ac | 568 | wm_dce_clocks[i].wm_min_dcfclk_clk_in_khz = |
ba7b267a | 569 | ranges->reader_wm_sets[i].min_drain_clk_mhz * 1000; |
b0a634ac | 570 | wm_dce_clocks[i].wm_max_mem_clk_in_khz = |
ba7b267a | 571 | ranges->reader_wm_sets[i].max_fill_clk_mhz * 1000; |
b0a634ac | 572 | wm_dce_clocks[i].wm_min_mem_clk_in_khz = |
ba7b267a | 573 | ranges->reader_wm_sets[i].min_fill_clk_mhz * 1000; |
f7c1ed34 ML |
574 | } |
575 | ||
b0a634ac | 576 | for (i = 0; i < wm_with_clock_ranges.num_wm_mcif_sets; i++) { |
f7c1ed34 | 577 | if (ranges->writer_wm_sets[i].wm_inst > 3) |
b0a634ac | 578 | wm_soc_clocks[i].wm_set_id = WM_SET_A; |
f7c1ed34 | 579 | else |
b0a634ac | 580 | wm_soc_clocks[i].wm_set_id = |
f7c1ed34 | 581 | ranges->writer_wm_sets[i].wm_inst; |
b0a634ac | 582 | wm_soc_clocks[i].wm_max_socclk_clk_in_khz = |
ba7b267a | 583 | ranges->writer_wm_sets[i].max_fill_clk_mhz * 1000; |
b0a634ac | 584 | wm_soc_clocks[i].wm_min_socclk_clk_in_khz = |
ba7b267a | 585 | ranges->writer_wm_sets[i].min_fill_clk_mhz * 1000; |
b0a634ac | 586 | wm_soc_clocks[i].wm_max_mem_clk_in_khz = |
ba7b267a | 587 | ranges->writer_wm_sets[i].max_drain_clk_mhz * 1000; |
b0a634ac | 588 | wm_soc_clocks[i].wm_min_mem_clk_in_khz = |
ba7b267a | 589 | ranges->writer_wm_sets[i].min_drain_clk_mhz * 1000; |
f7c1ed34 ML |
590 | } |
591 | ||
2e069391 HR |
592 | if (pp_funcs && pp_funcs->set_watermarks_for_clocks_ranges) |
593 | pp_funcs->set_watermarks_for_clocks_ranges(pp_handle, | |
594 | &wm_with_clock_ranges); | |
595 | else if (adev->smu.funcs && | |
596 | adev->smu.funcs->set_watermarks_for_clock_ranges) | |
597 | smu_set_watermarks_for_clock_ranges(&adev->smu, | |
598 | &wm_with_clock_ranges); | |
f7c1ed34 ML |
599 | } |
600 | ||
601 | void pp_rv_set_pme_wa_enable(struct pp_smu *pp) | |
602 | { | |
265f5ba6 | 603 | const struct dc_context *ctx = pp->dm; |
b0a634ac RZ |
604 | struct amdgpu_device *adev = ctx->driver_context; |
605 | void *pp_handle = adev->powerplay.pp_handle; | |
606 | const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; | |
607 | ||
367eeed4 HR |
608 | if (pp_funcs && pp_funcs->notify_smu_enable_pwe) |
609 | pp_funcs->notify_smu_enable_pwe(pp_handle); | |
610 | else if (adev->smu.funcs) | |
611 | smu_notify_smu_enable_pwe(&adev->smu); | |
f7c1ed34 ML |
612 | } |
613 | ||
588715bd | 614 | void pp_rv_set_active_display_count(struct pp_smu *pp, int count) |
615 | { | |
616 | const struct dc_context *ctx = pp->dm; | |
617 | struct amdgpu_device *adev = ctx->driver_context; | |
618 | void *pp_handle = adev->powerplay.pp_handle; | |
619 | const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; | |
620 | ||
621 | if (!pp_funcs || !pp_funcs->set_active_display_count) | |
622 | return; | |
623 | ||
624 | pp_funcs->set_active_display_count(pp_handle, count); | |
625 | } | |
626 | ||
627 | void pp_rv_set_min_deep_sleep_dcfclk(struct pp_smu *pp, int clock) | |
628 | { | |
629 | const struct dc_context *ctx = pp->dm; | |
630 | struct amdgpu_device *adev = ctx->driver_context; | |
631 | void *pp_handle = adev->powerplay.pp_handle; | |
632 | const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; | |
633 | ||
634 | if (!pp_funcs || !pp_funcs->set_min_deep_sleep_dcefclk) | |
635 | return; | |
636 | ||
637 | pp_funcs->set_min_deep_sleep_dcefclk(pp_handle, clock); | |
638 | } | |
639 | ||
640 | void pp_rv_set_hard_min_dcefclk_by_freq(struct pp_smu *pp, int clock) | |
641 | { | |
642 | const struct dc_context *ctx = pp->dm; | |
643 | struct amdgpu_device *adev = ctx->driver_context; | |
644 | void *pp_handle = adev->powerplay.pp_handle; | |
645 | const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; | |
646 | ||
647 | if (!pp_funcs || !pp_funcs->set_hard_min_dcefclk_by_freq) | |
648 | return; | |
649 | ||
650 | pp_funcs->set_hard_min_dcefclk_by_freq(pp_handle, clock); | |
651 | } | |
652 | ||
653 | void pp_rv_set_hard_min_fclk_by_freq(struct pp_smu *pp, int mhz) | |
654 | { | |
655 | const struct dc_context *ctx = pp->dm; | |
656 | struct amdgpu_device *adev = ctx->driver_context; | |
657 | void *pp_handle = adev->powerplay.pp_handle; | |
658 | const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; | |
659 | ||
660 | if (!pp_funcs || !pp_funcs->set_hard_min_fclk_by_freq) | |
661 | return; | |
662 | ||
663 | pp_funcs->set_hard_min_fclk_by_freq(pp_handle, mhz); | |
664 | } | |
665 | ||
79a7b060 | 666 | enum pp_smu_status pp_nv_set_wm_ranges(struct pp_smu *pp, |
667 | struct pp_smu_wm_range_sets *ranges) | |
668 | { | |
669 | const struct dc_context *ctx = pp->dm; | |
670 | struct amdgpu_device *adev = ctx->driver_context; | |
671 | struct smu_context *smu = &adev->smu; | |
672 | struct dm_pp_wm_sets_with_clock_ranges_soc15 wm_with_clock_ranges; | |
673 | struct dm_pp_clock_range_for_dmif_wm_set_soc15 *wm_dce_clocks = | |
674 | wm_with_clock_ranges.wm_dmif_clocks_ranges; | |
675 | struct dm_pp_clock_range_for_mcif_wm_set_soc15 *wm_soc_clocks = | |
676 | wm_with_clock_ranges.wm_mcif_clocks_ranges; | |
677 | int32_t i; | |
678 | ||
679 | wm_with_clock_ranges.num_wm_dmif_sets = ranges->num_reader_wm_sets; | |
680 | wm_with_clock_ranges.num_wm_mcif_sets = ranges->num_writer_wm_sets; | |
681 | ||
682 | for (i = 0; i < wm_with_clock_ranges.num_wm_dmif_sets; i++) { | |
683 | if (ranges->reader_wm_sets[i].wm_inst > 3) | |
684 | wm_dce_clocks[i].wm_set_id = WM_SET_A; | |
685 | else | |
686 | wm_dce_clocks[i].wm_set_id = | |
687 | ranges->reader_wm_sets[i].wm_inst; | |
688 | wm_dce_clocks[i].wm_max_dcfclk_clk_in_khz = | |
689 | ranges->reader_wm_sets[i].max_drain_clk_mhz * 1000; | |
690 | wm_dce_clocks[i].wm_min_dcfclk_clk_in_khz = | |
691 | ranges->reader_wm_sets[i].min_drain_clk_mhz * 1000; | |
692 | wm_dce_clocks[i].wm_max_mem_clk_in_khz = | |
693 | ranges->reader_wm_sets[i].max_fill_clk_mhz * 1000; | |
694 | wm_dce_clocks[i].wm_min_mem_clk_in_khz = | |
695 | ranges->reader_wm_sets[i].min_fill_clk_mhz * 1000; | |
696 | } | |
697 | ||
698 | for (i = 0; i < wm_with_clock_ranges.num_wm_mcif_sets; i++) { | |
699 | if (ranges->writer_wm_sets[i].wm_inst > 3) | |
700 | wm_soc_clocks[i].wm_set_id = WM_SET_A; | |
701 | else | |
702 | wm_soc_clocks[i].wm_set_id = | |
703 | ranges->writer_wm_sets[i].wm_inst; | |
704 | wm_soc_clocks[i].wm_max_socclk_clk_in_khz = | |
705 | ranges->writer_wm_sets[i].max_fill_clk_mhz * 1000; | |
706 | wm_soc_clocks[i].wm_min_socclk_clk_in_khz = | |
707 | ranges->writer_wm_sets[i].min_fill_clk_mhz * 1000; | |
708 | wm_soc_clocks[i].wm_max_mem_clk_in_khz = | |
709 | ranges->writer_wm_sets[i].max_drain_clk_mhz * 1000; | |
710 | wm_soc_clocks[i].wm_min_mem_clk_in_khz = | |
711 | ranges->writer_wm_sets[i].min_drain_clk_mhz * 1000; | |
712 | } | |
713 | ||
714 | if (!smu->funcs) | |
715 | return PP_SMU_RESULT_UNSUPPORTED; | |
716 | ||
717 | /* 0: successful or smu.funcs->set_watermarks_for_clock_ranges = NULL; | |
718 | * 1: fail | |
719 | */ | |
720 | if (smu_set_watermarks_for_clock_ranges(&adev->smu, | |
721 | &wm_with_clock_ranges)) | |
722 | return PP_SMU_RESULT_UNSUPPORTED; | |
723 | ||
724 | return PP_SMU_RESULT_OK; | |
725 | } | |
726 | ||
727 | enum pp_smu_status pp_nv_set_pme_wa_enable(struct pp_smu *pp) | |
728 | { | |
729 | const struct dc_context *ctx = pp->dm; | |
730 | struct amdgpu_device *adev = ctx->driver_context; | |
731 | struct smu_context *smu = &adev->smu; | |
732 | ||
733 | if (!smu->funcs) | |
734 | return PP_SMU_RESULT_UNSUPPORTED; | |
735 | ||
736 | /* 0: successful or smu.funcs->set_azalia_d3_pme = NULL; 1: fail */ | |
737 | if (smu_set_azalia_d3_pme(smu)) | |
738 | return PP_SMU_RESULT_FAIL; | |
739 | ||
740 | return PP_SMU_RESULT_OK; | |
741 | } | |
742 | ||
743 | enum pp_smu_status pp_nv_set_display_count(struct pp_smu *pp, int count) | |
744 | { | |
745 | const struct dc_context *ctx = pp->dm; | |
746 | struct amdgpu_device *adev = ctx->driver_context; | |
747 | struct smu_context *smu = &adev->smu; | |
748 | ||
749 | if (!smu->funcs) | |
750 | return PP_SMU_RESULT_UNSUPPORTED; | |
751 | ||
752 | /* 0: successful or smu.funcs->set_display_count = NULL; 1: fail */ | |
753 | if (smu_set_display_count(smu, count)) | |
754 | return PP_SMU_RESULT_FAIL; | |
755 | ||
756 | return PP_SMU_RESULT_OK; | |
757 | } | |
758 | ||
759 | enum pp_smu_status pp_nv_set_min_deep_sleep_dcfclk(struct pp_smu *pp, int mhz) | |
760 | { | |
761 | const struct dc_context *ctx = pp->dm; | |
762 | struct amdgpu_device *adev = ctx->driver_context; | |
763 | struct smu_context *smu = &adev->smu; | |
764 | ||
765 | if (!smu->funcs) | |
766 | return PP_SMU_RESULT_UNSUPPORTED; | |
767 | ||
768 | /* 0: successful or smu.funcs->set_deep_sleep_dcefclk = NULL;1: fail */ | |
769 | if (smu_set_deep_sleep_dcefclk(smu, mhz)) | |
770 | return PP_SMU_RESULT_FAIL; | |
771 | ||
772 | return PP_SMU_RESULT_OK; | |
773 | } | |
774 | ||
775 | enum pp_smu_status pp_nv_set_hard_min_dcefclk_by_freq( | |
776 | struct pp_smu *pp, int mhz) | |
777 | { | |
778 | const struct dc_context *ctx = pp->dm; | |
779 | struct amdgpu_device *adev = ctx->driver_context; | |
780 | struct smu_context *smu = &adev->smu; | |
781 | struct pp_display_clock_request clock_req; | |
782 | ||
783 | if (!smu->funcs) | |
784 | return PP_SMU_RESULT_UNSUPPORTED; | |
785 | ||
786 | clock_req.clock_type = amd_pp_dcef_clock; | |
787 | clock_req.clock_freq_in_khz = mhz * 1000; | |
788 | ||
789 | /* 0: successful or smu.funcs->display_clock_voltage_request = NULL | |
790 | * 1: fail | |
791 | */ | |
792 | if (smu_display_clock_voltage_request(smu, &clock_req)) | |
793 | return PP_SMU_RESULT_FAIL; | |
794 | ||
795 | return PP_SMU_RESULT_OK; | |
796 | } | |
797 | ||
798 | enum pp_smu_status pp_nv_set_hard_min_uclk_by_freq(struct pp_smu *pp, int mhz) | |
799 | { | |
800 | const struct dc_context *ctx = pp->dm; | |
801 | struct amdgpu_device *adev = ctx->driver_context; | |
802 | struct smu_context *smu = &adev->smu; | |
803 | struct pp_display_clock_request clock_req; | |
804 | ||
805 | if (!smu->funcs) | |
806 | return PP_SMU_RESULT_UNSUPPORTED; | |
807 | ||
808 | clock_req.clock_type = amd_pp_mem_clock; | |
809 | clock_req.clock_freq_in_khz = mhz * 1000; | |
810 | ||
811 | /* 0: successful or smu.funcs->display_clock_voltage_request = NULL | |
812 | * 1: fail | |
813 | */ | |
814 | if (smu_display_clock_voltage_request(smu, &clock_req)) | |
815 | return PP_SMU_RESULT_FAIL; | |
816 | ||
817 | return PP_SMU_RESULT_OK; | |
818 | } | |
819 | ||
6e92e156 KF |
/*
 * Tell the SMU whether the display stack supports the p-state change
 * handshake. When handshaking is NOT supported, memory clock switching
 * is disabled (hence the inverted argument below).
 *
 * NOTE(review): unlike the sibling pp_nv_* wrappers, this one does not
 * guard on smu->funcs before calling into the SMU layer — confirm
 * smu_display_disable_memory_clock_switch() tolerates an
 * uninitialized function table, or add the guard.
 */
enum pp_smu_status pp_nv_set_pstate_handshake_support(
	struct pp_smu *pp, BOOLEAN pstate_handshake_supported)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	struct smu_context *smu = &adev->smu;

	/* nonzero return from the SMU call indicates failure */
	if (smu_display_disable_memory_clock_switch(smu, !pstate_handshake_supported))
		return PP_SMU_RESULT_FAIL;

	return PP_SMU_RESULT_OK;
}
832 | ||
79a7b060 | 833 | enum pp_smu_status pp_nv_set_voltage_by_freq(struct pp_smu *pp, |
834 | enum pp_smu_nv_clock_id clock_id, int mhz) | |
835 | { | |
836 | const struct dc_context *ctx = pp->dm; | |
837 | struct amdgpu_device *adev = ctx->driver_context; | |
838 | struct smu_context *smu = &adev->smu; | |
839 | struct pp_display_clock_request clock_req; | |
840 | ||
841 | if (!smu->funcs) | |
842 | return PP_SMU_RESULT_UNSUPPORTED; | |
843 | ||
844 | switch (clock_id) { | |
845 | case PP_SMU_NV_DISPCLK: | |
846 | clock_req.clock_type = amd_pp_disp_clock; | |
847 | break; | |
848 | case PP_SMU_NV_PHYCLK: | |
849 | clock_req.clock_type = amd_pp_phy_clock; | |
850 | break; | |
851 | case PP_SMU_NV_PIXELCLK: | |
852 | clock_req.clock_type = amd_pp_pixel_clock; | |
853 | break; | |
854 | default: | |
855 | break; | |
856 | } | |
857 | clock_req.clock_freq_in_khz = mhz * 1000; | |
858 | ||
859 | /* 0: successful or smu.funcs->display_clock_voltage_request = NULL | |
860 | * 1: fail | |
861 | */ | |
862 | if (smu_display_clock_voltage_request(smu, &clock_req)) | |
863 | return PP_SMU_RESULT_FAIL; | |
864 | ||
865 | return PP_SMU_RESULT_OK; | |
866 | } | |
867 | ||
868 | enum pp_smu_status pp_nv_get_maximum_sustainable_clocks( | |
869 | struct pp_smu *pp, struct pp_smu_nv_clock_table *max_clocks) | |
870 | { | |
871 | const struct dc_context *ctx = pp->dm; | |
872 | struct amdgpu_device *adev = ctx->driver_context; | |
873 | struct smu_context *smu = &adev->smu; | |
874 | ||
875 | if (!smu->funcs) | |
876 | return PP_SMU_RESULT_UNSUPPORTED; | |
877 | ||
878 | if (!smu->funcs->get_max_sustainable_clocks_by_dc) | |
879 | return PP_SMU_RESULT_UNSUPPORTED; | |
880 | ||
881 | if (!smu->funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks)) | |
882 | return PP_SMU_RESULT_OK; | |
883 | ||
884 | return PP_SMU_RESULT_FAIL; | |
885 | } | |
886 | ||
887 | enum pp_smu_status pp_nv_get_uclk_dpm_states(struct pp_smu *pp, | |
888 | unsigned int *clock_values_in_khz, unsigned int *num_states) | |
889 | { | |
890 | const struct dc_context *ctx = pp->dm; | |
891 | struct amdgpu_device *adev = ctx->driver_context; | |
892 | struct smu_context *smu = &adev->smu; | |
893 | ||
894 | if (!smu->ppt_funcs) | |
895 | return PP_SMU_RESULT_UNSUPPORTED; | |
896 | ||
897 | if (!smu->ppt_funcs->get_uclk_dpm_states) | |
898 | return PP_SMU_RESULT_UNSUPPORTED; | |
899 | ||
900 | if (!smu->ppt_funcs->get_uclk_dpm_states(smu, | |
901 | clock_values_in_khz, num_states)) | |
902 | return PP_SMU_RESULT_OK; | |
903 | ||
904 | return PP_SMU_RESULT_FAIL; | |
905 | } | |
906 | ||
/*
 * Populate the pp_smu function table used by DC, dispatching on the
 * DCE/DCN hardware version in @ctx. Raven (DCN 1.x) routes through the
 * pp_rv_* powerplay wrappers; Navi (DCN 2.0) routes through the
 * pp_nv_* SMU wrappers. Unknown versions only log an error.
 */
void dm_pp_get_funcs(
		struct dc_context *ctx,
		struct pp_smu_funcs *funcs)
{
	switch (ctx->dce_version) {
	case DCN_VERSION_1_0:
	case DCN_VERSION_1_01:
		funcs->ctx.ver = PP_SMU_VER_RV;
		funcs->rv_funcs.pp_smu.dm = ctx;
		funcs->rv_funcs.set_wm_ranges = pp_rv_set_wm_ranges;
		funcs->rv_funcs.set_pme_wa_enable = pp_rv_set_pme_wa_enable;
		funcs->rv_funcs.set_display_count =
				pp_rv_set_active_display_count;
		funcs->rv_funcs.set_min_deep_sleep_dcfclk =
				pp_rv_set_min_deep_sleep_dcfclk;
		funcs->rv_funcs.set_hard_min_dcfclk_by_freq =
				pp_rv_set_hard_min_dcefclk_by_freq;
		funcs->rv_funcs.set_hard_min_fclk_by_freq =
				pp_rv_set_hard_min_fclk_by_freq;
		break;
#ifdef CONFIG_DRM_AMD_DC_DCN2_0
	case DCN_VERSION_2_0:
		funcs->ctx.ver = PP_SMU_VER_NV;
		funcs->nv_funcs.pp_smu.dm = ctx;
		funcs->nv_funcs.set_display_count = pp_nv_set_display_count;
		funcs->nv_funcs.set_hard_min_dcfclk_by_freq =
				pp_nv_set_hard_min_dcefclk_by_freq;
		funcs->nv_funcs.set_min_deep_sleep_dcfclk =
				pp_nv_set_min_deep_sleep_dcfclk;
		funcs->nv_funcs.set_voltage_by_freq =
				pp_nv_set_voltage_by_freq;
		funcs->nv_funcs.set_wm_ranges = pp_nv_set_wm_ranges;

		/* TODO: enabling set_pme_wa_enable caused a 4k@60Hz
		 * display to not light up; leave it disabled for now.
		 */
		funcs->nv_funcs.set_pme_wa_enable = NULL;
		/* TODO: debug warning message */
		funcs->nv_funcs.set_hard_min_uclk_by_freq = pp_nv_set_hard_min_uclk_by_freq;
		/* TODO: compare data with the Windows driver */
		funcs->nv_funcs.get_maximum_sustainable_clocks = pp_nv_get_maximum_sustainable_clocks;
		/* TODO: compare data with the Windows driver */
		funcs->nv_funcs.get_uclk_dpm_states = pp_nv_get_uclk_dpm_states;
		funcs->nv_funcs.set_pstate_handshake_support = pp_nv_set_pstate_handshake_support;
		break;
#endif
	default:
		DRM_ERROR("smu version is not supported !\n");
		break;
	}
}