/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/acpi.h>
#include <linux/delay.h>
#include <linux/string.h>

#include <drm/drm_crtc_helper.h>
#include <drm/amdgpu_drm.h>

#include "dm_services.h"

#include "amdgpu_dm.h"
#include "amdgpu_dm_irq.h"
#include "amdgpu_pm.h"
/******************************************************************************
 * IRQ Interfaces.
 *****************************************************************************/
42 void dal_register_timer_interrupt(
43 struct dc_context
*ctx
,
44 struct dc_timer_interrupt_params
*int_params
,
48 struct amdgpu_device
*adev
= ctx
->driver_context
;
50 if (!adev
|| !int_params
) {
51 DRM_ERROR("DM_IRQ: invalid input!\n");
55 if (int_params
->int_context
!= INTERRUPT_LOW_IRQ_CONTEXT
) {
56 /* only low irq ctx is supported. */
57 DRM_ERROR("DM_IRQ: invalid context: %d!\n",
58 int_params
->int_context
);
62 amdgpu_dm_irq_register_timer(adev
, int_params
, ih
, args
);
65 void dal_isr_acquire_lock(struct dc_context
*ctx
)
70 void dal_isr_release_lock(struct dc_context
*ctx
)
/******************************************************************************
 * End-of-IRQ Interfaces.
 *****************************************************************************/
79 bool dm_write_persistent_data(struct dc_context
*ctx
,
80 const struct dc_sink
*sink
,
81 const char *module_name
,
85 struct persistent_data_flag
*flag
)
91 bool dm_read_persistent_data(struct dc_context
*ctx
,
92 const struct dc_sink
*sink
,
93 const char *module_name
,
97 struct persistent_data_flag
*flag
)
103 void dm_delay_in_microseconds(struct dc_context
*ctx
,
104 unsigned int microSeconds
)
/**** power component interfaces ****/
112 bool dm_pp_pre_dce_clock_change(
113 struct dc_context
*ctx
,
114 struct dm_pp_gpu_clock_range
*requested_state
,
115 struct dm_pp_gpu_clock_range
*actual_state
)
121 bool dm_pp_apply_safe_state(
122 const struct dc_context
*ctx
)
124 struct amdgpu_device
*adev
= ctx
->driver_context
;
126 if (adev
->pm
.dpm_enabled
) {
127 /* TODO: Does this require PreModeChange event to PPLIB? */
133 bool dm_pp_apply_display_requirements(
134 const struct dc_context
*ctx
,
135 const struct dm_pp_display_configuration
*pp_display_cfg
)
137 struct amdgpu_device
*adev
= ctx
->driver_context
;
139 if (adev
->pm
.dpm_enabled
) {
141 memset(&adev
->pm
.pm_display_cfg
, 0,
142 sizeof(adev
->pm
.pm_display_cfg
));
144 adev
->pm
.pm_display_cfg
.cpu_cc6_disable
=
145 pp_display_cfg
->cpu_cc6_disable
;
147 adev
->pm
.pm_display_cfg
.cpu_pstate_disable
=
148 pp_display_cfg
->cpu_pstate_disable
;
150 adev
->pm
.pm_display_cfg
.cpu_pstate_separation_time
=
151 pp_display_cfg
->cpu_pstate_separation_time
;
153 adev
->pm
.pm_display_cfg
.nb_pstate_switch_disable
=
154 pp_display_cfg
->nb_pstate_switch_disable
;
156 adev
->pm
.pm_display_cfg
.num_display
=
157 pp_display_cfg
->display_count
;
158 adev
->pm
.pm_display_cfg
.num_path_including_non_display
=
159 pp_display_cfg
->display_count
;
161 adev
->pm
.pm_display_cfg
.min_core_set_clock
=
162 pp_display_cfg
->min_engine_clock_khz
/10;
163 adev
->pm
.pm_display_cfg
.min_core_set_clock_in_sr
=
164 pp_display_cfg
->min_engine_clock_deep_sleep_khz
/10;
165 adev
->pm
.pm_display_cfg
.min_mem_set_clock
=
166 pp_display_cfg
->min_memory_clock_khz
/10;
168 adev
->pm
.pm_display_cfg
.multi_monitor_in_sync
=
169 pp_display_cfg
->all_displays_in_sync
;
170 adev
->pm
.pm_display_cfg
.min_vblank_time
=
171 pp_display_cfg
->avail_mclk_switch_time_us
;
173 adev
->pm
.pm_display_cfg
.display_clk
=
174 pp_display_cfg
->disp_clk_khz
/10;
176 adev
->pm
.pm_display_cfg
.dce_tolerable_mclk_in_active_latency
=
177 pp_display_cfg
->avail_mclk_switch_time_in_disp_active_us
;
179 adev
->pm
.pm_display_cfg
.crtc_index
= pp_display_cfg
->crtc_index
;
180 adev
->pm
.pm_display_cfg
.line_time_in_us
=
181 pp_display_cfg
->line_time_in_us
;
183 adev
->pm
.pm_display_cfg
.vrefresh
= pp_display_cfg
->disp_configs
[0].v_refresh
;
184 adev
->pm
.pm_display_cfg
.crossfire_display_index
= -1;
185 adev
->pm
.pm_display_cfg
.min_bus_bandwidth
= 0;
187 /* TODO: complete implementation of
188 * amd_powerplay_display_configuration_change().
190 * PHM_StoreDALConfigurationData - powerplay\hwmgr\hardwaremanager.c
191 * PP_IRI_DisplayConfigurationChange - powerplay\eventmgr\iri.c */
192 amd_powerplay_display_configuration_change(
193 adev
->powerplay
.pp_handle
,
194 &adev
->pm
.pm_display_cfg
);
196 /* TODO: replace by a separate call to 'apply display cfg'? */
197 amdgpu_pm_compute_clocks(adev
);
203 bool dc_service_get_system_clocks_range(
204 const struct dc_context
*ctx
,
205 struct dm_pp_gpu_clock_range
*sys_clks
)
207 struct amdgpu_device
*adev
= ctx
->driver_context
;
209 /* Default values, in case PPLib is not compiled-in. */
210 sys_clks
->mclk
.max_khz
= 800000;
211 sys_clks
->mclk
.min_khz
= 800000;
213 sys_clks
->sclk
.max_khz
= 600000;
214 sys_clks
->sclk
.min_khz
= 300000;
216 if (adev
->pm
.dpm_enabled
) {
217 sys_clks
->mclk
.max_khz
= amdgpu_dpm_get_mclk(adev
, false);
218 sys_clks
->mclk
.min_khz
= amdgpu_dpm_get_mclk(adev
, true);
220 sys_clks
->sclk
.max_khz
= amdgpu_dpm_get_sclk(adev
, false);
221 sys_clks
->sclk
.min_khz
= amdgpu_dpm_get_sclk(adev
, true);
227 static void get_default_clock_levels(
228 enum dm_pp_clock_type clk_type
,
229 struct dm_pp_clock_levels
*clks
)
231 uint32_t disp_clks_in_khz
[6] = {
232 300000, 400000, 496560, 626090, 685720, 757900 };
233 uint32_t sclks_in_khz
[6] = {
234 300000, 360000, 423530, 514290, 626090, 720000 };
235 uint32_t mclks_in_khz
[2] = { 333000, 800000 };
238 case DM_PP_CLOCK_TYPE_DISPLAY_CLK
:
239 clks
->num_levels
= 6;
240 memmove(clks
->clocks_in_khz
, disp_clks_in_khz
,
241 sizeof(disp_clks_in_khz
));
243 case DM_PP_CLOCK_TYPE_ENGINE_CLK
:
244 clks
->num_levels
= 6;
245 memmove(clks
->clocks_in_khz
, sclks_in_khz
,
246 sizeof(sclks_in_khz
));
248 case DM_PP_CLOCK_TYPE_MEMORY_CLK
:
249 clks
->num_levels
= 2;
250 memmove(clks
->clocks_in_khz
, mclks_in_khz
,
251 sizeof(mclks_in_khz
));
254 clks
->num_levels
= 0;
259 static enum amd_pp_clock_type
dc_to_pp_clock_type(
260 enum dm_pp_clock_type dm_pp_clk_type
)
262 enum amd_pp_clock_type amd_pp_clk_type
= 0;
264 switch (dm_pp_clk_type
) {
265 case DM_PP_CLOCK_TYPE_DISPLAY_CLK
:
266 amd_pp_clk_type
= amd_pp_disp_clock
;
268 case DM_PP_CLOCK_TYPE_ENGINE_CLK
:
269 amd_pp_clk_type
= amd_pp_sys_clock
;
271 case DM_PP_CLOCK_TYPE_MEMORY_CLK
:
272 amd_pp_clk_type
= amd_pp_mem_clock
;
275 DRM_ERROR("DM_PPLIB: invalid clock type: %d!\n",
280 return amd_pp_clk_type
;
283 static void pp_to_dc_clock_levels(
284 const struct amd_pp_clocks
*pp_clks
,
285 struct dm_pp_clock_levels
*dc_clks
,
286 enum dm_pp_clock_type dc_clk_type
)
290 if (pp_clks
->count
> DM_PP_MAX_CLOCK_LEVELS
) {
291 DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
292 DC_DECODE_PP_CLOCK_TYPE(dc_clk_type
),
294 DM_PP_MAX_CLOCK_LEVELS
);
296 dc_clks
->num_levels
= DM_PP_MAX_CLOCK_LEVELS
;
298 dc_clks
->num_levels
= pp_clks
->count
;
300 DRM_INFO("DM_PPLIB: values for %s clock\n",
301 DC_DECODE_PP_CLOCK_TYPE(dc_clk_type
));
303 for (i
= 0; i
< dc_clks
->num_levels
; i
++) {
304 DRM_INFO("DM_PPLIB:\t %d\n", pp_clks
->clock
[i
]);
305 /* translate 10kHz to kHz */
306 dc_clks
->clocks_in_khz
[i
] = pp_clks
->clock
[i
] * 10;
310 bool dm_pp_get_clock_levels_by_type(
311 const struct dc_context
*ctx
,
312 enum dm_pp_clock_type clk_type
,
313 struct dm_pp_clock_levels
*dc_clks
)
315 struct amdgpu_device
*adev
= ctx
->driver_context
;
316 void *pp_handle
= adev
->powerplay
.pp_handle
;
317 struct amd_pp_clocks pp_clks
= { 0 };
318 struct amd_pp_simple_clock_info validation_clks
= { 0 };
321 if (amd_powerplay_get_clock_by_type(pp_handle
,
322 dc_to_pp_clock_type(clk_type
), &pp_clks
)) {
323 /* Error in pplib. Provide default values. */
324 get_default_clock_levels(clk_type
, dc_clks
);
328 pp_to_dc_clock_levels(&pp_clks
, dc_clks
, clk_type
);
330 if (amd_powerplay_get_display_mode_validation_clocks(pp_handle
,
332 /* Error in pplib. Provide default values. */
333 DRM_INFO("DM_PPLIB: Warning: using default validation clocks!\n");
334 validation_clks
.engine_max_clock
= 72000;
335 validation_clks
.memory_max_clock
= 80000;
336 validation_clks
.level
= 0;
339 DRM_INFO("DM_PPLIB: Validation clocks:\n");
340 DRM_INFO("DM_PPLIB: engine_max_clock: %d\n",
341 validation_clks
.engine_max_clock
);
342 DRM_INFO("DM_PPLIB: memory_max_clock: %d\n",
343 validation_clks
.memory_max_clock
);
344 DRM_INFO("DM_PPLIB: level : %d\n",
345 validation_clks
.level
);
347 /* Translate 10 kHz to kHz. */
348 validation_clks
.engine_max_clock
*= 10;
349 validation_clks
.memory_max_clock
*= 10;
351 /* Determine the highest non-boosted level from the Validation Clocks */
352 if (clk_type
== DM_PP_CLOCK_TYPE_ENGINE_CLK
) {
353 for (i
= 0; i
< dc_clks
->num_levels
; i
++) {
354 if (dc_clks
->clocks_in_khz
[i
] > validation_clks
.engine_max_clock
) {
355 /* This clock is higher the validation clock.
356 * Than means the previous one is the highest
357 * non-boosted one. */
358 DRM_INFO("DM_PPLIB: reducing engine clock level from %d to %d\n",
359 dc_clks
->num_levels
, i
);
360 dc_clks
->num_levels
= i
> 0 ? i
: 1;
364 } else if (clk_type
== DM_PP_CLOCK_TYPE_MEMORY_CLK
) {
365 for (i
= 0; i
< dc_clks
->num_levels
; i
++) {
366 if (dc_clks
->clocks_in_khz
[i
] > validation_clks
.memory_max_clock
) {
367 DRM_INFO("DM_PPLIB: reducing memory clock level from %d to %d\n",
368 dc_clks
->num_levels
, i
);
369 dc_clks
->num_levels
= i
> 0 ? i
: 1;
378 bool dm_pp_get_clock_levels_by_type_with_latency(
379 const struct dc_context
*ctx
,
380 enum dm_pp_clock_type clk_type
,
381 struct dm_pp_clock_levels_with_latency
*clk_level_info
)
383 /* TODO: to be implemented */
387 bool dm_pp_get_clock_levels_by_type_with_voltage(
388 const struct dc_context
*ctx
,
389 enum dm_pp_clock_type clk_type
,
390 struct dm_pp_clock_levels_with_voltage
*clk_level_info
)
392 /* TODO: to be implemented */
396 bool dm_pp_notify_wm_clock_changes(
397 const struct dc_context
*ctx
,
398 struct dm_pp_wm_sets_with_clock_ranges
*wm_with_clock_ranges
)
400 /* TODO: to be implemented */
404 bool dm_pp_notify_wm_clock_changes_soc15(
405 const struct dc_context
*ctx
,
406 struct dm_pp_wm_sets_with_clock_ranges_soc15
*wm_with_clock_ranges
)
408 /* TODO: to be implemented */
412 bool dm_pp_apply_power_level_change_request(
413 const struct dc_context
*ctx
,
414 struct dm_pp_power_level_change_request
*level_change_req
)
416 /* TODO: to be implemented */
420 bool dm_pp_apply_clock_for_voltage_request(
421 const struct dc_context
*ctx
,
422 struct dm_pp_clock_for_voltage_req
*clock_for_voltage_req
)
424 /* TODO: to be implemented */
428 bool dm_pp_get_static_clocks(
429 const struct dc_context
*ctx
,
430 struct dm_pp_static_clock_info
*static_clk_info
)
432 /* TODO: to be implemented */
/**** end of power component interfaces ****/