/*
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Rafał Miłecki <zajec5@gmail.com>
 *          Alex Deucher <alexdeucher@gmail.com>
 */

#include "drmP.h"
#include "amdgpu.h"
#include "amdgpu_drv.h"
#include "amdgpu_pm.h"
#include "amdgpu_dpm.h"
#include "atom.h"
#include <linux/power_supply.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include "amd_powerplay.h"

static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev);

void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
{
        if (adev->pm.dpm_enabled) {
                mutex_lock(&adev->pm.mutex);
                if (power_supply_is_system_supplied() > 0)
                        adev->pm.dpm.ac_power = true;
                else
                        adev->pm.dpm.ac_power = false;
                if (adev->pm.funcs->enable_bapm)
                        amdgpu_dpm_enable_bapm(adev, adev->pm.dpm.ac_power);
                mutex_unlock(&adev->pm.mutex);
        }
}

static ssize_t amdgpu_get_dpm_state(struct device *dev,
                                    struct device_attribute *attr,
                                    char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
        enum amd_pm_state_type pm;

        if (adev->pp_enabled) {
                pm = amdgpu_dpm_get_current_power_state(adev);
        } else
                pm = adev->pm.dpm.user_state;

        return snprintf(buf, PAGE_SIZE, "%s\n",
                        (pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
                        (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
}

static ssize_t amdgpu_set_dpm_state(struct device *dev,
                                    struct device_attribute *attr,
                                    const char *buf,
                                    size_t count)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
        enum amd_pm_state_type state;

        if (strncmp("battery", buf, strlen("battery")) == 0)
                state = POWER_STATE_TYPE_BATTERY;
        else if (strncmp("balanced", buf, strlen("balanced")) == 0)
                state = POWER_STATE_TYPE_BALANCED;
        else if (strncmp("performance", buf, strlen("performance")) == 0)
                state = POWER_STATE_TYPE_PERFORMANCE;
        else {
                count = -EINVAL;
                goto fail;
        }

        if (adev->pp_enabled) {
                amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_ENABLE_USER_STATE, &state, NULL);
        } else {
                mutex_lock(&adev->pm.mutex);
                adev->pm.dpm.user_state = state;
                mutex_unlock(&adev->pm.mutex);

                /* Can't set dpm state when the card is off */
                if (!(adev->flags & AMD_IS_PX) ||
                    (ddev->switch_power_state == DRM_SWITCH_POWER_ON))
                        amdgpu_pm_compute_clocks(adev);
        }
fail:
        return count;
}

static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev,
                                                       struct device_attribute *attr,
                                                       char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;

        if (adev->pp_enabled) {
                enum amd_dpm_forced_level level;

                level = amdgpu_dpm_get_performance_level(adev);
                return snprintf(buf, PAGE_SIZE, "%s\n",
                                (level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
                                (level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" : "high");
        } else {
                enum amdgpu_dpm_forced_level level;

                level = adev->pm.dpm.forced_level;
                return snprintf(buf, PAGE_SIZE, "%s\n",
                                (level == AMDGPU_DPM_FORCED_LEVEL_AUTO) ? "auto" :
                                (level == AMDGPU_DPM_FORCED_LEVEL_LOW) ? "low" : "high");
        }
}

static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
                                                       struct device_attribute *attr,
                                                       const char *buf,
                                                       size_t count)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
        enum amdgpu_dpm_forced_level level;
        int ret = 0;

        if (strncmp("low", buf, strlen("low")) == 0) {
                level = AMDGPU_DPM_FORCED_LEVEL_LOW;
        } else if (strncmp("high", buf, strlen("high")) == 0) {
                level = AMDGPU_DPM_FORCED_LEVEL_HIGH;
        } else if (strncmp("auto", buf, strlen("auto")) == 0) {
                level = AMDGPU_DPM_FORCED_LEVEL_AUTO;
        } else {
                count = -EINVAL;
                goto fail;
        }

        if (adev->pp_enabled)
                amdgpu_dpm_force_performance_level(adev, level);
        else {
                mutex_lock(&adev->pm.mutex);
                if (adev->pm.dpm.thermal_active) {
                        count = -EINVAL;
                        mutex_unlock(&adev->pm.mutex);
                        goto fail;
                }
                ret = amdgpu_dpm_force_performance_level(adev, level);
                if (ret)
                        count = -EINVAL;
                else
                        adev->pm.dpm.forced_level = level;
                mutex_unlock(&adev->pm.mutex);
        }
fail:
        return count;
}

static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, amdgpu_get_dpm_state, amdgpu_set_dpm_state);
static DEVICE_ATTR(power_dpm_force_performance_level, S_IRUGO | S_IWUSR,
                   amdgpu_get_dpm_forced_performance_level,
                   amdgpu_set_dpm_forced_performance_level);

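/*
 * Illustrative userspace usage (not part of the driver): these attributes
 * appear under the card's PCI device directory in sysfs, e.g.
 *
 *   cat /sys/class/drm/card0/device/power_dpm_state
 *   echo performance > /sys/class/drm/card0/device/power_dpm_state
 *   echo low > /sys/class/drm/card0/device/power_dpm_force_performance_level
 *
 * "card0" is an assumed device node; the exact path depends on the system.
 */
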
static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
                                      struct device_attribute *attr,
                                      char *buf)
{
        struct amdgpu_device *adev = dev_get_drvdata(dev);
        int temp;

        if (!adev->pp_enabled && !adev->pm.funcs->get_temperature)
                temp = 0;
        else
                temp = amdgpu_dpm_get_temperature(adev);

        return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}

static ssize_t amdgpu_hwmon_show_temp_thresh(struct device *dev,
                                             struct device_attribute *attr,
                                             char *buf)
{
        struct amdgpu_device *adev = dev_get_drvdata(dev);
        int hyst = to_sensor_dev_attr(attr)->index;
        int temp;

        if (hyst)
                temp = adev->pm.dpm.thermal.min_temp;
        else
                temp = adev->pm.dpm.thermal.max_temp;

        return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}

static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
                                            struct device_attribute *attr,
                                            char *buf)
{
        struct amdgpu_device *adev = dev_get_drvdata(dev);
        u32 pwm_mode = 0;

        if (!adev->pp_enabled && !adev->pm.funcs->get_fan_control_mode)
                return -EINVAL;

        pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);

        /* never 0 (full-speed), fuse or smc-controlled always */
        return sprintf(buf, "%i\n", pwm_mode == FDO_PWM_MODE_STATIC ? 1 : 2);
}

static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
                                            struct device_attribute *attr,
                                            const char *buf,
                                            size_t count)
{
        struct amdgpu_device *adev = dev_get_drvdata(dev);
        int err;
        int value;

        if (!adev->pp_enabled && !adev->pm.funcs->set_fan_control_mode)
                return -EINVAL;

        err = kstrtoint(buf, 10, &value);
        if (err)
                return err;

        switch (value) {
        case 1: /* manual, percent-based */
                amdgpu_dpm_set_fan_control_mode(adev, FDO_PWM_MODE_STATIC);
                break;
        default: /* disable */
                amdgpu_dpm_set_fan_control_mode(adev, 0);
                break;
        }

        return count;
}

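/*
 * Illustrative userspace example (hwmonN is an assumed node name): switch
 * the fan to manual, percent-based control and back out of it:
 *
 *   echo 1 > /sys/class/hwmon/hwmonN/pwm1_enable   # static/manual mode
 *   echo 2 > /sys/class/hwmon/hwmonN/pwm1_enable   # leave static mode
 *
 * Reads of pwm1_enable report 1 in static mode and 2 otherwise; 0 (full
 * speed) is never reported, per amdgpu_hwmon_get_pwm1_enable() above.
 */
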
static ssize_t amdgpu_hwmon_get_pwm1_min(struct device *dev,
                                         struct device_attribute *attr,
                                         char *buf)
{
        return sprintf(buf, "%i\n", 0);
}

static ssize_t amdgpu_hwmon_get_pwm1_max(struct device *dev,
                                         struct device_attribute *attr,
                                         char *buf)
{
        return sprintf(buf, "%i\n", 255);
}

static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
                                     struct device_attribute *attr,
                                     const char *buf, size_t count)
{
        struct amdgpu_device *adev = dev_get_drvdata(dev);
        int err;
        u32 value;

        err = kstrtou32(buf, 10, &value);
        if (err)
                return err;

        value = (value * 100) / 255;

        err = amdgpu_dpm_set_fan_speed_percent(adev, value);
        if (err)
                return err;

        return count;
}

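/*
 * Worked example of the conversion above: hwmon expresses PWM duty as
 * 0-255 while the DPM interface takes a percentage, so writing 128 to
 * pwm1 programs (128 * 100) / 255 = 50 percent fan speed. The integer
 * division means a written pwm1 value may not round-trip exactly.
 */
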
static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
                                     struct device_attribute *attr,
                                     char *buf)
{
        struct amdgpu_device *adev = dev_get_drvdata(dev);
        int err;
        u32 speed;

        err = amdgpu_dpm_get_fan_speed_percent(adev, &speed);
        if (err)
                return err;

        speed = (speed * 255) / 100;

        return sprintf(buf, "%i\n", speed);
}

static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1);
static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1, amdgpu_hwmon_set_pwm1, 0);
static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1_enable, amdgpu_hwmon_set_pwm1_enable, 0);
static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, amdgpu_hwmon_get_pwm1_min, NULL, 0);
static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, amdgpu_hwmon_get_pwm1_max, NULL, 0);

static struct attribute *hwmon_attributes[] = {
        &sensor_dev_attr_temp1_input.dev_attr.attr,
        &sensor_dev_attr_temp1_crit.dev_attr.attr,
        &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
        &sensor_dev_attr_pwm1.dev_attr.attr,
        &sensor_dev_attr_pwm1_enable.dev_attr.attr,
        &sensor_dev_attr_pwm1_min.dev_attr.attr,
        &sensor_dev_attr_pwm1_max.dev_attr.attr,
        NULL
};

static umode_t hwmon_attributes_visible(struct kobject *kobj,
                                        struct attribute *attr, int index)
{
        struct device *dev = kobj_to_dev(kobj);
        struct amdgpu_device *adev = dev_get_drvdata(dev);
        umode_t effective_mode = attr->mode;

        /* Skip limit attributes if DPM is not enabled */
        if (!adev->pm.dpm_enabled &&
            (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
             attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr ||
             attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
             attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
             attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
             attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
                return 0;

        if (adev->pp_enabled)
                return effective_mode;

        /* Skip fan attributes if fan is not present */
        if (adev->pm.no_fan &&
            (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
             attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
             attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
             attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
                return 0;

        /* mask fan attributes if we have no bindings for this asic to expose */
        if ((!adev->pm.funcs->get_fan_speed_percent &&
             attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
            (!adev->pm.funcs->get_fan_control_mode &&
             attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
                effective_mode &= ~S_IRUGO;

        if ((!adev->pm.funcs->set_fan_speed_percent &&
             attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
            (!adev->pm.funcs->set_fan_control_mode &&
             attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
                effective_mode &= ~S_IWUSR;

        /* hide max/min values if we can't both query and manage the fan */
        if ((!adev->pm.funcs->set_fan_speed_percent &&
             !adev->pm.funcs->get_fan_speed_percent) &&
            (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
             attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
                return 0;

        return effective_mode;
}

static const struct attribute_group hwmon_attrgroup = {
        .attrs = hwmon_attributes,
        .is_visible = hwmon_attributes_visible,
};

static const struct attribute_group *hwmon_groups[] = {
        &hwmon_attrgroup,
        NULL
};

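/*
 * Note on .is_visible semantics (core sysfs behavior, not amdgpu-specific):
 * returning 0 from hwmon_attributes_visible() hides an attribute entirely,
 * while clearing S_IRUGO or S_IWUSR in the returned mode keeps the file
 * visible but masks reads or writes when the asic lacks the corresponding
 * fan callbacks.
 */
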
void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
{
        struct amdgpu_device *adev =
                container_of(work, struct amdgpu_device,
                             pm.dpm.thermal.work);
        /* switch to the thermal state */
        enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;

        if (!adev->pm.dpm_enabled)
                return;

        if (adev->pm.funcs->get_temperature) {
                int temp = amdgpu_dpm_get_temperature(adev);

                if (temp < adev->pm.dpm.thermal.min_temp)
                        /* switch back the user state */
                        dpm_state = adev->pm.dpm.user_state;
        } else {
                if (adev->pm.dpm.thermal.high_to_low)
                        /* switch back the user state */
                        dpm_state = adev->pm.dpm.user_state;
        }
        mutex_lock(&adev->pm.mutex);
        if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
                adev->pm.dpm.thermal_active = true;
        else
                adev->pm.dpm.thermal_active = false;
        adev->pm.dpm.state = dpm_state;
        mutex_unlock(&adev->pm.mutex);

        amdgpu_pm_compute_clocks(adev);
}

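/*
 * Note on the two paths above: asics that can report a temperature compare
 * it against the recovery threshold (thermal.min_temp), while asics without
 * a temperature callback rely on the direction of the last thermal interrupt
 * (thermal.high_to_low) to decide when to leave the thermal state.
 */
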
static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev,
                                                     enum amd_pm_state_type dpm_state)
{
        int i;
        struct amdgpu_ps *ps;
        u32 ui_class;
        bool single_display = (adev->pm.dpm.new_active_crtc_count < 2) ?
                true : false;

        /* check if the vblank period is too short to adjust the mclk */
        if (single_display && adev->pm.funcs->vblank_too_short) {
                if (amdgpu_dpm_vblank_too_short(adev))
                        single_display = false;
        }

        /* certain older asics have a separate 3D performance state,
         * so try that first if the user selected performance
         */
        if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
                dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
        /* balanced states don't exist at the moment */
        if (dpm_state == POWER_STATE_TYPE_BALANCED)
                dpm_state = POWER_STATE_TYPE_PERFORMANCE;

restart_search:
        /* Pick the best power state based on current conditions */
        for (i = 0; i < adev->pm.dpm.num_ps; i++) {
                ps = &adev->pm.dpm.ps[i];
                ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
                switch (dpm_state) {
                /* user states */
                case POWER_STATE_TYPE_BATTERY:
                        if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
                                if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
                                        if (single_display)
                                                return ps;
                                } else
                                        return ps;
                        }
                        break;
                case POWER_STATE_TYPE_BALANCED:
                        if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
                                if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
                                        if (single_display)
                                                return ps;
                                } else
                                        return ps;
                        }
                        break;
                case POWER_STATE_TYPE_PERFORMANCE:
                        if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
                                if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
                                        if (single_display)
                                                return ps;
                                } else
                                        return ps;
                        }
                        break;
                /* internal states */
                case POWER_STATE_TYPE_INTERNAL_UVD:
                        if (adev->pm.dpm.uvd_ps)
                                return adev->pm.dpm.uvd_ps;
                        else
                                break;
                case POWER_STATE_TYPE_INTERNAL_UVD_SD:
                        if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
                                return ps;
                        break;
                case POWER_STATE_TYPE_INTERNAL_UVD_HD:
                        if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
                                return ps;
                        break;
                case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
                        if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
                                return ps;
                        break;
                case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
                        if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
                                return ps;
                        break;
                case POWER_STATE_TYPE_INTERNAL_BOOT:
                        return adev->pm.dpm.boot_ps;
                case POWER_STATE_TYPE_INTERNAL_THERMAL:
                        if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
                                return ps;
                        break;
                case POWER_STATE_TYPE_INTERNAL_ACPI:
                        if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
                                return ps;
                        break;
                case POWER_STATE_TYPE_INTERNAL_ULV:
                        if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
                                return ps;
                        break;
                case POWER_STATE_TYPE_INTERNAL_3DPERF:
                        if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
                                return ps;
                        break;
                default:
                        break;
                }
        }
        /* use a fallback state if we didn't match */
        switch (dpm_state) {
        case POWER_STATE_TYPE_INTERNAL_UVD_SD:
                dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
                goto restart_search;
        case POWER_STATE_TYPE_INTERNAL_UVD_HD:
        case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
        case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
                if (adev->pm.dpm.uvd_ps) {
                        return adev->pm.dpm.uvd_ps;
                } else {
                        dpm_state = POWER_STATE_TYPE_PERFORMANCE;
                        goto restart_search;
                }
        case POWER_STATE_TYPE_INTERNAL_THERMAL:
                dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
                goto restart_search;
        case POWER_STATE_TYPE_INTERNAL_ACPI:
                dpm_state = POWER_STATE_TYPE_BATTERY;
                goto restart_search;
        case POWER_STATE_TYPE_BATTERY:
        case POWER_STATE_TYPE_BALANCED:
        case POWER_STATE_TYPE_INTERNAL_3DPERF:
                dpm_state = POWER_STATE_TYPE_PERFORMANCE;
                goto restart_search;
        default:
                break;
        }

        return NULL;
}

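/*
 * Example of the fallback chain above (illustrative): a request for
 * POWER_STATE_TYPE_INTERNAL_THERMAL that matches no power state retries
 * as INTERNAL_ACPI, then BATTERY, then PERFORMANCE, before giving up and
 * returning NULL.
 */
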
static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
{
        int i;
        struct amdgpu_ps *ps;
        enum amd_pm_state_type dpm_state;
        int ret;

        /* if dpm init failed */
        if (!adev->pm.dpm_enabled)
                return;

        if (adev->pm.dpm.user_state != adev->pm.dpm.state) {
                /* add other state override checks here */
                if ((!adev->pm.dpm.thermal_active) &&
                    (!adev->pm.dpm.uvd_active))
                        adev->pm.dpm.state = adev->pm.dpm.user_state;
        }
        dpm_state = adev->pm.dpm.state;

        ps = amdgpu_dpm_pick_power_state(adev, dpm_state);
        if (ps)
                adev->pm.dpm.requested_ps = ps;
        else
                return;

        /* no need to reprogram if nothing changed unless we are on BTC+ */
        if (adev->pm.dpm.current_ps == adev->pm.dpm.requested_ps) {
                /* vce just modifies an existing state so force a change */
                if (ps->vce_active != adev->pm.dpm.vce_active)
                        goto force;
                if (adev->flags & AMD_IS_APU) {
                        /* for APUs if the num crtcs changed but state is the same,
                         * all we need to do is update the display configuration.
                         */
                        if (adev->pm.dpm.new_active_crtcs != adev->pm.dpm.current_active_crtcs) {
                                /* update display watermarks based on new power state */
                                amdgpu_display_bandwidth_update(adev);
                                /* update displays */
                                amdgpu_dpm_display_configuration_changed(adev);
                                adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
                                adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
                        }
                        return;
                } else {
                        /* for BTC+ if the num crtcs hasn't changed and state is the same,
                         * nothing to do, if the num crtcs is > 1 and state is the same,
                         * update display configuration.
                         */
                        if (adev->pm.dpm.new_active_crtcs ==
                            adev->pm.dpm.current_active_crtcs) {
                                return;
                        } else if ((adev->pm.dpm.current_active_crtc_count > 1) &&
                                   (adev->pm.dpm.new_active_crtc_count > 1)) {
                                /* update display watermarks based on new power state */
                                amdgpu_display_bandwidth_update(adev);
                                /* update displays */
                                amdgpu_dpm_display_configuration_changed(adev);
                                adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
                                adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
                                return;
                        }
                }
        }

force:
        if (amdgpu_dpm == 1) {
                printk("switching from power state:\n");
                amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
                printk("switching to power state:\n");
                amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps);
        }

        mutex_lock(&adev->ring_lock);

        /* update whether vce is active */
        ps->vce_active = adev->pm.dpm.vce_active;

        ret = amdgpu_dpm_pre_set_power_state(adev);
        if (ret)
                goto done;

        /* update display watermarks based on new power state */
        amdgpu_display_bandwidth_update(adev);
        /* update displays */
        amdgpu_dpm_display_configuration_changed(adev);

        adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
        adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;

        /* wait for the rings to drain */
        for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
                struct amdgpu_ring *ring = adev->rings[i];
                if (ring && ring->ready)
                        amdgpu_fence_wait_empty(ring);
        }

        /* program the new power state */
        amdgpu_dpm_set_power_state(adev);

        /* update current power state */
        adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps;

        amdgpu_dpm_post_set_power_state(adev);

        if (adev->pm.funcs->force_performance_level) {
                if (adev->pm.dpm.thermal_active) {
                        enum amdgpu_dpm_forced_level level = adev->pm.dpm.forced_level;
                        /* force low perf level for thermal */
                        amdgpu_dpm_force_performance_level(adev, AMDGPU_DPM_FORCED_LEVEL_LOW);
                        /* save the user's level */
                        adev->pm.dpm.forced_level = level;
                } else {
                        /* otherwise, user selected level */
                        amdgpu_dpm_force_performance_level(adev, adev->pm.dpm.forced_level);
                }
        }

done:
        mutex_unlock(&adev->ring_lock);
}

void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
{
        if (adev->pp_enabled)
                amdgpu_dpm_powergate_uvd(adev, !enable);
        else {
                if (adev->pm.funcs->powergate_uvd) {
                        mutex_lock(&adev->pm.mutex);
                        /* enable/disable UVD */
                        amdgpu_dpm_powergate_uvd(adev, !enable);
                        mutex_unlock(&adev->pm.mutex);
                } else {
                        if (enable) {
                                mutex_lock(&adev->pm.mutex);
                                adev->pm.dpm.uvd_active = true;
                                adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
                                mutex_unlock(&adev->pm.mutex);
                        } else {
                                mutex_lock(&adev->pm.mutex);
                                adev->pm.dpm.uvd_active = false;
                                mutex_unlock(&adev->pm.mutex);
                        }
                        amdgpu_pm_compute_clocks(adev);
                }
        }
}

void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
{
        if (adev->pp_enabled)
                amdgpu_dpm_powergate_vce(adev, !enable);
        else {
                if (adev->pm.funcs->powergate_vce) {
                        mutex_lock(&adev->pm.mutex);
                        amdgpu_dpm_powergate_vce(adev, !enable);
                        mutex_unlock(&adev->pm.mutex);
                } else {
                        if (enable) {
                                mutex_lock(&adev->pm.mutex);
                                adev->pm.dpm.vce_active = true;
                                /* XXX select vce level based on ring/task */
                                adev->pm.dpm.vce_level = AMDGPU_VCE_LEVEL_AC_ALL;
                                mutex_unlock(&adev->pm.mutex);
                        } else {
                                mutex_lock(&adev->pm.mutex);
                                adev->pm.dpm.vce_active = false;
                                mutex_unlock(&adev->pm.mutex);
                        }
                        amdgpu_pm_compute_clocks(adev);
                }
        }
}

void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
{
        int i;

        if (adev->pp_enabled)
                /* TO DO */
                return;

        for (i = 0; i < adev->pm.dpm.num_ps; i++)
                amdgpu_dpm_print_power_state(adev, &adev->pm.dpm.ps[i]);
}

int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
{
        int ret;

        if (adev->pm.sysfs_initialized)
                return 0;

        if (!adev->pp_enabled) {
                if (adev->pm.funcs->get_temperature == NULL)
                        return 0;
        }

        adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
                                                                   DRIVER_NAME, adev,
                                                                   hwmon_groups);
        if (IS_ERR(adev->pm.int_hwmon_dev)) {
                ret = PTR_ERR(adev->pm.int_hwmon_dev);
                dev_err(adev->dev,
                        "Unable to register hwmon device: %d\n", ret);
                return ret;
        }

        ret = device_create_file(adev->dev, &dev_attr_power_dpm_state);
        if (ret) {
                DRM_ERROR("failed to create device file for dpm state\n");
                return ret;
        }
        ret = device_create_file(adev->dev, &dev_attr_power_dpm_force_performance_level);
        if (ret) {
                DRM_ERROR("failed to create device file for dpm forced performance level\n");
                return ret;
        }
        ret = amdgpu_debugfs_pm_init(adev);
        if (ret) {
                DRM_ERROR("Failed to register debugfs file for dpm!\n");
                return ret;
        }

        adev->pm.sysfs_initialized = true;

        return 0;
}

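/*
 * Note (assumed layout): the registered hwmon interface shows up under
 * /sys/class/hwmon/hwmonN (N depends on probe order), named after
 * DRIVER_NAME, and is also reachable through the card's device directory,
 * e.g. /sys/class/drm/card0/device/hwmon/.
 */
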
void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
{
        if (adev->pm.int_hwmon_dev)
                hwmon_device_unregister(adev->pm.int_hwmon_dev);
        device_remove_file(adev->dev, &dev_attr_power_dpm_state);
        device_remove_file(adev->dev, &dev_attr_power_dpm_force_performance_level);
}

void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
{
        struct drm_device *ddev = adev->ddev;
        struct drm_crtc *crtc;
        struct amdgpu_crtc *amdgpu_crtc;
        int i = 0;

        if (!adev->pm.dpm_enabled)
                return;

        if (adev->pp_enabled) {
                amdgpu_display_bandwidth_update(adev);
                mutex_lock(&adev->ring_lock);
                for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
                        struct amdgpu_ring *ring = adev->rings[i];
                        if (ring && ring->ready)
                                amdgpu_fence_wait_empty(ring);
                }
                mutex_unlock(&adev->ring_lock);

                amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_DISPLAY_CONFIG_CHANGE, NULL, NULL);
        } else {
                mutex_lock(&adev->pm.mutex);
                adev->pm.dpm.new_active_crtcs = 0;
                adev->pm.dpm.new_active_crtc_count = 0;
                if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
                        list_for_each_entry(crtc,
                                            &ddev->mode_config.crtc_list, head) {
                                amdgpu_crtc = to_amdgpu_crtc(crtc);
                                if (crtc->enabled) {
                                        adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id);
                                        adev->pm.dpm.new_active_crtc_count++;
                                }
                        }
                }

                /* update battery/ac status */
                if (power_supply_is_system_supplied() > 0)
                        adev->pm.dpm.ac_power = true;
                else
                        adev->pm.dpm.ac_power = false;

                amdgpu_dpm_change_power_state_locked(adev);

                mutex_unlock(&adev->pm.mutex);
        }
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        struct amdgpu_device *adev = dev->dev_private;

        if (!adev->pm.dpm_enabled) {
                seq_printf(m, "dpm not enabled\n");
                return 0;
        }
        if (adev->pp_enabled) {
                amdgpu_dpm_debugfs_print_current_performance_level(adev, m);
        } else {
                mutex_lock(&adev->pm.mutex);
                if (adev->pm.funcs->debugfs_print_current_performance_level)
                        amdgpu_dpm_debugfs_print_current_performance_level(adev, m);
                else
                        seq_printf(m, "Debugfs support not implemented for this asic\n");
                mutex_unlock(&adev->pm.mutex);
        }

        return 0;
}

static struct drm_info_list amdgpu_pm_info_list[] = {
        {"amdgpu_pm_info", amdgpu_debugfs_pm_info, 0, NULL},
};
#endif

static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
        return amdgpu_debugfs_add_files(adev, amdgpu_pm_info_list, ARRAY_SIZE(amdgpu_pm_info_list));
#else
        return 0;
#endif
}
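
/*
 * Illustrative usage (assumes debugfs mounted at /sys/kernel/debug): with
 * CONFIG_DEBUG_FS enabled, the current performance level can be read with
 *
 *   cat /sys/kernel/debug/dri/0/amdgpu_pm_info
 *
 * where "0" is the DRM minor of the card.
 */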