/*
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Rafał Miłecki <zajec5@gmail.com>
 *          Alex Deucher <alexdeucher@gmail.com>
 */
#include "amdgpu_drv.h"
#include "amdgpu_pm.h"
#include "amdgpu_dpm.h"
#include <linux/power_supply.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include "amd_powerplay.h"
static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev);
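/*
 * amdgpu_pm_acpi_event_handler - handle AC/DC power source changes
 *
 * Called on ACPI power events: re-reads the system power supply state,
 * updates adev->pm.dpm.ac_power under the pm mutex, and lets asics that
 * implement enable_bapm adjust bidirectional application power
 * management for the new power source.
 */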
void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
{
	if (adev->pm.dpm_enabled) {
		mutex_lock(&adev->pm.mutex);
		if (power_supply_is_system_supplied() > 0)
			adev->pm.dpm.ac_power = true;
		else
			adev->pm.dpm.ac_power = false;
		if (adev->pm.funcs->enable_bapm)
			amdgpu_dpm_enable_bapm(adev, adev->pm.dpm.ac_power);
		mutex_unlock(&adev->pm.mutex);
	}
}
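/*
 * amdgpu_get_dpm_state - sysfs show for power_dpm_state
 *
 * Reports the current dpm state as "battery", "balanced" or
 * "performance". With powerplay enabled the state is queried from the
 * powerplay backend, otherwise the cached user_state is reported.
 */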
static ssize_t amdgpu_get_dpm_state(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	enum amd_pm_state_type pm;

	if (adev->pp_enabled) {
		pm = amdgpu_dpm_get_current_power_state(adev);
	} else
		pm = adev->pm.dpm.user_state;

	return snprintf(buf, PAGE_SIZE, "%s\n",
			(pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
			(pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
}
static ssize_t amdgpu_set_dpm_state(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	enum amd_pm_state_type state;

	if (strncmp("battery", buf, strlen("battery")) == 0)
		state = POWER_STATE_TYPE_BATTERY;
	else if (strncmp("balanced", buf, strlen("balanced")) == 0)
		state = POWER_STATE_TYPE_BALANCED;
	else if (strncmp("performance", buf, strlen("performance")) == 0)
		state = POWER_STATE_TYPE_PERFORMANCE;
	else {
		count = -EINVAL;
		goto fail;
	}

	if (adev->pp_enabled) {
		amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_ENABLE_USER_STATE, &state, NULL);
	} else {
		mutex_lock(&adev->pm.mutex);
		adev->pm.dpm.user_state = state;
		mutex_unlock(&adev->pm.mutex);

		/* Can't set dpm state when the card is off */
		if (!(adev->flags & AMD_IS_PX) ||
		    (ddev->switch_power_state == DRM_SWITCH_POWER_ON))
			amdgpu_pm_compute_clocks(adev);
	}
fail:
	return count;
}
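/*
 * amdgpu_get_dpm_forced_performance_level - sysfs show for
 * power_dpm_force_performance_level
 *
 * Reports the current forced level as "auto", "low" or "high", or
 * "off" when a PX card is powered down.
 */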
static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev,
						       struct device_attribute *attr,
						       char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;

	if ((adev->flags & AMD_IS_PX) &&
	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
		return snprintf(buf, PAGE_SIZE, "off\n");

	if (adev->pp_enabled) {
		enum amd_dpm_forced_level level;

		level = amdgpu_dpm_get_performance_level(adev);
		return snprintf(buf, PAGE_SIZE, "%s\n",
				(level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
				(level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" : "high");
	} else {
		enum amdgpu_dpm_forced_level level;

		level = adev->pm.dpm.forced_level;
		return snprintf(buf, PAGE_SIZE, "%s\n",
				(level == AMDGPU_DPM_FORCED_LEVEL_AUTO) ? "auto" :
				(level == AMDGPU_DPM_FORCED_LEVEL_LOW) ? "low" : "high");
	}
}
static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
						       struct device_attribute *attr,
						       const char *buf,
						       size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	enum amdgpu_dpm_forced_level level;
	int ret = 0;

	/* Can't force performance level when the card is off */
	if ((adev->flags & AMD_IS_PX) &&
	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
		return -EINVAL;

	if (strncmp("low", buf, strlen("low")) == 0) {
		level = AMDGPU_DPM_FORCED_LEVEL_LOW;
	} else if (strncmp("high", buf, strlen("high")) == 0) {
		level = AMDGPU_DPM_FORCED_LEVEL_HIGH;
	} else if (strncmp("auto", buf, strlen("auto")) == 0) {
		level = AMDGPU_DPM_FORCED_LEVEL_AUTO;
	} else {
		count = -EINVAL;
		goto fail;
	}

	if (adev->pp_enabled)
		amdgpu_dpm_force_performance_level(adev, level);
	else {
		mutex_lock(&adev->pm.mutex);
		if (adev->pm.dpm.thermal_active) {
			count = -EINVAL;
			mutex_unlock(&adev->pm.mutex);
			goto fail;
		}
		ret = amdgpu_dpm_force_performance_level(adev, level);
		if (ret)
			count = -EINVAL;
		else
			adev->pm.dpm.forced_level = level;
		mutex_unlock(&adev->pm.mutex);
	}
fail:
	return count;
}
static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, amdgpu_get_dpm_state, amdgpu_set_dpm_state);
static DEVICE_ATTR(power_dpm_force_performance_level, S_IRUGO | S_IWUSR,
		   amdgpu_get_dpm_forced_performance_level,
		   amdgpu_set_dpm_forced_performance_level);
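/*
 * hwmon: temp1_input reports the current GPU temperature; the dpm
 * backends return it in millidegrees Celsius, as the hwmon ABI expects.
 */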
static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	struct drm_device *ddev = adev->ddev;
	int temp;

	/* Can't get temperature when the card is off */
	if ((adev->flags & AMD_IS_PX) &&
	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
		return -EINVAL;

	if (!adev->pp_enabled && !adev->pm.funcs->get_temperature)
		temp = 0;
	else
		temp = amdgpu_dpm_get_temperature(adev);

	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}
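/*
 * hwmon: temp1_crit (index 0) reports the maximum thermal trip point,
 * temp1_crit_hyst (index 1) the minimum.
 */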
static ssize_t amdgpu_hwmon_show_temp_thresh(struct device *dev,
					     struct device_attribute *attr,
					     char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int hyst = to_sensor_dev_attr(attr)->index;
	int temp;

	if (hyst)
		temp = adev->pm.dpm.thermal.min_temp;
	else
		temp = adev->pm.dpm.thermal.max_temp;

	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}
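/*
 * hwmon: pwm1_enable reports the fan control mode; 1 = manual (static
 * fan speed), 2 = automatic. 0 (full speed) is never reported because
 * the fan is always fuse or smc controlled.
 */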
static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	u32 pwm_mode = 0;

	if (!adev->pp_enabled && !adev->pm.funcs->get_fan_control_mode)
		return -EINVAL;

	pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);

	/* never 0 (full-speed), fuse or smc-controlled always */
	return sprintf(buf, "%i\n", pwm_mode == FDO_PWM_MODE_STATIC ? 1 : 2);
}
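/*
 * hwmon: pwm1_enable store; writing 1 selects manual (static) fan
 * control, any other value hands control back to the smc.
 */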
static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf,
					    size_t count)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	int value;

	if (!adev->pp_enabled && !adev->pm.funcs->set_fan_control_mode)
		return -EINVAL;

	err = kstrtoint(buf, 10, &value);
	if (err)
		return err;

	switch (value) {
	case 1: /* manual, percent-based */
		amdgpu_dpm_set_fan_control_mode(adev, FDO_PWM_MODE_STATIC);
		break;
	default: /* disable */
		amdgpu_dpm_set_fan_control_mode(adev, 0);
		break;
	}

	return count;
}
static ssize_t amdgpu_hwmon_get_pwm1_min(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	return sprintf(buf, "%i\n", 0);
}
static ssize_t amdgpu_hwmon_get_pwm1_max(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	return sprintf(buf, "%i\n", 255);
}
static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	u32 value;

	err = kstrtou32(buf, 10, &value);
	if (err)
		return err;

	value = (value * 100) / 255;

	err = amdgpu_dpm_set_fan_speed_percent(adev, value);
	if (err)
		return err;

	return count;
}
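/*
 * hwmon: pwm1 show; scales the fan speed percentage reported by the
 * dpm backend back up to the 0-255 sysfs range.
 */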
static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	u32 speed;

	err = amdgpu_dpm_get_fan_speed_percent(adev, &speed);
	if (err)
		return err;

	speed = (speed * 255) / 100;

	return sprintf(buf, "%i\n", speed);
}
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1);
static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1, amdgpu_hwmon_set_pwm1, 0);
static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1_enable, amdgpu_hwmon_set_pwm1_enable, 0);
static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, amdgpu_hwmon_get_pwm1_min, NULL, 0);
static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, amdgpu_hwmon_get_pwm1_max, NULL, 0);
static struct attribute *hwmon_attributes[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp1_crit.dev_attr.attr,
	&sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
	&sensor_dev_attr_pwm1.dev_attr.attr,
	&sensor_dev_attr_pwm1_enable.dev_attr.attr,
	&sensor_dev_attr_pwm1_min.dev_attr.attr,
	&sensor_dev_attr_pwm1_max.dev_attr.attr,
	NULL
};
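/*
 * hwmon_attributes_visible - filter the hwmon attributes per asic
 *
 * Hides or read/write-protects attributes depending on whether dpm is
 * enabled, a fan is present, and which fan query/control callbacks the
 * asic actually provides.
 */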
static umode_t hwmon_attributes_visible(struct kobject *kobj,
					struct attribute *attr, int index)
{
	struct device *dev = kobj_to_dev(kobj);
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	umode_t effective_mode = attr->mode;

	/* Skip limit attributes if DPM is not enabled */
	if (!adev->pm.dpm_enabled &&
	    (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
		return 0;

	if (adev->pp_enabled)
		return effective_mode;

	/* Skip fan attributes if fan is not present */
	if (adev->pm.no_fan &&
	    (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
		return 0;

	/* mask fan attributes if we have no bindings for this asic to expose */
	if ((!adev->pm.funcs->get_fan_speed_percent &&
	     attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
	    (!adev->pm.funcs->get_fan_control_mode &&
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
		effective_mode &= ~S_IRUGO;

	if ((!adev->pm.funcs->set_fan_speed_percent &&
	     attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
	    (!adev->pm.funcs->set_fan_control_mode &&
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
		effective_mode &= ~S_IWUSR;

	/* hide max/min values if we can't both query and manage the fan */
	if ((!adev->pm.funcs->set_fan_speed_percent &&
	     !adev->pm.funcs->get_fan_speed_percent) &&
	    (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
		return 0;

	return effective_mode;
}
static const struct attribute_group hwmon_attrgroup = {
	.attrs = hwmon_attributes,
	.is_visible = hwmon_attributes_visible,
};
static const struct attribute_group *hwmon_groups[] = {
	&hwmon_attrgroup,
	NULL
};
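/*
 * amdgpu_dpm_thermal_work_handler - thermal interrupt bottom half
 *
 * Switches to the internal thermal power state when the GPU runs hot
 * and back to the user state once the temperature drops below the
 * minimum thermal trip point (or on a high-to-low interrupt for asics
 * without a temperature query callback).
 */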
void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device,
			     pm.dpm.thermal.work);
	/* switch to the thermal state */
	enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;

	if (!adev->pm.dpm_enabled)
		return;

	if (adev->pm.funcs->get_temperature) {
		int temp = amdgpu_dpm_get_temperature(adev);

		if (temp < adev->pm.dpm.thermal.min_temp)
			/* switch back the user state */
			dpm_state = adev->pm.dpm.user_state;
	} else {
		if (adev->pm.dpm.thermal.high_to_low)
			/* switch back the user state */
			dpm_state = adev->pm.dpm.user_state;
	}
	mutex_lock(&adev->pm.mutex);
	if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
		adev->pm.dpm.thermal_active = true;
	else
		adev->pm.dpm.thermal_active = false;
	adev->pm.dpm.state = dpm_state;
	mutex_unlock(&adev->pm.mutex);

	amdgpu_pm_compute_clocks(adev);
}
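/*
 * amdgpu_dpm_pick_power_state - select the best matching power state
 *
 * Walks the power state table looking for a state whose classification
 * matches the requested dpm state, honoring single-display-only states.
 * If nothing matches, the request is progressively downgraded through
 * fallback states until a match is found.
 */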
static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev,
						     enum amd_pm_state_type dpm_state)
{
	int i;
	struct amdgpu_ps *ps;
	u32 ui_class;
	bool single_display = (adev->pm.dpm.new_active_crtc_count < 2) ?
		true : false;

	/* check if the vblank period is too short to adjust the mclk */
	if (single_display && adev->pm.funcs->vblank_too_short) {
		if (amdgpu_dpm_vblank_too_short(adev))
			single_display = false;
	}

	/* certain older asics have a separate 3D performance state,
	 * so try that first if the user selected performance
	 */
	if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
		dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
	/* balanced states don't exist at the moment */
	if (dpm_state == POWER_STATE_TYPE_BALANCED)
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;

restart_search:
	/* Pick the best power state based on current conditions */
	for (i = 0; i < adev->pm.dpm.num_ps; i++) {
		ps = &adev->pm.dpm.ps[i];
		ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
		switch (dpm_state) {
		/* user states */
		case POWER_STATE_TYPE_BATTERY:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_BALANCED:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_PERFORMANCE:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		/* internal states */
		case POWER_STATE_TYPE_INTERNAL_UVD:
			if (adev->pm.dpm.uvd_ps)
				return adev->pm.dpm.uvd_ps;
			else
				break;
		case POWER_STATE_TYPE_INTERNAL_UVD_SD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_BOOT:
			return adev->pm.dpm.boot_ps;
		case POWER_STATE_TYPE_INTERNAL_THERMAL:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ACPI:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ULV:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_3DPERF:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
				return ps;
			break;
		default:
			break;
		}
	}
	/* use a fallback state if we didn't match */
	switch (dpm_state) {
	case POWER_STATE_TYPE_INTERNAL_UVD_SD:
		dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_UVD_HD:
	case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
	case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
		if (adev->pm.dpm.uvd_ps) {
			return adev->pm.dpm.uvd_ps;
		} else {
			dpm_state = POWER_STATE_TYPE_PERFORMANCE;
			goto restart_search;
		}
	case POWER_STATE_TYPE_INTERNAL_THERMAL:
		dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_ACPI:
		dpm_state = POWER_STATE_TYPE_BATTERY;
		goto restart_search;
	case POWER_STATE_TYPE_BATTERY:
	case POWER_STATE_TYPE_BALANCED:
	case POWER_STATE_TYPE_INTERNAL_3DPERF:
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;
		goto restart_search;
	default:
		break;
	}

	return NULL;
}
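/*
 * amdgpu_dpm_change_power_state_locked - switch power states
 *
 * Picks and programs the requested power state, updating display
 * watermarks and draining the rings around the transition. Skips the
 * reprogram when the requested state equals the current one and only a
 * display configuration update is needed. Caller must hold pm.mutex.
 */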
static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
{
	int i;
	struct amdgpu_ps *ps;
	enum amd_pm_state_type dpm_state;
	int ret;

	/* if dpm init failed */
	if (!adev->pm.dpm_enabled)
		return;

	if (adev->pm.dpm.user_state != adev->pm.dpm.state) {
		/* add other state override checks here */
		if ((!adev->pm.dpm.thermal_active) &&
		    (!adev->pm.dpm.uvd_active))
			adev->pm.dpm.state = adev->pm.dpm.user_state;
	}
	dpm_state = adev->pm.dpm.state;

	ps = amdgpu_dpm_pick_power_state(adev, dpm_state);
	if (ps)
		adev->pm.dpm.requested_ps = ps;
	else
		return;

	/* no need to reprogram if nothing changed unless we are on BTC+ */
	if (adev->pm.dpm.current_ps == adev->pm.dpm.requested_ps) {
		/* vce just modifies an existing state so force a change */
		if (ps->vce_active != adev->pm.dpm.vce_active)
			goto force;
		if (adev->flags & AMD_IS_APU) {
			/* for APUs if the num crtcs changed but state is the same,
			 * all we need to do is update the display configuration.
			 */
			if (adev->pm.dpm.new_active_crtcs != adev->pm.dpm.current_active_crtcs) {
				/* update display watermarks based on new power state */
				amdgpu_display_bandwidth_update(adev);
				/* update displays */
				amdgpu_dpm_display_configuration_changed(adev);
				adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
				adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
			}
			return;
		} else {
			/* for BTC+ if the num crtcs hasn't changed and state is the same,
			 * nothing to do, if the num crtcs is > 1 and state is the same,
			 * update display configuration.
			 */
			if (adev->pm.dpm.new_active_crtcs ==
			    adev->pm.dpm.current_active_crtcs) {
				return;
			} else if ((adev->pm.dpm.current_active_crtc_count > 1) &&
				   (adev->pm.dpm.new_active_crtc_count > 1)) {
				/* update display watermarks based on new power state */
				amdgpu_display_bandwidth_update(adev);
				/* update displays */
				amdgpu_dpm_display_configuration_changed(adev);
				adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
				adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
				return;
			}
		}
	}

force:
	if (amdgpu_dpm == 1) {
		printk("switching from power state:\n");
		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
		printk("switching to power state:\n");
		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps);
	}

	mutex_lock(&adev->ring_lock);

	/* update whether vce is active */
	ps->vce_active = adev->pm.dpm.vce_active;

	ret = amdgpu_dpm_pre_set_power_state(adev);
	if (ret)
		goto done;

	/* update display watermarks based on new power state */
	amdgpu_display_bandwidth_update(adev);

	/* wait for the rings to drain */
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (ring && ring->ready)
			amdgpu_fence_wait_empty(ring);
	}

	/* program the new power state */
	amdgpu_dpm_set_power_state(adev);

	/* update current power state */
	adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps;

	amdgpu_dpm_post_set_power_state(adev);

	/* update displays */
	amdgpu_dpm_display_configuration_changed(adev);

	adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
	adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;

	if (adev->pm.funcs->force_performance_level) {
		if (adev->pm.dpm.thermal_active) {
			enum amdgpu_dpm_forced_level level = adev->pm.dpm.forced_level;
			/* force low perf level for thermal */
			amdgpu_dpm_force_performance_level(adev, AMDGPU_DPM_FORCED_LEVEL_LOW);
			/* save the user's level */
			adev->pm.dpm.forced_level = level;
		} else {
			/* otherwise, user selected level */
			amdgpu_dpm_force_performance_level(adev, adev->pm.dpm.forced_level);
		}
	}

done:
	mutex_unlock(&adev->ring_lock);
}
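/*
 * amdgpu_dpm_enable_uvd - power UVD up or down
 *
 * Uses powergating when the backend supports it; otherwise selects or
 * leaves the internal UVD power state and recomputes clocks.
 */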
void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
{
	if (adev->pp_enabled)
		amdgpu_dpm_powergate_uvd(adev, !enable);
	else {
		if (adev->pm.funcs->powergate_uvd) {
			mutex_lock(&adev->pm.mutex);
			/* enable/disable UVD */
			amdgpu_dpm_powergate_uvd(adev, !enable);
			mutex_unlock(&adev->pm.mutex);
		} else {
			if (enable) {
				mutex_lock(&adev->pm.mutex);
				adev->pm.dpm.uvd_active = true;
				adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
				mutex_unlock(&adev->pm.mutex);
			} else {
				mutex_lock(&adev->pm.mutex);
				adev->pm.dpm.uvd_active = false;
				mutex_unlock(&adev->pm.mutex);
			}
			amdgpu_pm_compute_clocks(adev);
		}
	}
}
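/*
 * amdgpu_dpm_enable_vce - power VCE up or down
 *
 * Same scheme as UVD: powergate when possible, otherwise track
 * vce_active and the VCE level and recompute clocks.
 */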
void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
{
	if (adev->pp_enabled)
		amdgpu_dpm_powergate_vce(adev, !enable);
	else {
		if (adev->pm.funcs->powergate_vce) {
			mutex_lock(&adev->pm.mutex);
			amdgpu_dpm_powergate_vce(adev, !enable);
			mutex_unlock(&adev->pm.mutex);
		} else {
			if (enable) {
				mutex_lock(&adev->pm.mutex);
				adev->pm.dpm.vce_active = true;
				/* XXX select vce level based on ring/task */
				adev->pm.dpm.vce_level = AMDGPU_VCE_LEVEL_AC_ALL;
				mutex_unlock(&adev->pm.mutex);
			} else {
				mutex_lock(&adev->pm.mutex);
				adev->pm.dpm.vce_active = false;
				mutex_unlock(&adev->pm.mutex);
			}
			amdgpu_pm_compute_clocks(adev);
		}
	}
}
void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
{
	int i;

	if (adev->pp_enabled)
		return;

	for (i = 0; i < adev->pm.dpm.num_ps; i++)
		amdgpu_dpm_print_power_state(adev, &adev->pm.dpm.ps[i]);
}
int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
{
	int ret;

	if (adev->pm.sysfs_initialized)
		return 0;

	if (!adev->pp_enabled) {
		if (adev->pm.funcs->get_temperature == NULL)
			return 0;
	}

	adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
								   DRIVER_NAME, adev,
								   hwmon_groups);
	if (IS_ERR(adev->pm.int_hwmon_dev)) {
		ret = PTR_ERR(adev->pm.int_hwmon_dev);
		dev_err(adev->dev,
			"Unable to register hwmon device: %d\n", ret);
		return ret;
	}

	ret = device_create_file(adev->dev, &dev_attr_power_dpm_state);
	if (ret) {
		DRM_ERROR("failed to create device file for dpm state\n");
		return ret;
	}
	ret = device_create_file(adev->dev, &dev_attr_power_dpm_force_performance_level);
	if (ret) {
		DRM_ERROR("failed to create device file for dpm forced performance level\n");
		return ret;
	}

	ret = amdgpu_debugfs_pm_init(adev);
	if (ret) {
		DRM_ERROR("Failed to register debugfs file for dpm!\n");
		return ret;
	}

	adev->pm.sysfs_initialized = true;

	return 0;
}
void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
{
	if (adev->pm.int_hwmon_dev)
		hwmon_device_unregister(adev->pm.int_hwmon_dev);
	device_remove_file(adev->dev, &dev_attr_power_dpm_state);
	device_remove_file(adev->dev, &dev_attr_power_dpm_force_performance_level);
}
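/*
 * amdgpu_pm_compute_clocks - recalculate dpm for the current conditions
 *
 * With powerplay this updates bandwidth, drains the rings and
 * dispatches a display config change event; otherwise it recounts the
 * active crtcs, refreshes the AC power status and triggers a locked
 * power state change.
 */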
void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (!adev->pm.dpm_enabled)
		return;

	if (adev->pp_enabled) {
		int i = 0;

		amdgpu_display_bandwidth_update(adev);
		mutex_lock(&adev->ring_lock);
		for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
			struct amdgpu_ring *ring = adev->rings[i];
			if (ring && ring->ready)
				amdgpu_fence_wait_empty(ring);
		}
		mutex_unlock(&adev->ring_lock);

		amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_DISPLAY_CONFIG_CHANGE, NULL, NULL);
	} else {
		mutex_lock(&adev->pm.mutex);
		adev->pm.dpm.new_active_crtcs = 0;
		adev->pm.dpm.new_active_crtc_count = 0;
		if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
			list_for_each_entry(crtc,
					    &ddev->mode_config.crtc_list, head) {
				amdgpu_crtc = to_amdgpu_crtc(crtc);
				if (crtc->enabled) {
					adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id);
					adev->pm.dpm.new_active_crtc_count++;
				}
			}
		}
		/* update battery/ac status */
		if (power_supply_is_system_supplied() > 0)
			adev->pm.dpm.ac_power = true;
		else
			adev->pm.dpm.ac_power = false;

		amdgpu_dpm_change_power_state_locked(adev);

		mutex_unlock(&adev->pm.mutex);
	}
}
#if defined(CONFIG_DEBUG_FS)

static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_device *ddev = adev->ddev;

	if (!adev->pm.dpm_enabled) {
		seq_printf(m, "dpm not enabled\n");
		return 0;
	}
	if ((adev->flags & AMD_IS_PX) &&
	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
		seq_printf(m, "PX asic powered off\n");
	} else if (adev->pp_enabled) {
		amdgpu_dpm_debugfs_print_current_performance_level(adev, m);
	} else {
		mutex_lock(&adev->pm.mutex);
		if (adev->pm.funcs->debugfs_print_current_performance_level)
			amdgpu_dpm_debugfs_print_current_performance_level(adev, m);
		else
			seq_printf(m, "Debugfs support not implemented for this asic\n");
		mutex_unlock(&adev->pm.mutex);
	}

	return 0;
}

static struct drm_info_list amdgpu_pm_info_list[] = {
	{"amdgpu_pm_info", amdgpu_debugfs_pm_info, 0, NULL},
};
#endif

static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_pm_info_list, ARRAY_SIZE(amdgpu_pm_info_list));
#else
	return 0;
#endif
}