/*
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Rafał Miłecki <zajec5@gmail.com>
 *          Alex Deucher <alexdeucher@gmail.com>
 */
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_drv.h"
#include "amdgpu_pm.h"
#include "amdgpu_dpm.h"
#include "atom.h"
#include <linux/power_supply.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include "amd_powerplay.h"

static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev);

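/*
 * Called on ACPI AC/DC power-source change events: cache the new power
 * source and, where the asic provides an enable_bapm callback, retune
 * BAPM for it.
 */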
void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
{
	if (adev->pp_enabled)
		/* TODO */
		return;

	if (adev->pm.dpm_enabled) {
		mutex_lock(&adev->pm.mutex);
		if (power_supply_is_system_supplied() > 0)
			adev->pm.dpm.ac_power = true;
		else
			adev->pm.dpm.ac_power = false;
		if (adev->pm.funcs->enable_bapm)
			amdgpu_dpm_enable_bapm(adev, adev->pm.dpm.ac_power);
		mutex_unlock(&adev->pm.mutex);
	}
}

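/*
 * sysfs show handler for power_dpm_state: reports the current (powerplay)
 * or user-requested (legacy dpm) state as battery/balanced/performance.
 */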
static ssize_t amdgpu_get_dpm_state(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	enum amd_pm_state_type pm;

	if (adev->pp_enabled)
		pm = amdgpu_dpm_get_current_power_state(adev);
	else
		pm = adev->pm.dpm.user_state;

	return snprintf(buf, PAGE_SIZE, "%s\n",
			(pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
			(pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
}

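/*
 * sysfs store handler for power_dpm_state: parses battery/balanced/
 * performance and applies it via powerplay or the legacy dpm path.
 */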
static ssize_t amdgpu_set_dpm_state(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	enum amd_pm_state_type state;

	if (strncmp("battery", buf, strlen("battery")) == 0)
		state = POWER_STATE_TYPE_BATTERY;
	else if (strncmp("balanced", buf, strlen("balanced")) == 0)
		state = POWER_STATE_TYPE_BALANCED;
	else if (strncmp("performance", buf, strlen("performance")) == 0)
		state = POWER_STATE_TYPE_PERFORMANCE;
	else {
		count = -EINVAL;
		goto fail;
	}

	if (adev->pp_enabled) {
		amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_ENABLE_USER_STATE, &state, NULL);
	} else {
		mutex_lock(&adev->pm.mutex);
		adev->pm.dpm.user_state = state;
		mutex_unlock(&adev->pm.mutex);

		/* Can't set dpm state when the card is off */
		if (!(adev->flags & AMD_IS_PX) ||
		    (ddev->switch_power_state == DRM_SWITCH_POWER_ON))
			amdgpu_pm_compute_clocks(adev);
	}
fail:
	return count;
}

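/*
 * sysfs show handler for power_dpm_force_performance_level: reports
 * auto/low/high for either the powerplay or the legacy dpm backend.
 */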
static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev,
						       struct device_attribute *attr,
						       char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;

	if (adev->pp_enabled) {
		enum amd_dpm_forced_level level;

		level = amdgpu_dpm_get_performance_level(adev);
		return snprintf(buf, PAGE_SIZE, "%s\n",
				(level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
				(level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" : "high");
	} else {
		enum amdgpu_dpm_forced_level level;

		level = adev->pm.dpm.forced_level;
		return snprintf(buf, PAGE_SIZE, "%s\n",
				(level == AMDGPU_DPM_FORCED_LEVEL_AUTO) ? "auto" :
				(level == AMDGPU_DPM_FORCED_LEVEL_LOW) ? "low" : "high");
	}
}

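/*
 * sysfs store handler for power_dpm_force_performance_level. On the legacy
 * dpm path the level change is done under pm.mutex and is refused (with the
 * mutex dropped before bailing out) while thermal throttling is active.
 */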
static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
						       struct device_attribute *attr,
						       const char *buf,
						       size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	enum amdgpu_dpm_forced_level level;
	int ret = 0;

	if (strncmp("low", buf, strlen("low")) == 0) {
		level = AMDGPU_DPM_FORCED_LEVEL_LOW;
	} else if (strncmp("high", buf, strlen("high")) == 0) {
		level = AMDGPU_DPM_FORCED_LEVEL_HIGH;
	} else if (strncmp("auto", buf, strlen("auto")) == 0) {
		level = AMDGPU_DPM_FORCED_LEVEL_AUTO;
	} else {
		count = -EINVAL;
		goto fail;
	}

	if (adev->pp_enabled) {
		amdgpu_dpm_force_performance_level(adev, level);
	} else {
		mutex_lock(&adev->pm.mutex);
		if (adev->pm.dpm.thermal_active) {
			count = -EINVAL;
			mutex_unlock(&adev->pm.mutex);
			goto fail;
		}
		ret = amdgpu_dpm_force_performance_level(adev, level);
		if (ret)
			count = -EINVAL;
		else
			adev->pm.dpm.forced_level = level;
		mutex_unlock(&adev->pm.mutex);
	}
fail:
	return count;
}

static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, amdgpu_get_dpm_state, amdgpu_set_dpm_state);
static DEVICE_ATTR(power_dpm_force_performance_level, S_IRUGO | S_IWUSR,
		   amdgpu_get_dpm_forced_performance_level,
		   amdgpu_set_dpm_forced_performance_level);

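/*
 * hwmon temp1_input show handler: the current GPU temperature as reported
 * by dpm (hwmon expects millidegrees Celsius), or 0 when neither powerplay
 * nor a get_temperature callback is available.
 */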
static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int temp;

	if (!adev->pp_enabled && !adev->pm.funcs->get_temperature)
		temp = 0;
	else
		temp = amdgpu_dpm_get_temperature(adev);

	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}

static ssize_t amdgpu_hwmon_show_temp_thresh(struct device *dev,
					     struct device_attribute *attr,
					     char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int hyst = to_sensor_dev_attr(attr)->index;
	int temp;

	if (hyst)
		temp = adev->pm.dpm.thermal.min_temp;
	else
		temp = adev->pm.dpm.thermal.max_temp;

	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}

static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	u32 pwm_mode = 0;

	if (!adev->pp_enabled && !adev->pm.funcs->get_fan_control_mode)
		return -EINVAL;

	pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);

	/* never 0 (full-speed), fuse or smc-controlled always */
	return sprintf(buf, "%i\n", pwm_mode == FDO_PWM_MODE_STATIC ? 1 : 2);
}

static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf,
					    size_t count)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	int value;

	if (!adev->pp_enabled && !adev->pm.funcs->set_fan_control_mode)
		return -EINVAL;

	err = kstrtoint(buf, 10, &value);
	if (err)
		return err;

	switch (value) {
	case 1: /* manual, percent-based */
		amdgpu_dpm_set_fan_control_mode(adev, FDO_PWM_MODE_STATIC);
		break;
	default: /* disable */
		amdgpu_dpm_set_fan_control_mode(adev, 0);
		break;
	}

	return count;
}

static ssize_t amdgpu_hwmon_get_pwm1_min(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	return sprintf(buf, "%i\n", 0);
}

static ssize_t amdgpu_hwmon_get_pwm1_max(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	return sprintf(buf, "%i\n", 255);
}

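/* hwmon pwm1 values are 0-255; scale to/from the percent-based dpm fan interface. */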
static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	u32 value;

	err = kstrtou32(buf, 10, &value);
	if (err)
		return err;

	value = (value * 100) / 255;

	err = amdgpu_dpm_set_fan_speed_percent(adev, value);
	if (err)
		return err;

	return count;
}

static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	u32 speed;

	err = amdgpu_dpm_get_fan_speed_percent(adev, &speed);
	if (err)
		return err;

	speed = (speed * 255) / 100;

	return sprintf(buf, "%i\n", speed);
}

static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1);
static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1, amdgpu_hwmon_set_pwm1, 0);
static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1_enable, amdgpu_hwmon_set_pwm1_enable, 0);
static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, amdgpu_hwmon_get_pwm1_min, NULL, 0);
static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, amdgpu_hwmon_get_pwm1_max, NULL, 0);

static struct attribute *hwmon_attributes[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp1_crit.dev_attr.attr,
	&sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
	&sensor_dev_attr_pwm1.dev_attr.attr,
	&sensor_dev_attr_pwm1_enable.dev_attr.attr,
	&sensor_dev_attr_pwm1_min.dev_attr.attr,
	&sensor_dev_attr_pwm1_max.dev_attr.attr,
	NULL
};

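/*
 * Decide at registration time which hwmon attributes are exposed: limit and
 * fan files are hidden or made read-only depending on dpm support, fan
 * presence, and which fan callbacks the asic implements.
 */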
static umode_t hwmon_attributes_visible(struct kobject *kobj,
					struct attribute *attr, int index)
{
	struct device *dev = kobj_to_dev(kobj);
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	umode_t effective_mode = attr->mode;

	/* Skip limit attributes if DPM is not enabled */
	if (!adev->pm.dpm_enabled &&
	    (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
		return 0;

	if (adev->pp_enabled)
		return effective_mode;

	/* Skip fan attributes if fan is not present */
	if (adev->pm.no_fan &&
	    (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
		return 0;

	/* mask fan attributes if we have no bindings for this asic to expose */
	if ((!adev->pm.funcs->get_fan_speed_percent &&
	     attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
	    (!adev->pm.funcs->get_fan_control_mode &&
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
		effective_mode &= ~S_IRUGO;

	if ((!adev->pm.funcs->set_fan_speed_percent &&
	     attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
	    (!adev->pm.funcs->set_fan_control_mode &&
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
		effective_mode &= ~S_IWUSR;

	/* hide max/min values if we can't both query and manage the fan */
	if ((!adev->pm.funcs->set_fan_speed_percent &&
	     !adev->pm.funcs->get_fan_speed_percent) &&
	    (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
		return 0;

	return effective_mode;
}

static const struct attribute_group hwmon_attrgroup = {
	.attrs = hwmon_attributes,
	.is_visible = hwmon_attributes_visible,
};

static const struct attribute_group *hwmon_groups[] = {
	&hwmon_attrgroup,
	NULL
};

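/*
 * Thermal interrupt work handler: enter the internal thermal state while the
 * GPU runs hot, drop back to the user state once it has cooled off, then
 * recompute clocks.
 */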
void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device,
			     pm.dpm.thermal.work);
	/* switch to the thermal state */
	enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;

	if (!adev->pm.dpm_enabled)
		return;

	if (adev->pm.funcs->get_temperature) {
		int temp = amdgpu_dpm_get_temperature(adev);

		if (temp < adev->pm.dpm.thermal.min_temp)
			/* switch back the user state */
			dpm_state = adev->pm.dpm.user_state;
	} else {
		if (adev->pm.dpm.thermal.high_to_low)
			/* switch back the user state */
			dpm_state = adev->pm.dpm.user_state;
	}
	mutex_lock(&adev->pm.mutex);
	if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
		adev->pm.dpm.thermal_active = true;
	else
		adev->pm.dpm.thermal_active = false;
	adev->pm.dpm.state = dpm_state;
	mutex_unlock(&adev->pm.mutex);

	amdgpu_pm_compute_clocks(adev);
}

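/*
 * Pick the power state that best matches the requested dpm state, honouring
 * single-display-only states and falling back through related states when
 * no exact match exists.
 */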
static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev,
						     enum amd_pm_state_type dpm_state)
{
	int i;
	struct amdgpu_ps *ps;
	u32 ui_class;
	bool single_display = (adev->pm.dpm.new_active_crtc_count < 2) ?
		true : false;

	/* check if the vblank period is too short to adjust the mclk */
	if (single_display && adev->pm.funcs->vblank_too_short) {
		if (amdgpu_dpm_vblank_too_short(adev))
			single_display = false;
	}

	/* certain older asics have a separate 3D performance state,
	 * so try that first if the user selected performance
	 */
	if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
		dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
	/* balanced states don't exist at the moment */
	if (dpm_state == POWER_STATE_TYPE_BALANCED)
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;
restart_search:
	/* Pick the best power state based on current conditions */
	for (i = 0; i < adev->pm.dpm.num_ps; i++) {
		ps = &adev->pm.dpm.ps[i];
		ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
		switch (dpm_state) {
		/* user states */
		case POWER_STATE_TYPE_BATTERY:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_BALANCED:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_PERFORMANCE:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		/* internal states */
		case POWER_STATE_TYPE_INTERNAL_UVD:
			if (adev->pm.dpm.uvd_ps)
				return adev->pm.dpm.uvd_ps;
			else
				break;
		case POWER_STATE_TYPE_INTERNAL_UVD_SD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_BOOT:
			return adev->pm.dpm.boot_ps;
		case POWER_STATE_TYPE_INTERNAL_THERMAL:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ACPI:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ULV:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_3DPERF:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
				return ps;
			break;
		default:
			break;
		}
	}
	/* use a fallback state if we didn't match */
	switch (dpm_state) {
	case POWER_STATE_TYPE_INTERNAL_UVD_SD:
		dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_UVD_HD:
	case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
	case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
		if (adev->pm.dpm.uvd_ps) {
			return adev->pm.dpm.uvd_ps;
		} else {
			dpm_state = POWER_STATE_TYPE_PERFORMANCE;
			goto restart_search;
		}
	case POWER_STATE_TYPE_INTERNAL_THERMAL:
		dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_ACPI:
		dpm_state = POWER_STATE_TYPE_BATTERY;
		goto restart_search;
	case POWER_STATE_TYPE_BATTERY:
	case POWER_STATE_TYPE_BALANCED:
	case POWER_STATE_TYPE_INTERNAL_3DPERF:
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;
		goto restart_search;
	default:
		break;
	}

	return NULL;
}

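/*
 * Program the requested power state. Caller must hold pm.mutex; the ring
 * lock is taken while the rings are drained and the new state is committed.
 */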
static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
{
	int i;
	struct amdgpu_ps *ps;
	enum amd_pm_state_type dpm_state;
	int ret;

	/* if dpm init failed */
	if (!adev->pm.dpm_enabled)
		return;

	if (adev->pm.dpm.user_state != adev->pm.dpm.state) {
		/* add other state override checks here */
		if ((!adev->pm.dpm.thermal_active) &&
		    (!adev->pm.dpm.uvd_active))
			adev->pm.dpm.state = adev->pm.dpm.user_state;
	}
	dpm_state = adev->pm.dpm.state;

	ps = amdgpu_dpm_pick_power_state(adev, dpm_state);
	if (ps)
		adev->pm.dpm.requested_ps = ps;
	else
		return;

	/* no need to reprogram if nothing changed unless we are on BTC+ */
	if (adev->pm.dpm.current_ps == adev->pm.dpm.requested_ps) {
		/* vce just modifies an existing state so force a change */
		if (ps->vce_active != adev->pm.dpm.vce_active)
			goto force;
		if (adev->flags & AMD_IS_APU) {
			/* for APUs if the num crtcs changed but state is the same,
			 * all we need to do is update the display configuration.
			 */
			if (adev->pm.dpm.new_active_crtcs != adev->pm.dpm.current_active_crtcs) {
				/* update display watermarks based on new power state */
				amdgpu_display_bandwidth_update(adev);
				/* update displays */
				amdgpu_dpm_display_configuration_changed(adev);
				adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
				adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
			}
			return;
		} else {
			/* for BTC+ if the num crtcs hasn't changed and state is the same,
			 * nothing to do, if the num crtcs is > 1 and state is the same,
			 * update display configuration.
			 */
			if (adev->pm.dpm.new_active_crtcs ==
			    adev->pm.dpm.current_active_crtcs) {
				return;
			} else if ((adev->pm.dpm.current_active_crtc_count > 1) &&
				   (adev->pm.dpm.new_active_crtc_count > 1)) {
				/* update display watermarks based on new power state */
				amdgpu_display_bandwidth_update(adev);
				/* update displays */
				amdgpu_dpm_display_configuration_changed(adev);
				adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
				adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
				return;
			}
		}
	}

force:
	if (amdgpu_dpm == 1) {
		printk("switching from power state:\n");
		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
		printk("switching to power state:\n");
		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps);
	}

	mutex_lock(&adev->ring_lock);

	/* update whether vce is active */
	ps->vce_active = adev->pm.dpm.vce_active;

	ret = amdgpu_dpm_pre_set_power_state(adev);
	if (ret)
		goto done;

	/* update display watermarks based on new power state */
	amdgpu_display_bandwidth_update(adev);
	/* update displays */
	amdgpu_dpm_display_configuration_changed(adev);

	adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
	adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;

	/* wait for the rings to drain */
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (ring && ring->ready)
			amdgpu_fence_wait_empty(ring);
	}

	/* program the new power state */
	amdgpu_dpm_set_power_state(adev);

	/* update current power state */
	adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps;

	amdgpu_dpm_post_set_power_state(adev);

	if (adev->pm.funcs->force_performance_level) {
		if (adev->pm.dpm.thermal_active) {
			enum amdgpu_dpm_forced_level level = adev->pm.dpm.forced_level;
			/* force low perf level for thermal */
			amdgpu_dpm_force_performance_level(adev, AMDGPU_DPM_FORCED_LEVEL_LOW);
			/* save the user's level */
			adev->pm.dpm.forced_level = level;
		} else {
			/* otherwise, user selected level */
			amdgpu_dpm_force_performance_level(adev, adev->pm.dpm.forced_level);
		}
	}

done:
	mutex_unlock(&adev->ring_lock);
}

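/*
 * Gate/ungate UVD power. With powerplay (or a powergate_uvd callback) the
 * block is powergated directly; otherwise the UVD state is requested
 * through the dpm state machine.
 */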
void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
{
	if (adev->pp_enabled)
		amdgpu_dpm_powergate_uvd(adev, !enable);
	else {
		if (adev->pm.funcs->powergate_uvd) {
			mutex_lock(&adev->pm.mutex);
			/* enable/disable UVD */
			amdgpu_dpm_powergate_uvd(adev, !enable);
			mutex_unlock(&adev->pm.mutex);
		} else {
			if (enable) {
				mutex_lock(&adev->pm.mutex);
				adev->pm.dpm.uvd_active = true;
				adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
				mutex_unlock(&adev->pm.mutex);
			} else {
				mutex_lock(&adev->pm.mutex);
				adev->pm.dpm.uvd_active = false;
				mutex_unlock(&adev->pm.mutex);
			}
			amdgpu_pm_compute_clocks(adev);
		}
	}
}

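/* Gate/ungate VCE power; mirrors amdgpu_dpm_enable_uvd() for the VCE block. */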
void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
{
	if (adev->pp_enabled)
		amdgpu_dpm_powergate_vce(adev, !enable);
	else {
		if (adev->pm.funcs->powergate_vce) {
			mutex_lock(&adev->pm.mutex);
			amdgpu_dpm_powergate_vce(adev, !enable);
			mutex_unlock(&adev->pm.mutex);
		} else {
			if (enable) {
				mutex_lock(&adev->pm.mutex);
				adev->pm.dpm.vce_active = true;
				/* XXX select vce level based on ring/task */
				adev->pm.dpm.vce_level = AMDGPU_VCE_LEVEL_AC_ALL;
				mutex_unlock(&adev->pm.mutex);
			} else {
				mutex_lock(&adev->pm.mutex);
				adev->pm.dpm.vce_active = false;
				mutex_unlock(&adev->pm.mutex);
			}
			amdgpu_pm_compute_clocks(adev);
		}
	}
}

void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
{
	int i;

	if (adev->pp_enabled)
		/* TO DO */
		return;

	for (i = 0; i < adev->pm.dpm.num_ps; i++)
		amdgpu_dpm_print_power_state(adev, &adev->pm.dpm.ps[i]);
}

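/*
 * Register the hwmon device, the power_dpm_state and
 * power_dpm_force_performance_level sysfs files, and the debugfs entry.
 * Safe to call more than once; repeat calls are no-ops.
 */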
int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
{
	int ret;

	if (adev->pm.sysfs_initialized)
		return 0;

	if (!adev->pp_enabled) {
		if (adev->pm.funcs->get_temperature == NULL)
			return 0;
	}

	adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
								   DRIVER_NAME, adev,
								   hwmon_groups);
	if (IS_ERR(adev->pm.int_hwmon_dev)) {
		ret = PTR_ERR(adev->pm.int_hwmon_dev);
		dev_err(adev->dev,
			"Unable to register hwmon device: %d\n", ret);
		return ret;
	}

	ret = device_create_file(adev->dev, &dev_attr_power_dpm_state);
	if (ret) {
		DRM_ERROR("failed to create device file for dpm state\n");
		return ret;
	}
	ret = device_create_file(adev->dev, &dev_attr_power_dpm_force_performance_level);
	if (ret) {
		DRM_ERROR("failed to create device file for dpm force performance level\n");
		return ret;
	}
	ret = amdgpu_debugfs_pm_init(adev);
	if (ret) {
		DRM_ERROR("Failed to register debugfs file for dpm!\n");
		return ret;
	}

	adev->pm.sysfs_initialized = true;

	return 0;
}

void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
{
	if (adev->pm.int_hwmon_dev)
		hwmon_device_unregister(adev->pm.int_hwmon_dev);
	device_remove_file(adev->dev, &dev_attr_power_dpm_state);
	device_remove_file(adev->dev, &dev_attr_power_dpm_force_performance_level);
}

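/*
 * Central clock-recompute entry point: refresh display bandwidth, the
 * active crtc mask/count and the AC/DC status, then hand the new
 * conditions to powerplay or the legacy dpm state machine.
 */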
void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (!adev->pm.dpm_enabled)
		return;

	if (adev->pp_enabled) {
		int i = 0;

		amdgpu_display_bandwidth_update(adev);
		mutex_lock(&adev->ring_lock);
		for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
			struct amdgpu_ring *ring = adev->rings[i];
			if (ring && ring->ready)
				amdgpu_fence_wait_empty(ring);
		}
		mutex_unlock(&adev->ring_lock);

		amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_DISPLAY_CONFIG_CHANGE, NULL, NULL);
	} else {
		mutex_lock(&adev->pm.mutex);
		adev->pm.dpm.new_active_crtcs = 0;
		adev->pm.dpm.new_active_crtc_count = 0;
		if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
			list_for_each_entry(crtc,
					    &ddev->mode_config.crtc_list, head) {
				amdgpu_crtc = to_amdgpu_crtc(crtc);
				if (crtc->enabled) {
					adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id);
					adev->pm.dpm.new_active_crtc_count++;
				}
			}
		}
		/* update battery/ac status */
		if (power_supply_is_system_supplied() > 0)
			adev->pm.dpm.ac_power = true;
		else
			adev->pm.dpm.ac_power = false;

		amdgpu_dpm_change_power_state_locked(adev);

		mutex_unlock(&adev->pm.mutex);
	}
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;

	if (!adev->pm.dpm_enabled) {
		seq_printf(m, "dpm not enabled\n");
		return 0;
	}
	if (adev->pp_enabled) {
		amdgpu_dpm_debugfs_print_current_performance_level(adev, m);
	} else {
		mutex_lock(&adev->pm.mutex);
		if (adev->pm.funcs->debugfs_print_current_performance_level)
			amdgpu_dpm_debugfs_print_current_performance_level(adev, m);
		else
			seq_printf(m, "Debugfs support not implemented for this asic\n");
		mutex_unlock(&adev->pm.mutex);
	}

	return 0;
}

static struct drm_info_list amdgpu_pm_info_list[] = {
	{"amdgpu_pm_info", amdgpu_debugfs_pm_info, 0, NULL},
};
#endif

static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_pm_info_list, ARRAY_SIZE(amdgpu_pm_info_list));
#else
	return 0;
#endif
}