/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Rafał Miłecki <zajec5@gmail.com>
 *          Alex Deucher <alexdeucher@gmail.com>
 */
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_drv.h"
#include "amdgpu_pm.h"
#include "amdgpu_dpm.h"
#include "atom.h"
#include <linux/power_supply.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/nospec.h>

#include "amd_powerplay.h"

static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev);
static const struct cg_flag_name clocks[] = {
	{AMD_CG_SUPPORT_GFX_MGCG, "Graphics Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_MGLS, "Graphics Medium Grain memory Light Sleep"},
	{AMD_CG_SUPPORT_GFX_CGCG, "Graphics Coarse Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_CGLS, "Graphics Coarse Grain memory Light Sleep"},
	{AMD_CG_SUPPORT_GFX_CGTS, "Graphics Coarse Grain Tree Shader Clock Gating"},
	{AMD_CG_SUPPORT_GFX_CGTS_LS, "Graphics Coarse Grain Tree Shader Light Sleep"},
	{AMD_CG_SUPPORT_GFX_CP_LS, "Graphics Command Processor Light Sleep"},
	{AMD_CG_SUPPORT_GFX_RLC_LS, "Graphics Run List Controller Light Sleep"},
	{AMD_CG_SUPPORT_GFX_3D_CGCG, "Graphics 3D Coarse Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_3D_CGLS, "Graphics 3D Coarse Grain memory Light Sleep"},
	{AMD_CG_SUPPORT_MC_LS, "Memory Controller Light Sleep"},
	{AMD_CG_SUPPORT_MC_MGCG, "Memory Controller Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_SDMA_LS, "System Direct Memory Access Light Sleep"},
	{AMD_CG_SUPPORT_SDMA_MGCG, "System Direct Memory Access Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_BIF_MGCG, "Bus Interface Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_BIF_LS, "Bus Interface Light Sleep"},
	{AMD_CG_SUPPORT_UVD_MGCG, "Unified Video Decoder Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_VCE_MGCG, "Video Compression Engine Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_HDP_LS, "Host Data Path Light Sleep"},
	{AMD_CG_SUPPORT_HDP_MGCG, "Host Data Path Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_DRM_MGCG, "Digital Rights Management Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_DRM_LS, "Digital Rights Management Light Sleep"},
	{AMD_CG_SUPPORT_ROM_MGCG, "Rom Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_DF_MGCG, "Data Fabric Medium Grain Clock Gating"},
	{0, NULL},
};
void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
{
	if (adev->pm.dpm_enabled) {
		mutex_lock(&adev->pm.mutex);
		if (power_supply_is_system_supplied() > 0)
			adev->pm.dpm.ac_power = true;
		else
			adev->pm.dpm.ac_power = false;
		if (adev->powerplay.pp_funcs->enable_bapm)
			amdgpu_dpm_enable_bapm(adev, adev->pm.dpm.ac_power);
		mutex_unlock(&adev->pm.mutex);
	}
}
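
/*
 * DOC: power_dpm_state
 *
 * The power_dpm_state file is a legacy sysfs interface for adjusting the
 * dynamic power management policy.  It accepts the strings "battery",
 * "balanced" and "performance"; reading it returns the currently selected
 * policy.  With a PowerPlay backend the request is dispatched to the
 * hwmgr, otherwise the user state is cached and the clocks recomputed
 * directly.
 */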
static ssize_t amdgpu_get_dpm_state(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	enum amd_pm_state_type pm;

	if (adev->powerplay.pp_funcs->get_current_power_state)
		pm = amdgpu_dpm_get_current_power_state(adev);
	else
		pm = adev->pm.dpm.user_state;

	return snprintf(buf, PAGE_SIZE, "%s\n",
			(pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
			(pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
}
static ssize_t amdgpu_set_dpm_state(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	enum amd_pm_state_type state;

	if (strncmp("battery", buf, strlen("battery")) == 0)
		state = POWER_STATE_TYPE_BATTERY;
	else if (strncmp("balanced", buf, strlen("balanced")) == 0)
		state = POWER_STATE_TYPE_BALANCED;
	else if (strncmp("performance", buf, strlen("performance")) == 0)
		state = POWER_STATE_TYPE_PERFORMANCE;
	else {
		count = -EINVAL;
		goto fail;
	}

	if (adev->powerplay.pp_funcs->dispatch_tasks) {
		amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_ENABLE_USER_STATE, &state, NULL);
	} else {
		mutex_lock(&adev->pm.mutex);
		adev->pm.dpm.user_state = state;
		mutex_unlock(&adev->pm.mutex);

		/* Can't set dpm state when the card is off */
		if (!(adev->flags & AMD_IS_PX) ||
		    (ddev->switch_power_state == DRM_SWITCH_POWER_ON))
			amdgpu_pm_compute_clocks(adev);
	}
fail:
	return count;
}
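
/*
 * DOC: power_dpm_force_performance_level
 *
 * The power_dpm_force_performance_level file overrides the heuristic DPM
 * level selection.  Valid writes are "auto", "low", "high", "manual",
 * "profile_standard", "profile_min_sclk", "profile_min_mclk",
 * "profile_peak" and "profile_exit"; reading it returns the level
 * currently in force.  Requests are rejected while a thermal override is
 * active.
 */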
static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev,
						       struct device_attribute *attr,
						       char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	enum amd_dpm_forced_level level = 0xff;

	if ((adev->flags & AMD_IS_PX) &&
	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
		return snprintf(buf, PAGE_SIZE, "off\n");

	if (adev->powerplay.pp_funcs->get_performance_level)
		level = amdgpu_dpm_get_performance_level(adev);
	else
		level = adev->pm.dpm.forced_level;

	return snprintf(buf, PAGE_SIZE, "%s\n",
			(level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
			(level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
			(level == AMD_DPM_FORCED_LEVEL_HIGH) ? "high" :
			(level == AMD_DPM_FORCED_LEVEL_MANUAL) ? "manual" :
			(level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) ? "profile_standard" :
			(level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) ? "profile_min_sclk" :
			(level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) ? "profile_min_mclk" :
			(level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) ? "profile_peak" :
			"unknown");
}
static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
						       struct device_attribute *attr,
						       const char *buf,
						       size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	enum amd_dpm_forced_level level;
	enum amd_dpm_forced_level current_level = 0xff;
	int ret = 0;

	/* Can't force performance level when the card is off */
	if ((adev->flags & AMD_IS_PX) &&
	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
		return -EINVAL;

	if (adev->powerplay.pp_funcs->get_performance_level)
		current_level = amdgpu_dpm_get_performance_level(adev);

	if (strncmp("low", buf, strlen("low")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_LOW;
	} else if (strncmp("high", buf, strlen("high")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_HIGH;
	} else if (strncmp("auto", buf, strlen("auto")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_AUTO;
	} else if (strncmp("manual", buf, strlen("manual")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_MANUAL;
	} else if (strncmp("profile_exit", buf, strlen("profile_exit")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_EXIT;
	} else if (strncmp("profile_standard", buf, strlen("profile_standard")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD;
	} else if (strncmp("profile_min_sclk", buf, strlen("profile_min_sclk")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK;
	} else if (strncmp("profile_min_mclk", buf, strlen("profile_min_mclk")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK;
	} else if (strncmp("profile_peak", buf, strlen("profile_peak")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
	} else {
		count = -EINVAL;
		goto fail;
	}

	if (current_level == level)
		return count;

	if (adev->powerplay.pp_funcs->force_performance_level) {
		mutex_lock(&adev->pm.mutex);
		if (adev->pm.dpm.thermal_active) {
			count = -EINVAL;
			mutex_unlock(&adev->pm.mutex);
			goto fail;
		}
		ret = amdgpu_dpm_force_performance_level(adev, level);
		if (ret)
			count = -EINVAL;
		else
			adev->pm.dpm.forced_level = level;
		mutex_unlock(&adev->pm.mutex);
	}

fail:
	return count;
}
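
/*
 * DOC: pp_num_states, pp_cur_state, pp_force_state
 *
 * These files expose the power states known to the PowerPlay backend:
 * pp_num_states lists them, pp_cur_state reports the index of the active
 * one, and writing an index to pp_force_state pins that state (writing an
 * empty string releases the pin).  Boot and default states cannot be
 * forced.
 */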
static ssize_t amdgpu_get_pp_num_states(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	struct pp_states_info data;
	int i, buf_len;

	if (adev->powerplay.pp_funcs->get_pp_num_states)
		amdgpu_dpm_get_pp_num_states(adev, &data);

	buf_len = snprintf(buf, PAGE_SIZE, "states: %d\n", data.nums);
	for (i = 0; i < data.nums; i++)
		buf_len += snprintf(buf + buf_len, PAGE_SIZE, "%d %s\n", i,
				(data.states[i] == POWER_STATE_TYPE_INTERNAL_BOOT) ? "boot" :
				(data.states[i] == POWER_STATE_TYPE_BATTERY) ? "battery" :
				(data.states[i] == POWER_STATE_TYPE_BALANCED) ? "balanced" :
				(data.states[i] == POWER_STATE_TYPE_PERFORMANCE) ? "performance" : "default");

	return buf_len;
}
static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	struct pp_states_info data;
	enum amd_pm_state_type pm = 0;
	int i = 0;

	if (adev->powerplay.pp_funcs->get_current_power_state
		 && adev->powerplay.pp_funcs->get_pp_num_states) {
		pm = amdgpu_dpm_get_current_power_state(adev);
		amdgpu_dpm_get_pp_num_states(adev, &data);

		for (i = 0; i < data.nums; i++) {
			if (pm == data.states[i])
				break;
		}

		if (i == data.nums)
			i = -EINVAL;
	}

	return snprintf(buf, PAGE_SIZE, "%d\n", i);
}
static ssize_t amdgpu_get_pp_force_state(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;

	if (adev->pp_force_state_enabled)
		return amdgpu_get_pp_cur_state(dev, attr, buf);
	else
		return snprintf(buf, PAGE_SIZE, "\n");
}
static ssize_t amdgpu_set_pp_force_state(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	enum amd_pm_state_type state = 0;
	unsigned long idx;
	int ret;

	if (strlen(buf) == 1)
		adev->pp_force_state_enabled = false;
	else if (adev->powerplay.pp_funcs->dispatch_tasks &&
			adev->powerplay.pp_funcs->get_pp_num_states) {
		struct pp_states_info data;

		ret = kstrtoul(buf, 0, &idx);
		if (ret || idx >= ARRAY_SIZE(data.states)) {
			count = -EINVAL;
			goto fail;
		}
		idx = array_index_nospec(idx, ARRAY_SIZE(data.states));

		amdgpu_dpm_get_pp_num_states(adev, &data);
		state = data.states[idx];
		/* only set user selected power states */
		if (state != POWER_STATE_TYPE_INTERNAL_BOOT &&
		    state != POWER_STATE_TYPE_DEFAULT) {
			amdgpu_dpm_dispatch_task(adev,
					AMD_PP_TASK_ENABLE_USER_STATE, &state, NULL);
			adev->pp_force_state_enabled = true;
		}
	}
fail:
	return count;
}
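
/*
 * DOC: pp_table
 *
 * The pp_table file gives raw read/write access to the binary PowerPlay
 * table that drives the hwmgr.  Reads are truncated to one page.
 */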
static ssize_t amdgpu_get_pp_table(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	char *table = NULL;
	int size;

	if (adev->powerplay.pp_funcs->get_pp_table)
		size = amdgpu_dpm_get_pp_table(adev, &table);
	else
		return 0;

	if (size >= PAGE_SIZE)
		size = PAGE_SIZE - 1;

	memcpy(buf, table, size);

	return size;
}
static ssize_t amdgpu_set_pp_table(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;

	if (adev->powerplay.pp_funcs->set_pp_table)
		amdgpu_dpm_set_pp_table(adev, buf, count);

	return count;
}
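
/*
 * DOC: pp_dpm_sclk, pp_dpm_mclk, pp_dpm_pcie
 *
 * Reading these files prints the available engine clock, memory clock and
 * PCIe DPM levels.  Writing a string of single-digit level indices
 * (e.g. "4 5 6") builds a bitmask from those digits and restricts the DPM
 * range to the selected levels.
 */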
static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;

	if (adev->powerplay.pp_funcs->print_clock_levels)
		return amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf);
	else
		return snprintf(buf, PAGE_SIZE, "\n");
}
static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int ret;
	long level;
	uint32_t i, mask = 0;
	char sub_str[2];

	for (i = 0; i < strlen(buf); i++) {
		if (*(buf + i) == '\n')
			continue;

		sub_str[0] = *(buf + i);
		sub_str[1] = '\0';
		ret = kstrtol(sub_str, 0, &level);

		if (ret) {
			count = -EINVAL;
			goto fail;
		}
		mask |= 1 << level;
	}

	if (adev->powerplay.pp_funcs->force_clock_level)
		amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);

fail:
	return count;
}
static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;

	if (adev->powerplay.pp_funcs->print_clock_levels)
		return amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf);
	else
		return snprintf(buf, PAGE_SIZE, "\n");
}
static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int ret;
	long level;
	uint32_t i, mask = 0;
	char sub_str[2];

	for (i = 0; i < strlen(buf); i++) {
		if (*(buf + i) == '\n')
			continue;

		sub_str[0] = *(buf + i);
		sub_str[1] = '\0';
		ret = kstrtol(sub_str, 0, &level);

		if (ret) {
			count = -EINVAL;
			goto fail;
		}
		mask |= 1 << level;
	}

	if (adev->powerplay.pp_funcs->force_clock_level)
		amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask);

fail:
	return count;
}
static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;

	if (adev->powerplay.pp_funcs->print_clock_levels)
		return amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf);
	else
		return snprintf(buf, PAGE_SIZE, "\n");
}
static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int ret;
	long level;
	uint32_t i, mask = 0;
	char sub_str[2];

	for (i = 0; i < strlen(buf); i++) {
		if (*(buf + i) == '\n')
			continue;

		sub_str[0] = *(buf + i);
		sub_str[1] = '\0';
		ret = kstrtol(sub_str, 0, &level);

		if (ret) {
			count = -EINVAL;
			goto fail;
		}
		mask |= 1 << level;
	}

	if (adev->powerplay.pp_funcs->force_clock_level)
		amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask);

fail:
	return count;
}
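
/*
 * DOC: pp_sclk_od, pp_mclk_od
 *
 * These files expose the engine and memory clock overdrive settings as a
 * percentage above the default clock.  Writing a value pushes it to the
 * hwmgr and triggers a power state readjustment.
 */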
static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	uint32_t value = 0;

	if (adev->powerplay.pp_funcs->get_sclk_od)
		value = amdgpu_dpm_get_sclk_od(adev);

	return snprintf(buf, PAGE_SIZE, "%d\n", value);
}
static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int ret;
	long int value;

	ret = kstrtol(buf, 0, &value);

	if (ret) {
		count = -EINVAL;
		goto fail;
	}

	if (adev->powerplay.pp_funcs->set_sclk_od)
		amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);

	if (adev->powerplay.pp_funcs->dispatch_tasks) {
		amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL, NULL);
	} else {
		adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
		amdgpu_pm_compute_clocks(adev);
	}

fail:
	return count;
}
static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	uint32_t value = 0;

	if (adev->powerplay.pp_funcs->get_mclk_od)
		value = amdgpu_dpm_get_mclk_od(adev);

	return snprintf(buf, PAGE_SIZE, "%d\n", value);
}
static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int ret;
	long int value;

	ret = kstrtol(buf, 0, &value);

	if (ret) {
		count = -EINVAL;
		goto fail;
	}

	if (adev->powerplay.pp_funcs->set_mclk_od)
		amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);

	if (adev->powerplay.pp_funcs->dispatch_tasks) {
		amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL, NULL);
	} else {
		adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
		amdgpu_pm_compute_clocks(adev);
	}

fail:
	return count;
}
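
/*
 * DOC: pp_gfx_power_profile, pp_compute_power_profile
 *
 * These files expose the graphics and compute power profiles as five
 * space-separated fields: minimum sclk (MHz), minimum mclk (MHz),
 * activity threshold (%), up hysteresis and down hysteresis.  Writing
 * "reset" restores the default profile and writing "set" applies the
 * cached one.
 */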
static ssize_t amdgpu_get_pp_power_profile(struct device *dev,
		char *buf, struct amd_pp_profile *query)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int ret = 0xff;

	if (adev->powerplay.pp_funcs->get_power_profile_state)
		ret = amdgpu_dpm_get_power_profile_state(
				adev, query);

	if (ret != 0)
		return ret;

	return snprintf(buf, PAGE_SIZE,
			"%d %d %d %d %d\n",
			query->min_sclk / 100,
			query->min_mclk / 100,
			query->activity_threshold,
			query->up_hyst,
			query->down_hyst);
}
static ssize_t amdgpu_get_pp_gfx_power_profile(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct amd_pp_profile query = {0};

	query.type = AMD_PP_GFX_PROFILE;

	return amdgpu_get_pp_power_profile(dev, buf, &query);
}
static ssize_t amdgpu_get_pp_compute_power_profile(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct amd_pp_profile query = {0};

	query.type = AMD_PP_COMPUTE_PROFILE;

	return amdgpu_get_pp_power_profile(dev, buf, &query);
}
static ssize_t amdgpu_set_pp_power_profile(struct device *dev,
		const char *buf,
		size_t count,
		struct amd_pp_profile *request)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	uint32_t loop = 0;
	char *sub_str, buf_cpy[128], *tmp_str;
	const char delimiter[3] = {' ', '\n', '\0'};
	long int value;
	int ret = 0xff;

	if (strncmp("reset", buf, strlen("reset")) == 0) {
		if (adev->powerplay.pp_funcs->reset_power_profile_state)
			ret = amdgpu_dpm_reset_power_profile_state(
					adev, request);
		if (ret) {
			count = -EINVAL;
			goto fail;
		}
		return count;
	}

	if (strncmp("set", buf, strlen("set")) == 0) {
		if (adev->powerplay.pp_funcs->set_power_profile_state)
			ret = amdgpu_dpm_set_power_profile_state(
					adev, request);
		if (ret) {
			count = -EINVAL;
			goto fail;
		}
		return count;
	}

	if (count + 1 >= 128) {
		count = -EINVAL;
		goto fail;
	}

	memcpy(buf_cpy, buf, count + 1);
	tmp_str = buf_cpy;

	while (tmp_str[0]) {
		sub_str = strsep(&tmp_str, delimiter);
		ret = kstrtol(sub_str, 0, &value);
		if (ret) {
			count = -EINVAL;
			goto fail;
		}

		switch (loop) {
		case 0:
			/* input unit is MHz; convert to dpm table unit (10 kHz) */
			request->min_sclk = (uint32_t)value * 100;
			break;
		case 1:
			/* input unit is MHz; convert to dpm table unit (10 kHz) */
			request->min_mclk = (uint32_t)value * 100;
			break;
		case 2:
			request->activity_threshold = (uint16_t)value;
			break;
		case 3:
			request->up_hyst = (uint8_t)value;
			break;
		case 4:
			request->down_hyst = (uint8_t)value;
			break;
		default:
			break;
		}

		loop++;
	}

	if (adev->powerplay.pp_funcs->set_power_profile_state)
		ret = amdgpu_dpm_set_power_profile_state(adev, request);

	if (ret)
		count = -EINVAL;

fail:
	return count;
}
static ssize_t amdgpu_set_pp_gfx_power_profile(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct amd_pp_profile request = {0};

	request.type = AMD_PP_GFX_PROFILE;

	return amdgpu_set_pp_power_profile(dev, buf, count, &request);
}
static ssize_t amdgpu_set_pp_compute_power_profile(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct amd_pp_profile request = {0};

	request.type = AMD_PP_COMPUTE_PROFILE;

	return amdgpu_set_pp_power_profile(dev, buf, count, &request);
}
static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, amdgpu_get_dpm_state, amdgpu_set_dpm_state);
static DEVICE_ATTR(power_dpm_force_performance_level, S_IRUGO | S_IWUSR,
		   amdgpu_get_dpm_forced_performance_level,
		   amdgpu_set_dpm_forced_performance_level);
static DEVICE_ATTR(pp_num_states, S_IRUGO, amdgpu_get_pp_num_states, NULL);
static DEVICE_ATTR(pp_cur_state, S_IRUGO, amdgpu_get_pp_cur_state, NULL);
static DEVICE_ATTR(pp_force_state, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_force_state,
		amdgpu_set_pp_force_state);
static DEVICE_ATTR(pp_table, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_table,
		amdgpu_set_pp_table);
static DEVICE_ATTR(pp_dpm_sclk, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_dpm_sclk,
		amdgpu_set_pp_dpm_sclk);
static DEVICE_ATTR(pp_dpm_mclk, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_dpm_mclk,
		amdgpu_set_pp_dpm_mclk);
static DEVICE_ATTR(pp_dpm_pcie, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_dpm_pcie,
		amdgpu_set_pp_dpm_pcie);
static DEVICE_ATTR(pp_sclk_od, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_sclk_od,
		amdgpu_set_pp_sclk_od);
static DEVICE_ATTR(pp_mclk_od, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_mclk_od,
		amdgpu_set_pp_mclk_od);
static DEVICE_ATTR(pp_gfx_power_profile, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_gfx_power_profile,
		amdgpu_set_pp_gfx_power_profile);
static DEVICE_ATTR(pp_compute_power_profile, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_compute_power_profile,
		amdgpu_set_pp_compute_power_profile);
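
/*
 * hwmon interface: the attributes below follow the standard hwmon ABI
 * (temp1_input/temp1_crit in millidegrees Celsius, pwm1 in the 0-255
 * range, fan1_input in RPM); visibility is filtered per ASIC in
 * hwmon_attributes_visible().
 */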
static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	struct drm_device *ddev = adev->ddev;
	int temp;

	/* Can't get temperature when the card is off */
	if ((adev->flags & AMD_IS_PX) &&
	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
		return -EINVAL;

	if (!adev->powerplay.pp_funcs->get_temperature)
		temp = 0;
	else
		temp = amdgpu_dpm_get_temperature(adev);

	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}
static ssize_t amdgpu_hwmon_show_temp_thresh(struct device *dev,
					     struct device_attribute *attr,
					     char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int hyst = to_sensor_dev_attr(attr)->index;
	int temp;

	if (hyst)
		temp = adev->pm.dpm.thermal.min_temp;
	else
		temp = adev->pm.dpm.thermal.max_temp;

	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}
static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	u32 pwm_mode = 0;

	if (!adev->powerplay.pp_funcs->get_fan_control_mode)
		return -EINVAL;

	pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);

	return sprintf(buf, "%i\n", pwm_mode);
}
static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf,
					    size_t count)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	int value;

	if (!adev->powerplay.pp_funcs->set_fan_control_mode)
		return -EINVAL;

	err = kstrtoint(buf, 10, &value);
	if (err)
		return err;

	amdgpu_dpm_set_fan_control_mode(adev, value);

	return count;
}
static ssize_t amdgpu_hwmon_get_pwm1_min(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	return sprintf(buf, "%i\n", 0);
}
static ssize_t amdgpu_hwmon_get_pwm1_max(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	return sprintf(buf, "%i\n", 255);
}
static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	u32 value;

	err = kstrtou32(buf, 10, &value);
	if (err)
		return err;

	value = (value * 100) / 255;

	if (adev->powerplay.pp_funcs->set_fan_speed_percent) {
		err = amdgpu_dpm_set_fan_speed_percent(adev, value);
		if (err)
			return err;
	}

	return count;
}
static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	u32 speed = 0;

	if (adev->powerplay.pp_funcs->get_fan_speed_percent) {
		err = amdgpu_dpm_get_fan_speed_percent(adev, &speed);
		if (err)
			return err;
	}

	speed = (speed * 255) / 100;

	return sprintf(buf, "%i\n", speed);
}
static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	u32 speed = 0;

	if (adev->powerplay.pp_funcs->get_fan_speed_rpm) {
		err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
		if (err)
			return err;
	}

	return sprintf(buf, "%i\n", speed);
}
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1);
static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1, amdgpu_hwmon_set_pwm1, 0);
static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1_enable, amdgpu_hwmon_set_pwm1_enable, 0);
static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, amdgpu_hwmon_get_pwm1_min, NULL, 0);
static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, amdgpu_hwmon_get_pwm1_max, NULL, 0);
static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, amdgpu_hwmon_get_fan1_input, NULL, 0);
static struct attribute *hwmon_attributes[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp1_crit.dev_attr.attr,
	&sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
	&sensor_dev_attr_pwm1.dev_attr.attr,
	&sensor_dev_attr_pwm1_enable.dev_attr.attr,
	&sensor_dev_attr_pwm1_min.dev_attr.attr,
	&sensor_dev_attr_pwm1_max.dev_attr.attr,
	&sensor_dev_attr_fan1_input.dev_attr.attr,
	NULL
};
static umode_t hwmon_attributes_visible(struct kobject *kobj,
					struct attribute *attr, int index)
{
	struct device *dev = kobj_to_dev(kobj);
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	umode_t effective_mode = attr->mode;

	/* no skipping for powerplay */
	if (adev->powerplay.cgs_device)
		return effective_mode;

	/* Skip limit attributes if DPM is not enabled */
	if (!adev->pm.dpm_enabled &&
	    (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
		return 0;

	/* Skip fan attributes if fan is not present */
	if (adev->pm.no_fan &&
	    (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
		return 0;

	/* mask fan attributes if we have no bindings for this asic to expose */
	if ((!adev->powerplay.pp_funcs->get_fan_speed_percent &&
	     attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
	    (!adev->powerplay.pp_funcs->get_fan_control_mode &&
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
		effective_mode &= ~S_IRUGO;

	if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
	     attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
	    (!adev->powerplay.pp_funcs->set_fan_control_mode &&
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
		effective_mode &= ~S_IWUSR;

	/* hide max/min values if we can't both query and manage the fan */
	if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
	     !adev->powerplay.pp_funcs->get_fan_speed_percent) &&
	    (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
		return 0;

	/* requires powerplay */
	if (attr == &sensor_dev_attr_fan1_input.dev_attr.attr)
		return 0;

	return effective_mode;
}
static const struct attribute_group hwmon_attrgroup = {
	.attrs = hwmon_attributes,
	.is_visible = hwmon_attributes_visible,
};

static const struct attribute_group *hwmon_groups[] = {
	&hwmon_attrgroup,
	NULL
};
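
/*
 * amdgpu_dpm_thermal_work_handler - thermal interrupt worker
 *
 * @work: the work_struct embedded in adev->pm.dpm.thermal
 *
 * Switches the power state to POWER_STATE_TYPE_INTERNAL_THERMAL when the
 * temperature crosses the configured thresholds and back to the user
 * state once it drops below them, then recomputes the clocks.
 */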
void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device,
			     pm.dpm.thermal.work);
	/* switch to the thermal state */
	enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;

	if (!adev->pm.dpm_enabled)
		return;

	if (adev->powerplay.pp_funcs->get_temperature) {
		int temp = amdgpu_dpm_get_temperature(adev);

		if (temp < adev->pm.dpm.thermal.min_temp)
			/* switch back the user state */
			dpm_state = adev->pm.dpm.user_state;
	} else {
		if (adev->pm.dpm.thermal.high_to_low)
			/* switch back the user state */
			dpm_state = adev->pm.dpm.user_state;
	}
	mutex_lock(&adev->pm.mutex);
	if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
		adev->pm.dpm.thermal_active = true;
	else
		adev->pm.dpm.thermal_active = false;
	adev->pm.dpm.state = dpm_state;
	mutex_unlock(&adev->pm.mutex);

	amdgpu_pm_compute_clocks(adev);
}
static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev,
						     enum amd_pm_state_type dpm_state)
{
	int i;
	struct amdgpu_ps *ps;
	u32 ui_class;
	bool single_display = (adev->pm.dpm.new_active_crtc_count < 2) ?
		true : false;

	/* check if the vblank period is too short to adjust the mclk */
	if (single_display && adev->powerplay.pp_funcs->vblank_too_short) {
		if (amdgpu_dpm_vblank_too_short(adev))
			single_display = false;
	}

	/* certain older asics have a separate 3D performance state,
	 * so try that first if the user selected performance
	 */
	if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
		dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
	/* balanced states don't exist at the moment */
	if (dpm_state == POWER_STATE_TYPE_BALANCED)
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;

restart_search:
	/* Pick the best power state based on current conditions */
	for (i = 0; i < adev->pm.dpm.num_ps; i++) {
		ps = &adev->pm.dpm.ps[i];
		ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
		switch (dpm_state) {
		/* user states */
		case POWER_STATE_TYPE_BATTERY:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_BALANCED:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_PERFORMANCE:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		/* internal states */
		case POWER_STATE_TYPE_INTERNAL_UVD:
			if (adev->pm.dpm.uvd_ps)
				return adev->pm.dpm.uvd_ps;
			else
				break;
		case POWER_STATE_TYPE_INTERNAL_UVD_SD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_BOOT:
			return adev->pm.dpm.boot_ps;
		case POWER_STATE_TYPE_INTERNAL_THERMAL:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ACPI:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ULV:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_3DPERF:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
				return ps;
			break;
		default:
			break;
		}
	}
	/* use a fallback state if we didn't match */
	switch (dpm_state) {
	case POWER_STATE_TYPE_INTERNAL_UVD_SD:
		dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_UVD_HD:
	case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
	case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
		if (adev->pm.dpm.uvd_ps) {
			return adev->pm.dpm.uvd_ps;
		} else {
			dpm_state = POWER_STATE_TYPE_PERFORMANCE;
			goto restart_search;
		}
	case POWER_STATE_TYPE_INTERNAL_THERMAL:
		dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_ACPI:
		dpm_state = POWER_STATE_TYPE_BATTERY;
		goto restart_search;
	case POWER_STATE_TYPE_BATTERY:
	case POWER_STATE_TYPE_BALANCED:
	case POWER_STATE_TYPE_INTERNAL_3DPERF:
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;
		goto restart_search;
	default:
		break;
	}

	return NULL;
}
static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
{
	struct amdgpu_ps *ps;
	enum amd_pm_state_type dpm_state;
	int ret;
	bool equal = false;

	/* if dpm init failed */
	if (!adev->pm.dpm_enabled)
		return;

	if (adev->pm.dpm.user_state != adev->pm.dpm.state) {
		/* add other state override checks here */
		if ((!adev->pm.dpm.thermal_active) &&
		    (!adev->pm.dpm.uvd_active))
			adev->pm.dpm.state = adev->pm.dpm.user_state;
	}
	dpm_state = adev->pm.dpm.state;

	ps = amdgpu_dpm_pick_power_state(adev, dpm_state);
	if (ps)
		adev->pm.dpm.requested_ps = ps;
	else
		return;

	if (amdgpu_dpm == 1 && adev->powerplay.pp_funcs->print_power_state) {
		printk("switching from power state:\n");
		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
		printk("switching to power state:\n");
		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps);
	}

	/* update whether vce is active */
	ps->vce_active = adev->pm.dpm.vce_active;
	if (adev->powerplay.pp_funcs->display_configuration_changed)
		amdgpu_dpm_display_configuration_changed(adev);

	ret = amdgpu_dpm_pre_set_power_state(adev);
	if (ret)
		return;

	if (adev->powerplay.pp_funcs->check_state_equal) {
		if (0 != amdgpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps, adev->pm.dpm.requested_ps, &equal))
			equal = false;
	}

	if (equal)
		return;

	amdgpu_dpm_set_power_state(adev);
	amdgpu_dpm_post_set_power_state(adev);

	adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
	adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;

	if (adev->powerplay.pp_funcs->force_performance_level) {
		if (adev->pm.dpm.thermal_active) {
			enum amd_dpm_forced_level level = adev->pm.dpm.forced_level;
			/* force low perf level for thermal */
			amdgpu_dpm_force_performance_level(adev, AMD_DPM_FORCED_LEVEL_LOW);
			/* save the user's level */
			adev->pm.dpm.forced_level = level;
		} else {
			/* otherwise, user selected level */
			amdgpu_dpm_force_performance_level(adev, adev->pm.dpm.forced_level);
		}
	}
}
void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
{
	if (adev->powerplay.pp_funcs->powergate_uvd) {
		/* enable/disable UVD */
		mutex_lock(&adev->pm.mutex);
		amdgpu_dpm_powergate_uvd(adev, !enable);
		mutex_unlock(&adev->pm.mutex);
	} else {
		if (enable) {
			mutex_lock(&adev->pm.mutex);
			adev->pm.dpm.uvd_active = true;
			adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
			mutex_unlock(&adev->pm.mutex);
		} else {
			mutex_lock(&adev->pm.mutex);
			adev->pm.dpm.uvd_active = false;
			mutex_unlock(&adev->pm.mutex);
		}
		amdgpu_pm_compute_clocks(adev);
	}
}
void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
{
	if (adev->powerplay.pp_funcs->powergate_vce) {
		/* enable/disable VCE */
		mutex_lock(&adev->pm.mutex);
		amdgpu_dpm_powergate_vce(adev, !enable);
		mutex_unlock(&adev->pm.mutex);
	} else {
		if (enable) {
			mutex_lock(&adev->pm.mutex);
			adev->pm.dpm.vce_active = true;
			/* XXX select vce level based on ring/task */
			adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
			mutex_unlock(&adev->pm.mutex);
			amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
							AMD_CG_STATE_UNGATE);
			amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
							AMD_PG_STATE_UNGATE);
			amdgpu_pm_compute_clocks(adev);
		} else {
			amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
							AMD_PG_STATE_GATE);
			amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
							AMD_CG_STATE_GATE);
			mutex_lock(&adev->pm.mutex);
			adev->pm.dpm.vce_active = false;
			mutex_unlock(&adev->pm.mutex);
			amdgpu_pm_compute_clocks(adev);
		}
	}
}
void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
{
	int i;

	if (adev->powerplay.pp_funcs->print_power_state == NULL)
		return;

	for (i = 0; i < adev->pm.dpm.num_ps; i++)
		amdgpu_dpm_print_power_state(adev, &adev->pm.dpm.ps[i]);
}
int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
{
	int ret;

	if (adev->pm.sysfs_initialized)
		return 0;

	if (adev->pm.dpm_enabled == 0)
		return 0;

	if (adev->powerplay.pp_funcs->get_temperature == NULL)
		return 0;

	adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
								   DRIVER_NAME, adev,
								   hwmon_groups);
	if (IS_ERR(adev->pm.int_hwmon_dev)) {
		ret = PTR_ERR(adev->pm.int_hwmon_dev);
		dev_err(adev->dev,
			"Unable to register hwmon device: %d\n", ret);
		return ret;
	}

	ret = device_create_file(adev->dev, &dev_attr_power_dpm_state);
	if (ret) {
		DRM_ERROR("failed to create device file for dpm state\n");
		return ret;
	}
	ret = device_create_file(adev->dev, &dev_attr_power_dpm_force_performance_level);
	if (ret) {
		DRM_ERROR("failed to create device file for dpm force performance level\n");
		return ret;
	}

	ret = device_create_file(adev->dev, &dev_attr_pp_num_states);
	if (ret) {
		DRM_ERROR("failed to create device file pp_num_states\n");
		return ret;
	}
	ret = device_create_file(adev->dev, &dev_attr_pp_cur_state);
	if (ret) {
		DRM_ERROR("failed to create device file pp_cur_state\n");
		return ret;
	}
	ret = device_create_file(adev->dev, &dev_attr_pp_force_state);
	if (ret) {
		DRM_ERROR("failed to create device file pp_force_state\n");
		return ret;
	}
	ret = device_create_file(adev->dev, &dev_attr_pp_table);
	if (ret) {
		DRM_ERROR("failed to create device file pp_table\n");
		return ret;
	}

	ret = device_create_file(adev->dev, &dev_attr_pp_dpm_sclk);
	if (ret) {
		DRM_ERROR("failed to create device file pp_dpm_sclk\n");
		return ret;
	}
	ret = device_create_file(adev->dev, &dev_attr_pp_dpm_mclk);
	if (ret) {
		DRM_ERROR("failed to create device file pp_dpm_mclk\n");
		return ret;
	}
	ret = device_create_file(adev->dev, &dev_attr_pp_dpm_pcie);
	if (ret) {
		DRM_ERROR("failed to create device file pp_dpm_pcie\n");
		return ret;
	}
	ret = device_create_file(adev->dev, &dev_attr_pp_sclk_od);
	if (ret) {
		DRM_ERROR("failed to create device file pp_sclk_od\n");
		return ret;
	}
	ret = device_create_file(adev->dev, &dev_attr_pp_mclk_od);
	if (ret) {
		DRM_ERROR("failed to create device file pp_mclk_od\n");
		return ret;
	}
	ret = device_create_file(adev->dev,
			&dev_attr_pp_gfx_power_profile);
	if (ret) {
		DRM_ERROR("failed to create device file pp_gfx_power_profile\n");
		return ret;
	}
	ret = device_create_file(adev->dev,
			&dev_attr_pp_compute_power_profile);
	if (ret) {
		DRM_ERROR("failed to create device file pp_compute_power_profile\n");
		return ret;
	}

	ret = amdgpu_debugfs_pm_init(adev);
	if (ret) {
		DRM_ERROR("Failed to register debugfs file for dpm!\n");
		return ret;
	}

	adev->pm.sysfs_initialized = true;

	return 0;
}
void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
{
	if (adev->pm.dpm_enabled == 0)
		return;

	if (adev->pm.int_hwmon_dev)
		hwmon_device_unregister(adev->pm.int_hwmon_dev);
	device_remove_file(adev->dev, &dev_attr_power_dpm_state);
	device_remove_file(adev->dev, &dev_attr_power_dpm_force_performance_level);

	device_remove_file(adev->dev, &dev_attr_pp_num_states);
	device_remove_file(adev->dev, &dev_attr_pp_cur_state);
	device_remove_file(adev->dev, &dev_attr_pp_force_state);
	device_remove_file(adev->dev, &dev_attr_pp_table);

	device_remove_file(adev->dev, &dev_attr_pp_dpm_sclk);
	device_remove_file(adev->dev, &dev_attr_pp_dpm_mclk);
	device_remove_file(adev->dev, &dev_attr_pp_dpm_pcie);
	device_remove_file(adev->dev, &dev_attr_pp_sclk_od);
	device_remove_file(adev->dev, &dev_attr_pp_mclk_od);
	device_remove_file(adev->dev,
			&dev_attr_pp_gfx_power_profile);
	device_remove_file(adev->dev,
			&dev_attr_pp_compute_power_profile);
}
void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;
	int i = 0;

	if (!adev->pm.dpm_enabled)
		return;

	if (adev->mode_info.num_crtc)
		amdgpu_display_bandwidth_update(adev);

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (ring && ring->ready)
			amdgpu_fence_wait_empty(ring);
	}

	if (adev->powerplay.pp_funcs->dispatch_tasks) {
		amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL, NULL);
	} else {
		mutex_lock(&adev->pm.mutex);
		adev->pm.dpm.new_active_crtcs = 0;
		adev->pm.dpm.new_active_crtc_count = 0;
		if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
			list_for_each_entry(crtc,
					    &ddev->mode_config.crtc_list, head) {
				amdgpu_crtc = to_amdgpu_crtc(crtc);
				if (amdgpu_crtc->enabled) {
					adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id);
					adev->pm.dpm.new_active_crtc_count++;
				}
			}
		}
		/* update battery/ac status */
		if (power_supply_is_system_supplied() > 0)
			adev->pm.dpm.ac_power = true;
		else
			adev->pm.dpm.ac_power = false;

		amdgpu_dpm_change_power_state_locked(adev);

		mutex_unlock(&adev->pm.mutex);
	}
}
#if defined(CONFIG_DEBUG_FS)

static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *adev)
{
	uint32_t value;
	struct pp_gpu_power query = {0};
	int size;

	/* sanity check PP is enabled */
	if (!(adev->powerplay.pp_funcs &&
	      adev->powerplay.pp_funcs->read_sensor))
		return -EINVAL;

	/* GPU Clocks */
	size = sizeof(value);
	seq_printf(m, "GFX Clocks and Power:\n");
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK, (void *)&value, &size))
		seq_printf(m, "\t%u MHz (MCLK)\n", value/100);
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, (void *)&value, &size))
		seq_printf(m, "\t%u MHz (SCLK)\n", value/100);
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX, (void *)&value, &size))
		seq_printf(m, "\t%u mV (VDDGFX)\n", value);
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, (void *)&value, &size))
		seq_printf(m, "\t%u mV (VDDNB)\n", value);
	size = sizeof(query);
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER, (void *)&query, &size)) {
		seq_printf(m, "\t%u.%u W (VDDC)\n", query.vddc_power >> 8,
				query.vddc_power & 0xff);
		seq_printf(m, "\t%u.%u W (VDDCI)\n", query.vddci_power >> 8,
				query.vddci_power & 0xff);
		seq_printf(m, "\t%u.%u W (max GPU)\n", query.max_gpu_power >> 8,
				query.max_gpu_power & 0xff);
		seq_printf(m, "\t%u.%u W (average GPU)\n", query.average_gpu_power >> 8,
				query.average_gpu_power & 0xff);
	}
	size = sizeof(value);
	seq_printf(m, "\n");

	/* GPU Temp */
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP, (void *)&value, &size))
		seq_printf(m, "GPU Temperature: %u C\n", value/1000);

	/* GPU Load */
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD, (void *)&value, &size))
		seq_printf(m, "GPU Load: %u %%\n", value);
	seq_printf(m, "\n");

	/* UVD clocks */
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) {
		if (!value) {
			seq_printf(m, "UVD: Disabled\n");
		} else {
			seq_printf(m, "UVD: Enabled\n");
			if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
				seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
			if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
				seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
		}
	}
	seq_printf(m, "\n");

	/* VCE clocks */
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) {
		if (!value) {
			seq_printf(m, "VCE: Disabled\n");
		} else {
			seq_printf(m, "VCE: Enabled\n");
			if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size))
				seq_printf(m, "\t%u MHz (ECCLK)\n", value/100);
		}
	}

	return 0;
}
static void amdgpu_parse_cg_state(struct seq_file *m, u32 flags)
{
	int i;

	for (i = 0; clocks[i].flag; i++)
		seq_printf(m, "\t%s: %s\n", clocks[i].name,
			   (flags & clocks[i].flag) ? "On" : "Off");
}
static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_device *ddev = adev->ddev;
	u32 flags = 0;

	amdgpu_get_clockgating_state(adev, &flags);
	seq_printf(m, "Clock Gating Flags Mask: 0x%x\n", flags);
	amdgpu_parse_cg_state(m, flags);
	seq_printf(m, "\n");

	if (!adev->pm.dpm_enabled) {
		seq_printf(m, "dpm not enabled\n");
		return 0;
	}
	if ((adev->flags & AMD_IS_PX) &&
	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
		seq_printf(m, "PX asic powered off\n");
	} else if (adev->powerplay.pp_funcs->debugfs_print_current_performance_level) {
		mutex_lock(&adev->pm.mutex);
		if (adev->powerplay.pp_funcs->debugfs_print_current_performance_level)
			adev->powerplay.pp_funcs->debugfs_print_current_performance_level(adev, m);
		else
			seq_printf(m, "Debugfs support not implemented for this asic\n");
		mutex_unlock(&adev->pm.mutex);
	} else {
		return amdgpu_debugfs_pm_info_pp(m, adev);
	}

	return 0;
}
static const struct drm_info_list amdgpu_pm_info_list[] = {
	{"amdgpu_pm_info", amdgpu_debugfs_pm_info, 0, NULL},
};
#endif
static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_pm_info_list, ARRAY_SIZE(amdgpu_pm_info_list));
#else
	return 0;
#endif
}