/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Rafał Miłecki <zajec5@gmail.com>
 *          Alex Deucher <alexdeucher@gmail.com>
 */
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_drv.h"
#include "amdgpu_pm.h"
#include "amdgpu_dpm.h"
#include "atom.h"
#include <linux/power_supply.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include "amd_powerplay.h"
static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev);
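
/* Human-readable names for the clock-gating support flags reported through
 * the amdgpu_pm_info debugfs file (see amdgpu_parse_cg_state() below). The
 * table is terminated by a zero flag entry.
 */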
static const struct cg_flag_name clocks[] = {
	{AMD_CG_SUPPORT_GFX_MGCG, "Graphics Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_MGLS, "Graphics Medium Grain memory Light Sleep"},
	{AMD_CG_SUPPORT_GFX_CGCG, "Graphics Coarse Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_CGLS, "Graphics Coarse Grain memory Light Sleep"},
	{AMD_CG_SUPPORT_GFX_CGTS, "Graphics Coarse Grain Tree Shader Clock Gating"},
	{AMD_CG_SUPPORT_GFX_CGTS_LS, "Graphics Coarse Grain Tree Shader Light Sleep"},
	{AMD_CG_SUPPORT_GFX_CP_LS, "Graphics Command Processor Light Sleep"},
	{AMD_CG_SUPPORT_GFX_RLC_LS, "Graphics Run List Controller Light Sleep"},
	{AMD_CG_SUPPORT_GFX_3D_CGCG, "Graphics 3D Coarse Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_3D_CGLS, "Graphics 3D Coarse Grain memory Light Sleep"},
	{AMD_CG_SUPPORT_MC_LS, "Memory Controller Light Sleep"},
	{AMD_CG_SUPPORT_MC_MGCG, "Memory Controller Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_SDMA_LS, "System Direct Memory Access Light Sleep"},
	{AMD_CG_SUPPORT_SDMA_MGCG, "System Direct Memory Access Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_BIF_MGCG, "Bus Interface Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_BIF_LS, "Bus Interface Light Sleep"},
	{AMD_CG_SUPPORT_UVD_MGCG, "Unified Video Decoder Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_VCE_MGCG, "Video Compression Engine Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_HDP_LS, "Host Data Path Light Sleep"},
	{AMD_CG_SUPPORT_HDP_MGCG, "Host Data Path Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_DRM_MGCG, "Digital Rights Management Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_DRM_LS, "Digital Rights Management Light Sleep"},
	{AMD_CG_SUPPORT_ROM_MGCG, "Rom Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_DF_MGCG, "Data Fabric Medium Grain Clock Gating"},
	{0, NULL},
};
void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
{
	if (adev->pm.dpm_enabled) {
		mutex_lock(&adev->pm.mutex);
		if (power_supply_is_system_supplied() > 0)
			adev->pm.dpm.ac_power = true;
		else
			adev->pm.dpm.ac_power = false;
		if (adev->powerplay.pp_funcs->enable_bapm)
			amdgpu_dpm_enable_bapm(adev, adev->pm.dpm.ac_power);
		mutex_unlock(&adev->pm.mutex);
	}
}
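
/* power_dpm_state (sysfs, read/write)
 *
 * A usage note, not part of the original file's comments: reading returns
 * "battery", "balanced" or "performance"; writing one of those strings
 * selects the corresponding user power state. Illustrative shell usage,
 * assuming the GPU is card0:
 *
 *   cat /sys/class/drm/card0/device/power_dpm_state
 *   echo performance > /sys/class/drm/card0/device/power_dpm_state
 */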
static ssize_t amdgpu_get_dpm_state(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	enum amd_pm_state_type pm;

	if (adev->powerplay.pp_funcs->get_current_power_state)
		pm = amdgpu_dpm_get_current_power_state(adev);
	else
		pm = adev->pm.dpm.user_state;

	return snprintf(buf, PAGE_SIZE, "%s\n",
			(pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
			(pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
}
static ssize_t amdgpu_set_dpm_state(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	enum amd_pm_state_type state;

	if (strncmp("battery", buf, strlen("battery")) == 0)
		state = POWER_STATE_TYPE_BATTERY;
	else if (strncmp("balanced", buf, strlen("balanced")) == 0)
		state = POWER_STATE_TYPE_BALANCED;
	else if (strncmp("performance", buf, strlen("performance")) == 0)
		state = POWER_STATE_TYPE_PERFORMANCE;
	else {
		count = -EINVAL;
		goto fail;
	}

	if (adev->powerplay.pp_funcs->dispatch_tasks) {
		amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_ENABLE_USER_STATE, &state, NULL);
	} else {
		mutex_lock(&adev->pm.mutex);
		adev->pm.dpm.user_state = state;
		mutex_unlock(&adev->pm.mutex);

		/* Can't set dpm state when the card is off */
		if (!(adev->flags & AMD_IS_PX) ||
		    (ddev->switch_power_state == DRM_SWITCH_POWER_ON))
			amdgpu_pm_compute_clocks(adev);
	}
fail:
	return count;
}
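
/* power_dpm_force_performance_level (sysfs, read/write)
 *
 * A usage note, not part of the original file's comments: accepts "auto",
 * "low", "high", "manual" and the "profile_*" strings parsed below; reading
 * reports the current level, or "off" when a PX card is powered down.
 * Illustrative usage, assuming card0:
 *
 *   echo manual > /sys/class/drm/card0/device/power_dpm_force_performance_level
 */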
static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev,
						struct device_attribute *attr,
						char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	enum amd_dpm_forced_level level = 0xff;

	if ((adev->flags & AMD_IS_PX) &&
	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
		return snprintf(buf, PAGE_SIZE, "off\n");

	if (adev->powerplay.pp_funcs->get_performance_level)
		level = amdgpu_dpm_get_performance_level(adev);
	else
		level = adev->pm.dpm.forced_level;

	return snprintf(buf, PAGE_SIZE, "%s\n",
			(level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
			(level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
			(level == AMD_DPM_FORCED_LEVEL_HIGH) ? "high" :
			(level == AMD_DPM_FORCED_LEVEL_MANUAL) ? "manual" :
			(level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) ? "profile_standard" :
			(level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) ? "profile_min_sclk" :
			(level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) ? "profile_min_mclk" :
			(level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) ? "profile_peak" :
			"unknown");
}
static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
						struct device_attribute *attr,
						const char *buf,
						size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	enum amd_dpm_forced_level level;
	enum amd_dpm_forced_level current_level = 0xff;
	int ret = 0;

	/* Can't force performance level when the card is off */
	if ((adev->flags & AMD_IS_PX) &&
	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
		return -EINVAL;

	if (adev->powerplay.pp_funcs->get_performance_level)
		current_level = amdgpu_dpm_get_performance_level(adev);

	if (strncmp("low", buf, strlen("low")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_LOW;
	} else if (strncmp("high", buf, strlen("high")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_HIGH;
	} else if (strncmp("auto", buf, strlen("auto")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_AUTO;
	} else if (strncmp("manual", buf, strlen("manual")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_MANUAL;
	} else if (strncmp("profile_exit", buf, strlen("profile_exit")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_EXIT;
	} else if (strncmp("profile_standard", buf, strlen("profile_standard")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD;
	} else if (strncmp("profile_min_sclk", buf, strlen("profile_min_sclk")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK;
	} else if (strncmp("profile_min_mclk", buf, strlen("profile_min_mclk")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK;
	} else if (strncmp("profile_peak", buf, strlen("profile_peak")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
	} else {
		count = -EINVAL;
		goto fail;
	}

	if (current_level == level)
		return count;

	if (adev->powerplay.pp_funcs->force_performance_level) {
		mutex_lock(&adev->pm.mutex);
		if (adev->pm.dpm.thermal_active) {
			count = -EINVAL;
			mutex_unlock(&adev->pm.mutex);
			goto fail;
		}
		ret = amdgpu_dpm_force_performance_level(adev, level);
		if (ret)
			count = -EINVAL;
		else
			adev->pm.dpm.forced_level = level;
		mutex_unlock(&adev->pm.mutex);
	}

fail:
	return count;
}
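
/* pp_num_states, pp_cur_state and pp_force_state (sysfs)
 *
 * A usage note, not part of the original file's comments: pp_num_states
 * lists the available power states, pp_cur_state reports the index of the
 * current one, and writing an index to pp_force_state forces that state
 * (an empty write clears the forced state).
 */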
static ssize_t amdgpu_get_pp_num_states(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	struct pp_states_info data;
	int i, buf_len;

	if (adev->powerplay.pp_funcs->get_pp_num_states)
		amdgpu_dpm_get_pp_num_states(adev, &data);

	buf_len = snprintf(buf, PAGE_SIZE, "states: %d\n", data.nums);
	for (i = 0; i < data.nums; i++)
		buf_len += snprintf(buf + buf_len, PAGE_SIZE, "%d %s\n", i,
				(data.states[i] == POWER_STATE_TYPE_INTERNAL_BOOT) ? "boot" :
				(data.states[i] == POWER_STATE_TYPE_BATTERY) ? "battery" :
				(data.states[i] == POWER_STATE_TYPE_BALANCED) ? "balanced" :
				(data.states[i] == POWER_STATE_TYPE_PERFORMANCE) ? "performance" : "default");

	return buf_len;
}
static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	struct pp_states_info data;
	enum amd_pm_state_type pm = 0;
	int i = 0;

	if (adev->powerplay.pp_funcs->get_current_power_state
		 && adev->powerplay.pp_funcs->get_pp_num_states) {
		pm = amdgpu_dpm_get_current_power_state(adev);
		amdgpu_dpm_get_pp_num_states(adev, &data);

		for (i = 0; i < data.nums; i++) {
			if (pm == data.states[i])
				break;
		}

		if (i == data.nums)
			i = -EINVAL;
	}

	return snprintf(buf, PAGE_SIZE, "%d\n", i);
}
static ssize_t amdgpu_get_pp_force_state(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;

	if (adev->pp_force_state_enabled)
		return amdgpu_get_pp_cur_state(dev, attr, buf);
	else
		return snprintf(buf, PAGE_SIZE, "\n");
}
static ssize_t amdgpu_set_pp_force_state(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	enum amd_pm_state_type state = 0;
	unsigned long idx;
	int ret;

	if (strlen(buf) == 1)
		adev->pp_force_state_enabled = false;
	else if (adev->powerplay.pp_funcs->dispatch_tasks &&
			adev->powerplay.pp_funcs->get_pp_num_states) {
		struct pp_states_info data;

		ret = kstrtoul(buf, 0, &idx);
		if (ret || idx >= ARRAY_SIZE(data.states)) {
			count = -EINVAL;
			goto fail;
		}

		amdgpu_dpm_get_pp_num_states(adev, &data);
		state = data.states[idx];
		/* only set user selected power states */
		if (state != POWER_STATE_TYPE_INTERNAL_BOOT &&
		    state != POWER_STATE_TYPE_DEFAULT) {
			amdgpu_dpm_dispatch_task(adev,
					AMD_PP_TASK_ENABLE_USER_STATE, &state, NULL);
			adev->pp_force_state_enabled = true;
		}
	}
fail:
	return count;
}
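
/* pp_table (sysfs, read/write): the raw powerplay table, exposed as binary
 * data. Note that reads are truncated to PAGE_SIZE - 1 bytes below, so a
 * table larger than one page cannot be dumped completely through this file.
 */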
static ssize_t amdgpu_get_pp_table(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	char *table = NULL;
	int size;

	if (adev->powerplay.pp_funcs->get_pp_table)
		size = amdgpu_dpm_get_pp_table(adev, &table);
	else
		return 0;

	if (size >= PAGE_SIZE)
		size = PAGE_SIZE - 1;

	memcpy(buf, table, size);

	return size;
}
static ssize_t amdgpu_set_pp_table(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;

	if (adev->powerplay.pp_funcs->set_pp_table)
		amdgpu_dpm_set_pp_table(adev, buf, count);

	return count;
}
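
/* pp_dpm_sclk, pp_dpm_mclk and pp_dpm_pcie (sysfs, read/write)
 *
 * A usage note, not part of the original file's comments: reading prints
 * the available levels via print_clock_levels; writing a whitespace
 * separated list of level indices builds the bitmask passed to
 * force_clock_level. Illustrative usage, assuming card0 and that the
 * forced performance level has been set to "manual" first:
 *
 *   echo "0 2" > /sys/class/drm/card0/device/pp_dpm_sclk
 */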
static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;

	if (adev->powerplay.pp_funcs->print_clock_levels)
		return amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf);

	return snprintf(buf, PAGE_SIZE, "\n");
}
static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int ret;
	long level;
	uint32_t i, mask = 0;
	char sub_str[2];

	for (i = 0; i < strlen(buf); i++) {
		if (*(buf + i) == '\n')
			continue;

		sub_str[0] = *(buf + i);
		sub_str[1] = '\0';
		ret = kstrtol(sub_str, 0, &level);

		if (ret) {
			count = -EINVAL;
			goto fail;
		}
		mask |= 1 << level;
	}

	if (adev->powerplay.pp_funcs->force_clock_level)
		amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);

fail:
	return count;
}
static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;

	if (adev->powerplay.pp_funcs->print_clock_levels)
		return amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf);

	return snprintf(buf, PAGE_SIZE, "\n");
}
static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int ret;
	long level;
	uint32_t i, mask = 0;
	char sub_str[2];

	for (i = 0; i < strlen(buf); i++) {
		if (*(buf + i) == '\n')
			continue;

		sub_str[0] = *(buf + i);
		sub_str[1] = '\0';
		ret = kstrtol(sub_str, 0, &level);

		if (ret) {
			count = -EINVAL;
			goto fail;
		}
		mask |= 1 << level;
	}

	if (adev->powerplay.pp_funcs->force_clock_level)
		amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask);

fail:
	return count;
}
static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;

	if (adev->powerplay.pp_funcs->print_clock_levels)
		return amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf);

	return snprintf(buf, PAGE_SIZE, "\n");
}
static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int ret;
	long level;
	uint32_t i, mask = 0;
	char sub_str[2];

	for (i = 0; i < strlen(buf); i++) {
		if (*(buf + i) == '\n')
			continue;

		sub_str[0] = *(buf + i);
		sub_str[1] = '\0';
		ret = kstrtol(sub_str, 0, &level);

		if (ret) {
			count = -EINVAL;
			goto fail;
		}
		mask |= 1 << level;
	}

	if (adev->powerplay.pp_funcs->force_clock_level)
		amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask);

fail:
	return count;
}
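
/* pp_sclk_od and pp_mclk_od (sysfs, read/write): overdrive expressed as a
 * percentage over the default maximum engine/memory clock (0 meaning no
 * overdrive). This summary is an editorial note, not an original comment.
 */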
static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	uint32_t value = 0;

	if (adev->powerplay.pp_funcs->get_sclk_od)
		value = amdgpu_dpm_get_sclk_od(adev);

	return snprintf(buf, PAGE_SIZE, "%d\n", value);
}
static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int ret;
	long int value;

	ret = kstrtol(buf, 0, &value);

	if (ret) {
		count = -EINVAL;
		goto fail;
	}

	if (adev->powerplay.pp_funcs->set_sclk_od)
		amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);

	if (adev->powerplay.pp_funcs->dispatch_tasks) {
		amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL, NULL);
	} else {
		adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
		amdgpu_pm_compute_clocks(adev);
	}

fail:
	return count;
}
static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	uint32_t value = 0;

	if (adev->powerplay.pp_funcs->get_mclk_od)
		value = amdgpu_dpm_get_mclk_od(adev);

	return snprintf(buf, PAGE_SIZE, "%d\n", value);
}
static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int ret;
	long int value;

	ret = kstrtol(buf, 0, &value);

	if (ret) {
		count = -EINVAL;
		goto fail;
	}

	if (adev->powerplay.pp_funcs->set_mclk_od)
		amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);

	if (adev->powerplay.pp_funcs->dispatch_tasks) {
		amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL, NULL);
	} else {
		adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
		amdgpu_pm_compute_clocks(adev);
	}

fail:
	return count;
}
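
/* pp_gfx_power_profile and pp_compute_power_profile (sysfs, read/write)
 *
 * A usage note, not part of the original file's comments: the five
 * space-separated fields parsed below are min_sclk (MHz), min_mclk (MHz),
 * activity_threshold, up_hyst and down_hyst; writing "set" applies the
 * current request and "reset" restores the defaults.
 */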
static ssize_t amdgpu_get_pp_power_profile(struct device *dev,
		char *buf, struct amd_pp_profile *query)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int ret = 0xff;

	if (adev->powerplay.pp_funcs->get_power_profile_state)
		ret = amdgpu_dpm_get_power_profile_state(
				adev, query);

	if (ret)
		return ret;

	return snprintf(buf, PAGE_SIZE,
			"%d %d %d %d %d\n",
			query->min_sclk / 100,
			query->min_mclk / 100,
			query->activity_threshold,
			query->up_hyst,
			query->down_hyst);
}
static ssize_t amdgpu_get_pp_gfx_power_profile(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct amd_pp_profile query = {0};

	query.type = AMD_PP_GFX_PROFILE;

	return amdgpu_get_pp_power_profile(dev, buf, &query);
}
static ssize_t amdgpu_get_pp_compute_power_profile(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct amd_pp_profile query = {0};

	query.type = AMD_PP_COMPUTE_PROFILE;

	return amdgpu_get_pp_power_profile(dev, buf, &query);
}
static ssize_t amdgpu_set_pp_power_profile(struct device *dev,
		const char *buf,
		size_t count,
		struct amd_pp_profile *request)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	uint32_t loop = 0;
	char *sub_str, buf_cpy[128], *tmp_str;
	const char delimiter[3] = {' ', '\n', '\0'};
	long int value;
	int ret = 0xff;

	if (strncmp("reset", buf, strlen("reset")) == 0) {
		if (adev->powerplay.pp_funcs->reset_power_profile_state)
			ret = amdgpu_dpm_reset_power_profile_state(
					adev, request);
		if (ret) {
			count = -EINVAL;
			goto fail;
		}
		return count;
	}

	if (strncmp("set", buf, strlen("set")) == 0) {
		if (adev->powerplay.pp_funcs->set_power_profile_state)
			ret = amdgpu_dpm_set_power_profile_state(
					adev, request);
		if (ret) {
			count = -EINVAL;
			goto fail;
		}
		return count;
	}

	if (count + 1 >= 128) {
		count = -EINVAL;
		goto fail;
	}

	memcpy(buf_cpy, buf, count + 1);
	tmp_str = buf_cpy;

	while (tmp_str[0]) {
		sub_str = strsep(&tmp_str, delimiter);
		ret = kstrtol(sub_str, 0, &value);
		if (ret) {
			count = -EINVAL;
			goto fail;
		}

		switch (loop) {
		case 0:
			/* input unit MHz convert to dpm table unit 10KHz */
			request->min_sclk = (uint32_t)value * 100;
			break;
		case 1:
			/* input unit MHz convert to dpm table unit 10KHz */
			request->min_mclk = (uint32_t)value * 100;
			break;
		case 2:
			request->activity_threshold = (uint16_t)value;
			break;
		case 3:
			request->up_hyst = (uint8_t)value;
			break;
		case 4:
			request->down_hyst = (uint8_t)value;
			break;
		default:
			break;
		}

		loop++;
	}

	if (adev->powerplay.pp_funcs->set_power_profile_state)
		ret = amdgpu_dpm_set_power_profile_state(adev, request);

	if (ret)
		count = -EINVAL;

fail:
	return count;
}
static ssize_t amdgpu_set_pp_gfx_power_profile(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct amd_pp_profile request = {0};

	request.type = AMD_PP_GFX_PROFILE;

	return amdgpu_set_pp_power_profile(dev, buf, count, &request);
}
static ssize_t amdgpu_set_pp_compute_power_profile(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct amd_pp_profile request = {0};

	request.type = AMD_PP_COMPUTE_PROFILE;

	return amdgpu_set_pp_power_profile(dev, buf, count, &request);
}
static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, amdgpu_get_dpm_state, amdgpu_set_dpm_state);
static DEVICE_ATTR(power_dpm_force_performance_level, S_IRUGO | S_IWUSR,
		   amdgpu_get_dpm_forced_performance_level,
		   amdgpu_set_dpm_forced_performance_level);
static DEVICE_ATTR(pp_num_states, S_IRUGO, amdgpu_get_pp_num_states, NULL);
static DEVICE_ATTR(pp_cur_state, S_IRUGO, amdgpu_get_pp_cur_state, NULL);
static DEVICE_ATTR(pp_force_state, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_force_state,
		amdgpu_set_pp_force_state);
static DEVICE_ATTR(pp_table, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_table,
		amdgpu_set_pp_table);
static DEVICE_ATTR(pp_dpm_sclk, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_dpm_sclk,
		amdgpu_set_pp_dpm_sclk);
static DEVICE_ATTR(pp_dpm_mclk, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_dpm_mclk,
		amdgpu_set_pp_dpm_mclk);
static DEVICE_ATTR(pp_dpm_pcie, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_dpm_pcie,
		amdgpu_set_pp_dpm_pcie);
static DEVICE_ATTR(pp_sclk_od, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_sclk_od,
		amdgpu_set_pp_sclk_od);
static DEVICE_ATTR(pp_mclk_od, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_mclk_od,
		amdgpu_set_pp_mclk_od);
static DEVICE_ATTR(pp_gfx_power_profile, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_gfx_power_profile,
		amdgpu_set_pp_gfx_power_profile);
static DEVICE_ATTR(pp_compute_power_profile, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_compute_power_profile,
		amdgpu_set_pp_compute_power_profile);
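
/* hwmon interface (one sensor node per device)
 *
 * A summary note, not part of the original file's comments: temp1_input
 * reports the value returned by amdgpu_dpm_get_temperature() (millidegrees
 * Celsius), pwm1 scales the fan speed to the usual hwmon 0-255 range,
 * pwm1_enable selects the fan control mode (typically 1 = manual,
 * 2 = automatic), and fan1_input reports the fan speed in RPM.
 */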
static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	struct drm_device *ddev = adev->ddev;
	int temp;

	/* Can't get temperature when the card is off */
	if ((adev->flags & AMD_IS_PX) &&
	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
		return -EINVAL;

	if (!adev->powerplay.pp_funcs->get_temperature)
		temp = 0;
	else
		temp = amdgpu_dpm_get_temperature(adev);

	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}
static ssize_t amdgpu_hwmon_show_temp_thresh(struct device *dev,
					     struct device_attribute *attr,
					     char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int hyst = to_sensor_dev_attr(attr)->index;
	int temp;

	if (hyst)
		temp = adev->pm.dpm.thermal.min_temp;
	else
		temp = adev->pm.dpm.thermal.max_temp;

	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}
static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	u32 pwm_mode = 0;

	if (!adev->powerplay.pp_funcs->get_fan_control_mode)
		return -EINVAL;

	pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);

	return sprintf(buf, "%i\n", pwm_mode);
}
static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf,
					    size_t count)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	int value;

	if (!adev->powerplay.pp_funcs->set_fan_control_mode)
		return -EINVAL;

	err = kstrtoint(buf, 10, &value);
	if (err)
		return err;

	amdgpu_dpm_set_fan_control_mode(adev, value);

	return count;
}
static ssize_t amdgpu_hwmon_get_pwm1_min(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	return sprintf(buf, "%i\n", 0);
}
static ssize_t amdgpu_hwmon_get_pwm1_max(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	return sprintf(buf, "%i\n", 255);
}
static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	u32 value;

	err = kstrtou32(buf, 10, &value);
	if (err)
		return err;

	value = (value * 100) / 255;

	if (adev->powerplay.pp_funcs->set_fan_speed_percent) {
		err = amdgpu_dpm_set_fan_speed_percent(adev, value);
		if (err)
			return err;
	}

	return count;
}
static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	u32 speed = 0;

	if (adev->powerplay.pp_funcs->get_fan_speed_percent) {
		err = amdgpu_dpm_get_fan_speed_percent(adev, &speed);
		if (err)
			return err;
	}

	speed = (speed * 255) / 100;

	return sprintf(buf, "%i\n", speed);
}
static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	u32 speed = 0;

	if (adev->powerplay.pp_funcs->get_fan_speed_rpm) {
		err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
		if (err)
			return err;
	}

	return sprintf(buf, "%i\n", speed);
}
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1);
static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1, amdgpu_hwmon_set_pwm1, 0);
static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1_enable, amdgpu_hwmon_set_pwm1_enable, 0);
static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, amdgpu_hwmon_get_pwm1_min, NULL, 0);
static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, amdgpu_hwmon_get_pwm1_max, NULL, 0);
static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, amdgpu_hwmon_get_fan1_input, NULL, 0);
static struct attribute *hwmon_attributes[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp1_crit.dev_attr.attr,
	&sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
	&sensor_dev_attr_pwm1.dev_attr.attr,
	&sensor_dev_attr_pwm1_enable.dev_attr.attr,
	&sensor_dev_attr_pwm1_min.dev_attr.attr,
	&sensor_dev_attr_pwm1_max.dev_attr.attr,
	&sensor_dev_attr_fan1_input.dev_attr.attr,
	NULL
};
static umode_t hwmon_attributes_visible(struct kobject *kobj,
					struct attribute *attr, int index)
{
	struct device *dev = kobj_to_dev(kobj);
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	umode_t effective_mode = attr->mode;

	/* no skipping for powerplay */
	if (adev->powerplay.cgs_device)
		return effective_mode;

	/* Skip limit attributes if DPM is not enabled */
	if (!adev->pm.dpm_enabled &&
	    (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
		return 0;

	/* Skip fan attributes if fan is not present */
	if (adev->pm.no_fan &&
	    (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
		return 0;

	/* mask fan attributes if we have no bindings for this asic to expose */
	if ((!adev->powerplay.pp_funcs->get_fan_speed_percent &&
	     attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
	    (!adev->powerplay.pp_funcs->get_fan_control_mode &&
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
		effective_mode &= ~S_IRUGO;

	if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
	     attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
	    (!adev->powerplay.pp_funcs->set_fan_control_mode &&
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
		effective_mode &= ~S_IWUSR;

	/* hide max/min values if we can't both query and manage the fan */
	if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
	     !adev->powerplay.pp_funcs->get_fan_speed_percent) &&
	    (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
		return 0;

	/* requires powerplay */
	if (attr == &sensor_dev_attr_fan1_input.dev_attr.attr)
		return 0;

	return effective_mode;
}
static const struct attribute_group hwmon_attrgroup = {
	.attrs = hwmon_attributes,
	.is_visible = hwmon_attributes_visible,
};

static const struct attribute_group *hwmon_groups[] = {
	&hwmon_attrgroup,
	NULL
};
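
/* Scheduled from the thermal interrupt path: enter the internal thermal
 * state while the GPU is too hot and drop back to the user-selected state
 * once the temperature falls below the configured minimum again.
 */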
void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device,
			     pm.dpm.thermal.work);
	/* switch to the thermal state */
	enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;

	if (!adev->pm.dpm_enabled)
		return;

	if (adev->powerplay.pp_funcs->get_temperature) {
		int temp = amdgpu_dpm_get_temperature(adev);

		if (temp < adev->pm.dpm.thermal.min_temp)
			/* switch back the user state */
			dpm_state = adev->pm.dpm.user_state;
	} else {
		if (adev->pm.dpm.thermal.high_to_low)
			/* switch back the user state */
			dpm_state = adev->pm.dpm.user_state;
	}
	mutex_lock(&adev->pm.mutex);
	if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
		adev->pm.dpm.thermal_active = true;
	else
		adev->pm.dpm.thermal_active = false;
	adev->pm.dpm.state = dpm_state;
	mutex_unlock(&adev->pm.mutex);

	amdgpu_pm_compute_clocks(adev);
}
static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev,
						     enum amd_pm_state_type dpm_state)
{
	int i;
	struct amdgpu_ps *ps;
	u32 ui_class;
	bool single_display = (adev->pm.dpm.new_active_crtc_count < 2) ?
		true : false;

	/* check if the vblank period is too short to adjust the mclk */
	if (single_display && adev->powerplay.pp_funcs->vblank_too_short) {
		if (amdgpu_dpm_vblank_too_short(adev))
			single_display = false;
	}

	/* certain older asics have a separate 3D performance state,
	 * so try that first if the user selected performance
	 */
	if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
		dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
	/* balanced states don't exist at the moment */
	if (dpm_state == POWER_STATE_TYPE_BALANCED)
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;

restart_search:
	/* Pick the best power state based on current conditions */
	for (i = 0; i < adev->pm.dpm.num_ps; i++) {
		ps = &adev->pm.dpm.ps[i];
		ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
		switch (dpm_state) {
		/* user states */
		case POWER_STATE_TYPE_BATTERY:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_BALANCED:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_PERFORMANCE:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		/* internal states */
		case POWER_STATE_TYPE_INTERNAL_UVD:
			if (adev->pm.dpm.uvd_ps)
				return adev->pm.dpm.uvd_ps;
			else
				break;
		case POWER_STATE_TYPE_INTERNAL_UVD_SD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_BOOT:
			return adev->pm.dpm.boot_ps;
		case POWER_STATE_TYPE_INTERNAL_THERMAL:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ACPI:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ULV:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_3DPERF:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
				return ps;
			break;
		default:
			break;
		}
	}
	/* use a fallback state if we didn't match */
	switch (dpm_state) {
	case POWER_STATE_TYPE_INTERNAL_UVD_SD:
		dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_UVD_HD:
	case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
	case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
		if (adev->pm.dpm.uvd_ps) {
			return adev->pm.dpm.uvd_ps;
		} else {
			dpm_state = POWER_STATE_TYPE_PERFORMANCE;
			goto restart_search;
		}
	case POWER_STATE_TYPE_INTERNAL_THERMAL:
		dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_ACPI:
		dpm_state = POWER_STATE_TYPE_BATTERY;
		goto restart_search;
	case POWER_STATE_TYPE_BATTERY:
	case POWER_STATE_TYPE_BALANCED:
	case POWER_STATE_TYPE_INTERNAL_3DPERF:
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;
		goto restart_search;
	default:
		break;
	}

	return NULL;
}
static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
{
	struct amdgpu_ps *ps;
	enum amd_pm_state_type dpm_state;
	int ret;
	bool equal = false;

	/* if dpm init failed */
	if (!adev->pm.dpm_enabled)
		return;

	if (adev->pm.dpm.user_state != adev->pm.dpm.state) {
		/* add other state override checks here */
		if ((!adev->pm.dpm.thermal_active) &&
		    (!adev->pm.dpm.uvd_active))
			adev->pm.dpm.state = adev->pm.dpm.user_state;
	}
	dpm_state = adev->pm.dpm.state;

	ps = amdgpu_dpm_pick_power_state(adev, dpm_state);
	if (ps)
		adev->pm.dpm.requested_ps = ps;
	else
		return;

	if (amdgpu_dpm == 1 && adev->powerplay.pp_funcs->print_power_state) {
		printk("switching from power state:\n");
		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
		printk("switching to power state:\n");
		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps);
	}

	/* update whether vce is active */
	ps->vce_active = adev->pm.dpm.vce_active;
	if (adev->powerplay.pp_funcs->display_configuration_changed)
		amdgpu_dpm_display_configuration_changed(adev);

	ret = amdgpu_dpm_pre_set_power_state(adev);
	if (ret)
		return;

	if (adev->powerplay.pp_funcs->check_state_equal) {
		if (0 != amdgpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps, adev->pm.dpm.requested_ps, &equal))
			equal = false;
	}

	if (equal)
		return;

	amdgpu_dpm_set_power_state(adev);
	amdgpu_dpm_post_set_power_state(adev);

	adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
	adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;

	if (adev->powerplay.pp_funcs->force_performance_level) {
		if (adev->pm.dpm.thermal_active) {
			enum amd_dpm_forced_level level = adev->pm.dpm.forced_level;
			/* force low perf level for thermal */
			amdgpu_dpm_force_performance_level(adev, AMD_DPM_FORCED_LEVEL_LOW);
			/* save the user's level */
			adev->pm.dpm.forced_level = level;
		} else {
			/* otherwise, user selected level */
			amdgpu_dpm_force_performance_level(adev, adev->pm.dpm.forced_level);
		}
	}
}
void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
{
	if (adev->powerplay.pp_funcs->powergate_uvd) {
		/* enable/disable UVD */
		mutex_lock(&adev->pm.mutex);
		amdgpu_dpm_powergate_uvd(adev, !enable);
		mutex_unlock(&adev->pm.mutex);
	} else {
		if (enable) {
			mutex_lock(&adev->pm.mutex);
			adev->pm.dpm.uvd_active = true;
			adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
			mutex_unlock(&adev->pm.mutex);
		} else {
			mutex_lock(&adev->pm.mutex);
			adev->pm.dpm.uvd_active = false;
			mutex_unlock(&adev->pm.mutex);
		}
		amdgpu_pm_compute_clocks(adev);
	}
}
void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
{
	if (adev->powerplay.pp_funcs->powergate_vce) {
		/* enable/disable VCE */
		mutex_lock(&adev->pm.mutex);
		amdgpu_dpm_powergate_vce(adev, !enable);
		mutex_unlock(&adev->pm.mutex);
	} else {
		if (enable) {
			mutex_lock(&adev->pm.mutex);
			adev->pm.dpm.vce_active = true;
			/* XXX select vce level based on ring/task */
			adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
			mutex_unlock(&adev->pm.mutex);
			amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
							AMD_CG_STATE_UNGATE);
			amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
							AMD_PG_STATE_UNGATE);
			amdgpu_pm_compute_clocks(adev);
		} else {
			amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
							AMD_PG_STATE_GATE);
			amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
							AMD_CG_STATE_GATE);
			mutex_lock(&adev->pm.mutex);
			adev->pm.dpm.vce_active = false;
			mutex_unlock(&adev->pm.mutex);
			amdgpu_pm_compute_clocks(adev);
		}
	}
}
void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
{
	int i;

	if (adev->powerplay.pp_funcs->print_power_state == NULL)
		return;

	for (i = 0; i < adev->pm.dpm.num_ps; i++)
		amdgpu_dpm_print_power_state(adev, &adev->pm.dpm.ps[i]);
}
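
/* Registers the hwmon device and all of the sysfs files declared above;
 * called once DPM is functional, and safe to call more than once thanks
 * to the sysfs_initialized flag.
 */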
int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
{
	int ret;

	if (adev->pm.sysfs_initialized)
		return 0;

	if (adev->pm.dpm_enabled == 0)
		return 0;

	if (adev->powerplay.pp_funcs->get_temperature == NULL)
		return 0;

	adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
								   DRIVER_NAME, adev,
								   hwmon_groups);
	if (IS_ERR(adev->pm.int_hwmon_dev)) {
		ret = PTR_ERR(adev->pm.int_hwmon_dev);
		dev_err(adev->dev,
			"Unable to register hwmon device: %d\n", ret);
		return ret;
	}

	ret = device_create_file(adev->dev, &dev_attr_power_dpm_state);
	if (ret) {
		DRM_ERROR("failed to create device file for dpm state\n");
		return ret;
	}
	ret = device_create_file(adev->dev, &dev_attr_power_dpm_force_performance_level);
	if (ret) {
		DRM_ERROR("failed to create device file for dpm forced performance level\n");
		return ret;
	}

	ret = device_create_file(adev->dev, &dev_attr_pp_num_states);
	if (ret) {
		DRM_ERROR("failed to create device file pp_num_states\n");
		return ret;
	}
	ret = device_create_file(adev->dev, &dev_attr_pp_cur_state);
	if (ret) {
		DRM_ERROR("failed to create device file pp_cur_state\n");
		return ret;
	}
	ret = device_create_file(adev->dev, &dev_attr_pp_force_state);
	if (ret) {
		DRM_ERROR("failed to create device file pp_force_state\n");
		return ret;
	}
	ret = device_create_file(adev->dev, &dev_attr_pp_table);
	if (ret) {
		DRM_ERROR("failed to create device file pp_table\n");
		return ret;
	}

	ret = device_create_file(adev->dev, &dev_attr_pp_dpm_sclk);
	if (ret) {
		DRM_ERROR("failed to create device file pp_dpm_sclk\n");
		return ret;
	}
	ret = device_create_file(adev->dev, &dev_attr_pp_dpm_mclk);
	if (ret) {
		DRM_ERROR("failed to create device file pp_dpm_mclk\n");
		return ret;
	}
	ret = device_create_file(adev->dev, &dev_attr_pp_dpm_pcie);
	if (ret) {
		DRM_ERROR("failed to create device file pp_dpm_pcie\n");
		return ret;
	}
	ret = device_create_file(adev->dev, &dev_attr_pp_sclk_od);
	if (ret) {
		DRM_ERROR("failed to create device file pp_sclk_od\n");
		return ret;
	}
	ret = device_create_file(adev->dev, &dev_attr_pp_mclk_od);
	if (ret) {
		DRM_ERROR("failed to create device file pp_mclk_od\n");
		return ret;
	}
	ret = device_create_file(adev->dev,
			&dev_attr_pp_gfx_power_profile);
	if (ret) {
		DRM_ERROR("failed to create device file pp_gfx_power_profile\n");
		return ret;
	}
	ret = device_create_file(adev->dev,
			&dev_attr_pp_compute_power_profile);
	if (ret) {
		DRM_ERROR("failed to create device file pp_compute_power_profile\n");
		return ret;
	}

	ret = amdgpu_debugfs_pm_init(adev);
	if (ret) {
		DRM_ERROR("Failed to register debugfs file for dpm!\n");
		return ret;
	}

	adev->pm.sysfs_initialized = true;

	return 0;
}
void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
{
	if (adev->pm.dpm_enabled == 0)
		return;

	if (adev->pm.int_hwmon_dev)
		hwmon_device_unregister(adev->pm.int_hwmon_dev);
	device_remove_file(adev->dev, &dev_attr_power_dpm_state);
	device_remove_file(adev->dev, &dev_attr_power_dpm_force_performance_level);

	device_remove_file(adev->dev, &dev_attr_pp_num_states);
	device_remove_file(adev->dev, &dev_attr_pp_cur_state);
	device_remove_file(adev->dev, &dev_attr_pp_force_state);
	device_remove_file(adev->dev, &dev_attr_pp_table);

	device_remove_file(adev->dev, &dev_attr_pp_dpm_sclk);
	device_remove_file(adev->dev, &dev_attr_pp_dpm_mclk);
	device_remove_file(adev->dev, &dev_attr_pp_dpm_pcie);
	device_remove_file(adev->dev, &dev_attr_pp_sclk_od);
	device_remove_file(adev->dev, &dev_attr_pp_mclk_od);
	device_remove_file(adev->dev,
			&dev_attr_pp_gfx_power_profile);
	device_remove_file(adev->dev,
			&dev_attr_pp_compute_power_profile);
}
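
/* Central clock/power recalculation: flushes the rings, updates the active
 * CRTC mask and AC/DC status, then either dispatches a display config
 * change task (powerplay) or re-picks the power state (legacy DPM).
 */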
void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;
	int i = 0;

	if (!adev->pm.dpm_enabled)
		return;

	if (adev->mode_info.num_crtc)
		amdgpu_display_bandwidth_update(adev);

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (ring && ring->ready)
			amdgpu_fence_wait_empty(ring);
	}

	if (adev->powerplay.pp_funcs->dispatch_tasks) {
		amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL, NULL);
	} else {
		mutex_lock(&adev->pm.mutex);
		adev->pm.dpm.new_active_crtcs = 0;
		adev->pm.dpm.new_active_crtc_count = 0;
		if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
			list_for_each_entry(crtc,
					    &ddev->mode_config.crtc_list, head) {
				amdgpu_crtc = to_amdgpu_crtc(crtc);
				if (amdgpu_crtc->enabled) {
					adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id);
					adev->pm.dpm.new_active_crtc_count++;
				}
			}
		}
		/* update battery/ac status */
		if (power_supply_is_system_supplied() > 0)
			adev->pm.dpm.ac_power = true;
		else
			adev->pm.dpm.ac_power = false;

		amdgpu_dpm_change_power_state_locked(adev);

		mutex_unlock(&adev->pm.mutex);
	}
}
/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *adev)
{
	uint32_t value;
	struct pp_gpu_power query = {0};
	int size;

	/* sanity check PP is enabled */
	if (!(adev->powerplay.pp_funcs &&
	      adev->powerplay.pp_funcs->read_sensor))
		return -EINVAL;

	/* GPU Clocks */
	size = sizeof(value);
	seq_printf(m, "GFX Clocks and Power:\n");
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK, (void *)&value, &size))
		seq_printf(m, "\t%u MHz (MCLK)\n", value/100);
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, (void *)&value, &size))
		seq_printf(m, "\t%u MHz (SCLK)\n", value/100);
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX, (void *)&value, &size))
		seq_printf(m, "\t%u mV (VDDGFX)\n", value);
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, (void *)&value, &size))
		seq_printf(m, "\t%u mV (VDDNB)\n", value);
	size = sizeof(query);
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER, (void *)&query, &size)) {
		seq_printf(m, "\t%u.%u W (VDDC)\n", query.vddc_power >> 8,
				query.vddc_power & 0xff);
		seq_printf(m, "\t%u.%u W (VDDCI)\n", query.vddci_power >> 8,
				query.vddci_power & 0xff);
		seq_printf(m, "\t%u.%u W (max GPU)\n", query.max_gpu_power >> 8,
				query.max_gpu_power & 0xff);
		seq_printf(m, "\t%u.%u W (average GPU)\n", query.average_gpu_power >> 8,
				query.average_gpu_power & 0xff);
	}
	size = sizeof(value);
	seq_printf(m, "\n");

	/* GPU Temp */
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP, (void *)&value, &size))
		seq_printf(m, "GPU Temperature: %u C\n", value/1000);

	/* GPU Load */
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD, (void *)&value, &size))
		seq_printf(m, "GPU Load: %u %%\n", value);
	seq_printf(m, "\n");

	/* UVD clocks */
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) {
		if (!value) {
			seq_printf(m, "UVD: Disabled\n");
		} else {
			seq_printf(m, "UVD: Enabled\n");
			if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
				seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
			if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
				seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
		}
	}
	seq_printf(m, "\n");

	/* VCE clocks */
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) {
		if (!value) {
			seq_printf(m, "VCE: Disabled\n");
		} else {
			seq_printf(m, "VCE: Enabled\n");
			if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size))
				seq_printf(m, "\t%u MHz (ECCLK)\n", value/100);
		}
	}

	return 0;
}
*m
, u32 flags
)
1575 for (i
= 0; clocks
[i
].flag
; i
++)
1576 seq_printf(m
, "\t%s: %s\n", clocks
[i
].name
,
1577 (flags
& clocks
[i
].flag
) ? "On" : "Off");
static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_device *ddev = adev->ddev;
	u32 flags = 0;

	amdgpu_get_clockgating_state(adev, &flags);
	seq_printf(m, "Clock Gating Flags Mask: 0x%x\n", flags);
	amdgpu_parse_cg_state(m, flags);
	seq_printf(m, "\n");

	if (!adev->pm.dpm_enabled) {
		seq_printf(m, "dpm not enabled\n");
		return 0;
	}
	if ((adev->flags & AMD_IS_PX) &&
	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
		seq_printf(m, "PX asic powered off\n");
	} else if (adev->powerplay.pp_funcs->debugfs_print_current_performance_level) {
		mutex_lock(&adev->pm.mutex);
		if (adev->powerplay.pp_funcs->debugfs_print_current_performance_level)
			adev->powerplay.pp_funcs->debugfs_print_current_performance_level(adev, m);
		else
			seq_printf(m, "Debugfs support not implemented for this asic\n");
		mutex_unlock(&adev->pm.mutex);
	} else {
		return amdgpu_debugfs_pm_info_pp(m, adev);
	}

	return 0;
}
static const struct drm_info_list amdgpu_pm_info_list[] = {
	{"amdgpu_pm_info", amdgpu_debugfs_pm_info, 0, NULL},
};
#endif
static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_pm_info_list, ARRAY_SIZE(amdgpu_pm_info_list));
#else
	return 0;
#endif
}