]>
Commit | Line | Data |
---|---|---|
d38ceaf9 | 1 | /* |
9ce6aae1 AD |
2 | * Copyright 2017 Advanced Micro Devices, Inc. |
3 | * | |
d38ceaf9 AD |
4 | * Permission is hereby granted, free of charge, to any person obtaining a |
5 | * copy of this software and associated documentation files (the "Software"), | |
6 | * to deal in the Software without restriction, including without limitation | |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | |
8 | * and/or sell copies of the Software, and to permit persons to whom the | |
9 | * Software is furnished to do so, subject to the following conditions: | |
10 | * | |
11 | * The above copyright notice and this permission notice shall be included in | |
12 | * all copies or substantial portions of the Software. | |
13 | * | |
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | |
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | |
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | |
20 | * OTHER DEALINGS IN THE SOFTWARE. | |
21 | * | |
22 | * Authors: Rafał Miłecki <zajec5@gmail.com> | |
23 | * Alex Deucher <alexdeucher@gmail.com> | |
24 | */ | |
25 | #include <drm/drmP.h> | |
26 | #include "amdgpu.h" | |
27 | #include "amdgpu_drv.h" | |
28 | #include "amdgpu_pm.h" | |
29 | #include "amdgpu_dpm.h" | |
5df58525 | 30 | #include "amdgpu_display.h" |
86ac8803 | 31 | #include "amdgpu_smu.h" |
d38ceaf9 AD |
32 | #include "atom.h" |
33 | #include <linux/power_supply.h> | |
34 | #include <linux/hwmon.h> | |
35 | #include <linux/hwmon-sysfs.h> | |
ddf74e79 | 36 | #include <linux/nospec.h> |
8ca606de GS |
37 | #include "hwmgr.h" |
38 | #define WIDTH_4K 3840 | |
1b5708ff | 39 | |
d38ceaf9 AD |
40 | static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev); |
41 | ||
a8503b15 HR |
42 | static const struct cg_flag_name clocks[] = { |
43 | {AMD_CG_SUPPORT_GFX_MGCG, "Graphics Medium Grain Clock Gating"}, | |
44 | {AMD_CG_SUPPORT_GFX_MGLS, "Graphics Medium Grain memory Light Sleep"}, | |
45 | {AMD_CG_SUPPORT_GFX_CGCG, "Graphics Coarse Grain Clock Gating"}, | |
46 | {AMD_CG_SUPPORT_GFX_CGLS, "Graphics Coarse Grain memory Light Sleep"}, | |
54170226 | 47 | {AMD_CG_SUPPORT_GFX_CGTS, "Graphics Coarse Grain Tree Shader Clock Gating"}, |
a8503b15 HR |
48 | {AMD_CG_SUPPORT_GFX_CGTS_LS, "Graphics Coarse Grain Tree Shader Light Sleep"}, |
49 | {AMD_CG_SUPPORT_GFX_CP_LS, "Graphics Command Processor Light Sleep"}, | |
50 | {AMD_CG_SUPPORT_GFX_RLC_LS, "Graphics Run List Controller Light Sleep"}, | |
12ad27fa HR |
51 | {AMD_CG_SUPPORT_GFX_3D_CGCG, "Graphics 3D Coarse Grain Clock Gating"}, |
52 | {AMD_CG_SUPPORT_GFX_3D_CGLS, "Graphics 3D Coarse Grain memory Light Sleep"}, | |
a8503b15 HR |
53 | {AMD_CG_SUPPORT_MC_LS, "Memory Controller Light Sleep"}, |
54 | {AMD_CG_SUPPORT_MC_MGCG, "Memory Controller Medium Grain Clock Gating"}, | |
55 | {AMD_CG_SUPPORT_SDMA_LS, "System Direct Memory Access Light Sleep"}, | |
56 | {AMD_CG_SUPPORT_SDMA_MGCG, "System Direct Memory Access Medium Grain Clock Gating"}, | |
e96487a6 | 57 | {AMD_CG_SUPPORT_BIF_MGCG, "Bus Interface Medium Grain Clock Gating"}, |
a8503b15 HR |
58 | {AMD_CG_SUPPORT_BIF_LS, "Bus Interface Light Sleep"}, |
59 | {AMD_CG_SUPPORT_UVD_MGCG, "Unified Video Decoder Medium Grain Clock Gating"}, | |
60 | {AMD_CG_SUPPORT_VCE_MGCG, "Video Compression Engine Medium Grain Clock Gating"}, | |
61 | {AMD_CG_SUPPORT_HDP_LS, "Host Data Path Light Sleep"}, | |
62 | {AMD_CG_SUPPORT_HDP_MGCG, "Host Data Path Medium Grain Clock Gating"}, | |
f9abe35c HR |
63 | {AMD_CG_SUPPORT_DRM_MGCG, "Digital Right Management Medium Grain Clock Gating"}, |
64 | {AMD_CG_SUPPORT_DRM_LS, "Digital Right Management Light Sleep"}, | |
a8503b15 | 65 | {AMD_CG_SUPPORT_ROM_MGCG, "Rom Medium Grain Clock Gating"}, |
f9abe35c | 66 | {AMD_CG_SUPPORT_DF_MGCG, "Data Fabric Medium Grain Clock Gating"}, |
a8503b15 HR |
67 | {0, NULL}, |
68 | }; | |
69 | ||
d38ceaf9 AD |
70 | void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev) |
71 | { | |
72 | if (adev->pm.dpm_enabled) { | |
73 | mutex_lock(&adev->pm.mutex); | |
74 | if (power_supply_is_system_supplied() > 0) | |
600ae890 | 75 | adev->pm.ac_power = true; |
d38ceaf9 | 76 | else |
600ae890 | 77 | adev->pm.ac_power = false; |
cd4d7464 | 78 | if (adev->powerplay.pp_funcs->enable_bapm) |
600ae890 | 79 | amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power); |
d38ceaf9 AD |
80 | mutex_unlock(&adev->pm.mutex); |
81 | } | |
82 | } | |
83 | ||
4a5a2de6 KW |
84 | int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor, |
85 | void *data, uint32_t *size) | |
86 | { | |
87 | int ret = 0; | |
88 | ||
89 | if (!data || !size) | |
90 | return -EINVAL; | |
91 | ||
92 | if (is_support_sw_smu(adev)) | |
93 | ret = smu_read_sensor(&adev->smu, sensor, data, size); | |
94 | else { | |
95 | if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor) | |
96 | ret = adev->powerplay.pp_funcs->read_sensor((adev)->powerplay.pp_handle, | |
97 | sensor, data, size); | |
98 | else | |
99 | ret = -EINVAL; | |
100 | } | |
101 | ||
102 | return ret; | |
103 | } | |
104 | ||
ca8d40ca AD |
105 | /** |
106 | * DOC: power_dpm_state | |
107 | * | |
dc85db25 AD |
108 | * The power_dpm_state file is a legacy interface and is only provided for |
109 | * backwards compatibility. The amdgpu driver provides a sysfs API for adjusting | |
110 | * certain power related parameters. The file power_dpm_state is used for this. | |
ca8d40ca | 111 | * It accepts the following arguments: |
dc85db25 | 112 | * |
ca8d40ca | 113 | * - battery |
dc85db25 | 114 | * |
ca8d40ca | 115 | * - balanced |
dc85db25 | 116 | * |
ca8d40ca AD |
117 | * - performance |
118 | * | |
119 | * battery | |
120 | * | |
121 | * On older GPUs, the vbios provided a special power state for battery | |
122 | * operation. Selecting battery switched to this state. This is no | |
123 | * longer provided on newer GPUs so the option does nothing in that case. | |
124 | * | |
125 | * balanced | |
126 | * | |
127 | * On older GPUs, the vbios provided a special power state for balanced | |
128 | * operation. Selecting balanced switched to this state. This is no | |
129 | * longer provided on newer GPUs so the option does nothing in that case. | |
130 | * | |
131 | * performance | |
132 | * | |
133 | * On older GPUs, the vbios provided a special power state for performance | |
134 | * operation. Selecting performance switched to this state. This is no | |
135 | * longer provided on newer GPUs so the option does nothing in that case. | |
136 | * | |
137 | */ | |
138 | ||
d38ceaf9 AD |
139 | static ssize_t amdgpu_get_dpm_state(struct device *dev, |
140 | struct device_attribute *attr, | |
141 | char *buf) | |
142 | { | |
143 | struct drm_device *ddev = dev_get_drvdata(dev); | |
144 | struct amdgpu_device *adev = ddev->dev_private; | |
1b5708ff RZ |
145 | enum amd_pm_state_type pm; |
146 | ||
bb20be75 | 147 | if (is_support_sw_smu(adev) && adev->smu.ppt_funcs->get_current_power_state) |
8554e67d CG |
148 | pm = amdgpu_smu_get_current_power_state(adev); |
149 | else if (adev->powerplay.pp_funcs->get_current_power_state) | |
1b5708ff | 150 | pm = amdgpu_dpm_get_current_power_state(adev); |
cd4d7464 | 151 | else |
1b5708ff | 152 | pm = adev->pm.dpm.user_state; |
d38ceaf9 AD |
153 | |
154 | return snprintf(buf, PAGE_SIZE, "%s\n", | |
155 | (pm == POWER_STATE_TYPE_BATTERY) ? "battery" : | |
156 | (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance"); | |
157 | } | |
158 | ||
159 | static ssize_t amdgpu_set_dpm_state(struct device *dev, | |
160 | struct device_attribute *attr, | |
161 | const char *buf, | |
162 | size_t count) | |
163 | { | |
164 | struct drm_device *ddev = dev_get_drvdata(dev); | |
165 | struct amdgpu_device *adev = ddev->dev_private; | |
1b5708ff | 166 | enum amd_pm_state_type state; |
d38ceaf9 | 167 | |
d38ceaf9 | 168 | if (strncmp("battery", buf, strlen("battery")) == 0) |
1b5708ff | 169 | state = POWER_STATE_TYPE_BATTERY; |
d38ceaf9 | 170 | else if (strncmp("balanced", buf, strlen("balanced")) == 0) |
1b5708ff | 171 | state = POWER_STATE_TYPE_BALANCED; |
d38ceaf9 | 172 | else if (strncmp("performance", buf, strlen("performance")) == 0) |
1b5708ff | 173 | state = POWER_STATE_TYPE_PERFORMANCE; |
d38ceaf9 | 174 | else { |
d38ceaf9 AD |
175 | count = -EINVAL; |
176 | goto fail; | |
177 | } | |
d38ceaf9 | 178 | |
6d07fe7b | 179 | if (adev->powerplay.pp_funcs->dispatch_tasks) { |
39199b80 | 180 | amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_ENABLE_USER_STATE, &state); |
1b5708ff RZ |
181 | } else { |
182 | mutex_lock(&adev->pm.mutex); | |
183 | adev->pm.dpm.user_state = state; | |
184 | mutex_unlock(&adev->pm.mutex); | |
185 | ||
186 | /* Can't set dpm state when the card is off */ | |
187 | if (!(adev->flags & AMD_IS_PX) || | |
188 | (ddev->switch_power_state == DRM_SWITCH_POWER_ON)) | |
189 | amdgpu_pm_compute_clocks(adev); | |
190 | } | |
d38ceaf9 AD |
191 | fail: |
192 | return count; | |
193 | } | |
194 | ||
8567f681 AD |
195 | |
196 | /** | |
197 | * DOC: power_dpm_force_performance_level | |
198 | * | |
199 | * The amdgpu driver provides a sysfs API for adjusting certain power | |
200 | * related parameters. The file power_dpm_force_performance_level is | |
201 | * used for this. It accepts the following arguments: | |
dc85db25 | 202 | * |
8567f681 | 203 | * - auto |
dc85db25 | 204 | * |
8567f681 | 205 | * - low |
dc85db25 | 206 | * |
8567f681 | 207 | * - high |
dc85db25 | 208 | * |
8567f681 | 209 | * - manual |
dc85db25 | 210 | * |
8567f681 | 211 | * - profile_standard |
dc85db25 | 212 | * |
8567f681 | 213 | * - profile_min_sclk |
dc85db25 | 214 | * |
8567f681 | 215 | * - profile_min_mclk |
dc85db25 | 216 | * |
8567f681 AD |
217 | * - profile_peak |
218 | * | |
219 | * auto | |
220 | * | |
221 | * When auto is selected, the driver will attempt to dynamically select | |
222 | * the optimal power profile for current conditions in the driver. | |
223 | * | |
224 | * low | |
225 | * | |
226 | * When low is selected, the clocks are forced to the lowest power state. | |
227 | * | |
228 | * high | |
229 | * | |
230 | * When high is selected, the clocks are forced to the highest power state. | |
231 | * | |
232 | * manual | |
233 | * | |
234 | * When manual is selected, the user can manually adjust which power states | |
235 | * are enabled for each clock domain via the sysfs pp_dpm_mclk, pp_dpm_sclk, | |
236 | * and pp_dpm_pcie files and adjust the power state transition heuristics | |
237 | * via the pp_power_profile_mode sysfs file. | |
238 | * | |
239 | * profile_standard | |
240 | * profile_min_sclk | |
241 | * profile_min_mclk | |
242 | * profile_peak | |
243 | * | |
244 | * When the profiling modes are selected, clock and power gating are | |
245 | * disabled and the clocks are set for different profiling cases. This | |
246 | * mode is recommended for profiling specific work loads where you do | |
247 | * not want clock or power gating for clock fluctuation to interfere | |
248 | * with your results. profile_standard sets the clocks to a fixed clock | |
249 | * level which varies from asic to asic. profile_min_sclk forces the sclk | |
250 | * to the lowest level. profile_min_mclk forces the mclk to the lowest level. | |
251 | * profile_peak sets all clocks (mclk, sclk, pcie) to the highest levels. | |
252 | * | |
253 | */ | |
254 | ||
d38ceaf9 | 255 | static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev, |
1b5708ff RZ |
256 | struct device_attribute *attr, |
257 | char *buf) | |
d38ceaf9 AD |
258 | { |
259 | struct drm_device *ddev = dev_get_drvdata(dev); | |
260 | struct amdgpu_device *adev = ddev->dev_private; | |
cd4d7464 | 261 | enum amd_dpm_forced_level level = 0xff; |
d38ceaf9 | 262 | |
0c67df48 AD |
263 | if ((adev->flags & AMD_IS_PX) && |
264 | (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) | |
265 | return snprintf(buf, PAGE_SIZE, "off\n"); | |
266 | ||
9a431038 CG |
267 | if (is_support_sw_smu(adev)) |
268 | level = smu_get_performance_level(&adev->smu); | |
269 | else if (adev->powerplay.pp_funcs->get_performance_level) | |
cd4d7464 RZ |
270 | level = amdgpu_dpm_get_performance_level(adev); |
271 | else | |
272 | level = adev->pm.dpm.forced_level; | |
273 | ||
e5d03ac2 | 274 | return snprintf(buf, PAGE_SIZE, "%s\n", |
570272d2 RZ |
275 | (level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" : |
276 | (level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" : | |
277 | (level == AMD_DPM_FORCED_LEVEL_HIGH) ? "high" : | |
278 | (level == AMD_DPM_FORCED_LEVEL_MANUAL) ? "manual" : | |
279 | (level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) ? "profile_standard" : | |
280 | (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) ? "profile_min_sclk" : | |
281 | (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) ? "profile_min_mclk" : | |
282 | (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) ? "profile_peak" : | |
283 | "unknown"); | |
d38ceaf9 AD |
284 | } |
285 | ||
286 | static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev, | |
287 | struct device_attribute *attr, | |
288 | const char *buf, | |
289 | size_t count) | |
290 | { | |
291 | struct drm_device *ddev = dev_get_drvdata(dev); | |
292 | struct amdgpu_device *adev = ddev->dev_private; | |
e5d03ac2 | 293 | enum amd_dpm_forced_level level; |
cd4d7464 | 294 | enum amd_dpm_forced_level current_level = 0xff; |
d38ceaf9 AD |
295 | int ret = 0; |
296 | ||
0c67df48 AD |
297 | /* Can't force performance level when the card is off */ |
298 | if ((adev->flags & AMD_IS_PX) && | |
299 | (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) | |
300 | return -EINVAL; | |
301 | ||
9a431038 CG |
302 | if (is_support_sw_smu(adev)) |
303 | current_level = smu_get_performance_level(&adev->smu); | |
304 | else if (adev->powerplay.pp_funcs->get_performance_level) | |
cd4d7464 | 305 | current_level = amdgpu_dpm_get_performance_level(adev); |
3bd58979 | 306 | |
d38ceaf9 | 307 | if (strncmp("low", buf, strlen("low")) == 0) { |
e5d03ac2 | 308 | level = AMD_DPM_FORCED_LEVEL_LOW; |
d38ceaf9 | 309 | } else if (strncmp("high", buf, strlen("high")) == 0) { |
e5d03ac2 | 310 | level = AMD_DPM_FORCED_LEVEL_HIGH; |
d38ceaf9 | 311 | } else if (strncmp("auto", buf, strlen("auto")) == 0) { |
e5d03ac2 | 312 | level = AMD_DPM_FORCED_LEVEL_AUTO; |
f3898ea1 | 313 | } else if (strncmp("manual", buf, strlen("manual")) == 0) { |
e5d03ac2 | 314 | level = AMD_DPM_FORCED_LEVEL_MANUAL; |
570272d2 RZ |
315 | } else if (strncmp("profile_exit", buf, strlen("profile_exit")) == 0) { |
316 | level = AMD_DPM_FORCED_LEVEL_PROFILE_EXIT; | |
317 | } else if (strncmp("profile_standard", buf, strlen("profile_standard")) == 0) { | |
318 | level = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD; | |
319 | } else if (strncmp("profile_min_sclk", buf, strlen("profile_min_sclk")) == 0) { | |
320 | level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK; | |
321 | } else if (strncmp("profile_min_mclk", buf, strlen("profile_min_mclk")) == 0) { | |
322 | level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK; | |
323 | } else if (strncmp("profile_peak", buf, strlen("profile_peak")) == 0) { | |
324 | level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK; | |
325 | } else { | |
d38ceaf9 AD |
326 | count = -EINVAL; |
327 | goto fail; | |
328 | } | |
1b5708ff | 329 | |
bb5a2bdf YT |
330 | if (amdgpu_sriov_vf(adev)) { |
331 | if (amdgim_is_hwperf(adev) && | |
332 | adev->virt.ops->force_dpm_level) { | |
333 | mutex_lock(&adev->pm.mutex); | |
334 | adev->virt.ops->force_dpm_level(adev, level); | |
335 | mutex_unlock(&adev->pm.mutex); | |
336 | return count; | |
337 | } else { | |
338 | return -EINVAL; | |
339 | } | |
340 | } | |
341 | ||
3bd58979 | 342 | if (current_level == level) |
8e7afd34 | 343 | return count; |
3bd58979 | 344 | |
db8a974f EQ |
345 | /* profile_exit setting is valid only when current mode is in profile mode */ |
346 | if (!(current_level & (AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD | | |
347 | AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK | | |
348 | AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK | | |
349 | AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)) && | |
350 | (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)) { | |
351 | pr_err("Currently not in any profile mode!\n"); | |
352 | return -EINVAL; | |
353 | } | |
354 | ||
9a431038 CG |
355 | if (is_support_sw_smu(adev)) { |
356 | mutex_lock(&adev->pm.mutex); | |
357 | if (adev->pm.dpm.thermal_active) { | |
358 | count = -EINVAL; | |
359 | mutex_unlock(&adev->pm.mutex); | |
360 | goto fail; | |
361 | } | |
362 | ret = smu_force_performance_level(&adev->smu, level); | |
363 | if (ret) | |
364 | count = -EINVAL; | |
365 | else | |
366 | adev->pm.dpm.forced_level = level; | |
367 | mutex_unlock(&adev->pm.mutex); | |
368 | } else if (adev->powerplay.pp_funcs->force_performance_level) { | |
1b5708ff | 369 | mutex_lock(&adev->pm.mutex); |
d38ceaf9 AD |
370 | if (adev->pm.dpm.thermal_active) { |
371 | count = -EINVAL; | |
10f950f6 | 372 | mutex_unlock(&adev->pm.mutex); |
d38ceaf9 AD |
373 | goto fail; |
374 | } | |
375 | ret = amdgpu_dpm_force_performance_level(adev, level); | |
376 | if (ret) | |
377 | count = -EINVAL; | |
1b5708ff RZ |
378 | else |
379 | adev->pm.dpm.forced_level = level; | |
380 | mutex_unlock(&adev->pm.mutex); | |
d38ceaf9 | 381 | } |
570272d2 | 382 | |
d38ceaf9 | 383 | fail: |
d38ceaf9 AD |
384 | return count; |
385 | } | |
386 | ||
f3898ea1 EH |
387 | static ssize_t amdgpu_get_pp_num_states(struct device *dev, |
388 | struct device_attribute *attr, | |
389 | char *buf) | |
390 | { | |
391 | struct drm_device *ddev = dev_get_drvdata(dev); | |
392 | struct amdgpu_device *adev = ddev->dev_private; | |
393 | struct pp_states_info data; | |
09895323 | 394 | int i, buf_len, ret; |
f3898ea1 | 395 | |
09895323 KW |
396 | if (is_support_sw_smu(adev)) { |
397 | ret = smu_get_power_num_states(&adev->smu, &data); | |
398 | if (ret) | |
399 | return ret; | |
400 | } else if (adev->powerplay.pp_funcs->get_pp_num_states) | |
f3898ea1 EH |
401 | amdgpu_dpm_get_pp_num_states(adev, &data); |
402 | ||
403 | buf_len = snprintf(buf, PAGE_SIZE, "states: %d\n", data.nums); | |
404 | for (i = 0; i < data.nums; i++) | |
405 | buf_len += snprintf(buf + buf_len, PAGE_SIZE, "%d %s\n", i, | |
406 | (data.states[i] == POWER_STATE_TYPE_INTERNAL_BOOT) ? "boot" : | |
407 | (data.states[i] == POWER_STATE_TYPE_BATTERY) ? "battery" : | |
408 | (data.states[i] == POWER_STATE_TYPE_BALANCED) ? "balanced" : | |
409 | (data.states[i] == POWER_STATE_TYPE_PERFORMANCE) ? "performance" : "default"); | |
410 | ||
411 | return buf_len; | |
412 | } | |
413 | ||
414 | static ssize_t amdgpu_get_pp_cur_state(struct device *dev, | |
415 | struct device_attribute *attr, | |
416 | char *buf) | |
417 | { | |
418 | struct drm_device *ddev = dev_get_drvdata(dev); | |
419 | struct amdgpu_device *adev = ddev->dev_private; | |
420 | struct pp_states_info data; | |
ea2d0bf8 | 421 | struct smu_context *smu = &adev->smu; |
f3898ea1 | 422 | enum amd_pm_state_type pm = 0; |
ea2d0bf8 | 423 | int i = 0, ret = 0; |
f3898ea1 | 424 | |
ea2d0bf8 KW |
425 | if (is_support_sw_smu(adev)) { |
426 | pm = smu_get_current_power_state(smu); | |
427 | ret = smu_get_power_num_states(smu, &data); | |
428 | if (ret) | |
429 | return ret; | |
430 | } else if (adev->powerplay.pp_funcs->get_current_power_state | |
cd4d7464 | 431 | && adev->powerplay.pp_funcs->get_pp_num_states) { |
f3898ea1 EH |
432 | pm = amdgpu_dpm_get_current_power_state(adev); |
433 | amdgpu_dpm_get_pp_num_states(adev, &data); | |
ea2d0bf8 | 434 | } |
f3898ea1 | 435 | |
ea2d0bf8 KW |
436 | for (i = 0; i < data.nums; i++) { |
437 | if (pm == data.states[i]) | |
438 | break; | |
f3898ea1 EH |
439 | } |
440 | ||
ea2d0bf8 KW |
441 | if (i == data.nums) |
442 | i = -EINVAL; | |
443 | ||
f3898ea1 EH |
444 | return snprintf(buf, PAGE_SIZE, "%d\n", i); |
445 | } | |
446 | ||
447 | static ssize_t amdgpu_get_pp_force_state(struct device *dev, | |
448 | struct device_attribute *attr, | |
449 | char *buf) | |
450 | { | |
451 | struct drm_device *ddev = dev_get_drvdata(dev); | |
452 | struct amdgpu_device *adev = ddev->dev_private; | |
f3898ea1 | 453 | |
cd4d7464 RZ |
454 | if (adev->pp_force_state_enabled) |
455 | return amdgpu_get_pp_cur_state(dev, attr, buf); | |
456 | else | |
f3898ea1 EH |
457 | return snprintf(buf, PAGE_SIZE, "\n"); |
458 | } | |
459 | ||
460 | static ssize_t amdgpu_set_pp_force_state(struct device *dev, | |
461 | struct device_attribute *attr, | |
462 | const char *buf, | |
463 | size_t count) | |
464 | { | |
465 | struct drm_device *ddev = dev_get_drvdata(dev); | |
466 | struct amdgpu_device *adev = ddev->dev_private; | |
467 | enum amd_pm_state_type state = 0; | |
041bf022 | 468 | unsigned long idx; |
f3898ea1 EH |
469 | int ret; |
470 | ||
471 | if (strlen(buf) == 1) | |
472 | adev->pp_force_state_enabled = false; | |
0b53f9ad KW |
473 | else if (is_support_sw_smu(adev)) |
474 | adev->pp_force_state_enabled = false; | |
6d07fe7b RZ |
475 | else if (adev->powerplay.pp_funcs->dispatch_tasks && |
476 | adev->powerplay.pp_funcs->get_pp_num_states) { | |
041bf022 | 477 | struct pp_states_info data; |
f3898ea1 | 478 | |
041bf022 DC |
479 | ret = kstrtoul(buf, 0, &idx); |
480 | if (ret || idx >= ARRAY_SIZE(data.states)) { | |
f3898ea1 EH |
481 | count = -EINVAL; |
482 | goto fail; | |
483 | } | |
ddf74e79 | 484 | idx = array_index_nospec(idx, ARRAY_SIZE(data.states)); |
f3898ea1 | 485 | |
041bf022 DC |
486 | amdgpu_dpm_get_pp_num_states(adev, &data); |
487 | state = data.states[idx]; | |
488 | /* only set user selected power states */ | |
489 | if (state != POWER_STATE_TYPE_INTERNAL_BOOT && | |
490 | state != POWER_STATE_TYPE_DEFAULT) { | |
491 | amdgpu_dpm_dispatch_task(adev, | |
39199b80 | 492 | AMD_PP_TASK_ENABLE_USER_STATE, &state); |
041bf022 | 493 | adev->pp_force_state_enabled = true; |
f3898ea1 EH |
494 | } |
495 | } | |
496 | fail: | |
497 | return count; | |
498 | } | |
499 | ||
d54bb40f AD |
500 | /** |
501 | * DOC: pp_table | |
502 | * | |
503 | * The amdgpu driver provides a sysfs API for uploading new powerplay | |
504 | * tables. The file pp_table is used for this. Reading the file | |
505 | * will dump the current power play table. Writing to the file | |
506 | * will attempt to upload a new powerplay table and re-initialize | |
507 | * powerplay using that new table. | |
508 | * | |
509 | */ | |
510 | ||
f3898ea1 EH |
511 | static ssize_t amdgpu_get_pp_table(struct device *dev, |
512 | struct device_attribute *attr, | |
513 | char *buf) | |
514 | { | |
515 | struct drm_device *ddev = dev_get_drvdata(dev); | |
516 | struct amdgpu_device *adev = ddev->dev_private; | |
517 | char *table = NULL; | |
1684d3ba | 518 | int size; |
f3898ea1 | 519 | |
289921b0 KW |
520 | if (is_support_sw_smu(adev)) { |
521 | size = smu_sys_get_pp_table(&adev->smu, (void **)&table); | |
522 | if (size < 0) | |
523 | return size; | |
524 | } | |
525 | else if (adev->powerplay.pp_funcs->get_pp_table) | |
f3898ea1 EH |
526 | size = amdgpu_dpm_get_pp_table(adev, &table); |
527 | else | |
528 | return 0; | |
529 | ||
530 | if (size >= PAGE_SIZE) | |
531 | size = PAGE_SIZE - 1; | |
532 | ||
1684d3ba | 533 | memcpy(buf, table, size); |
f3898ea1 EH |
534 | |
535 | return size; | |
536 | } | |
537 | ||
538 | static ssize_t amdgpu_set_pp_table(struct device *dev, | |
539 | struct device_attribute *attr, | |
540 | const char *buf, | |
541 | size_t count) | |
542 | { | |
543 | struct drm_device *ddev = dev_get_drvdata(dev); | |
544 | struct amdgpu_device *adev = ddev->dev_private; | |
289921b0 | 545 | int ret = 0; |
f3898ea1 | 546 | |
289921b0 KW |
547 | if (is_support_sw_smu(adev)) { |
548 | ret = smu_sys_set_pp_table(&adev->smu, (void *)buf, count); | |
549 | if (ret) | |
550 | return ret; | |
551 | } else if (adev->powerplay.pp_funcs->set_pp_table) | |
f3898ea1 EH |
552 | amdgpu_dpm_set_pp_table(adev, buf, count); |
553 | ||
554 | return count; | |
555 | } | |
556 | ||
4e418c34 AD |
557 | /** |
558 | * DOC: pp_od_clk_voltage | |
559 | * | |
560 | * The amdgpu driver provides a sysfs API for adjusting the clocks and voltages | |
561 | * in each power level within a power state. The pp_od_clk_voltage is used for | |
562 | * this. | |
563 | * | |
d5bf2653 EQ |
564 | * < For Vega10 and previous ASICs > |
565 | * | |
4e418c34 | 566 | * Reading the file will display: |
dc85db25 | 567 | * |
4e418c34 | 568 | * - a list of engine clock levels and voltages labeled OD_SCLK |
dc85db25 | 569 | * |
4e418c34 | 570 | * - a list of memory clock levels and voltages labeled OD_MCLK |
dc85db25 | 571 | * |
4e418c34 AD |
572 | * - a list of valid ranges for sclk, mclk, and voltage labeled OD_RANGE |
573 | * | |
574 | * To manually adjust these settings, first select manual using | |
575 | * power_dpm_force_performance_level. Enter a new value for each | |
576 | * level by writing a string that contains "s/m level clock voltage" to | |
577 | * the file. E.g., "s 1 500 820" will update sclk level 1 to be 500 MHz | |
578 | * at 820 mV; "m 0 350 810" will update mclk level 0 to be 350 MHz at | |
579 | * 810 mV. When you have edited all of the states as needed, write | |
580 | * "c" (commit) to the file to commit your changes. If you want to reset to the | |
581 | * default power levels, write "r" (reset) to the file to reset them. | |
582 | * | |
d5bf2653 EQ |
583 | * |
584 | * < For Vega20 > | |
585 | * | |
586 | * Reading the file will display: | |
587 | * | |
588 | * - minimum and maximum engine clock labeled OD_SCLK | |
589 | * | |
590 | * - maximum memory clock labeled OD_MCLK | |
591 | * | |
b1f82cb2 | 592 | * - three <frequency, voltage> points labeled OD_VDDC_CURVE. |
d5bf2653 EQ |
593 | * They can be used to calibrate the sclk voltage curve. |
594 | * | |
595 | * - a list of valid ranges for sclk, mclk, and voltage curve points | |
596 | * labeled OD_RANGE | |
597 | * | |
598 | * To manually adjust these settings: | |
599 | * | |
600 | * - First select manual using power_dpm_force_performance_level | |
601 | * | |
602 | * - For clock frequency setting, enter a new value by writing a | |
603 | * string that contains "s/m index clock" to the file. The index | |
604 | * should be 0 if to set minimum clock. And 1 if to set maximum | |
605 | * clock. E.g., "s 0 500" will update minimum sclk to be 500 MHz. | |
606 | * "m 1 800" will update maximum mclk to be 800Mhz. | |
607 | * | |
608 | * For sclk voltage curve, enter the new values by writing a | |
b1f82cb2 EQ |
609 | * string that contains "vc point clock voltage" to the file. The |
610 | * points are indexed by 0, 1 and 2. E.g., "vc 0 300 600" will | |
611 | * update point1 with clock set as 300Mhz and voltage as | |
612 | * 600mV. "vc 2 1000 1000" will update point3 with clock set | |
613 | * as 1000Mhz and voltage 1000mV. | |
d5bf2653 EQ |
614 | * |
615 | * - When you have edited all of the states as needed, write "c" (commit) | |
616 | * to the file to commit your changes | |
617 | * | |
618 | * - If you want to reset to the default power levels, write "r" (reset) | |
619 | * to the file to reset them | |
620 | * | |
4e418c34 AD |
621 | */ |
622 | ||
e3933f26 RZ |
623 | static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev, |
624 | struct device_attribute *attr, | |
625 | const char *buf, | |
626 | size_t count) | |
627 | { | |
628 | struct drm_device *ddev = dev_get_drvdata(dev); | |
629 | struct amdgpu_device *adev = ddev->dev_private; | |
630 | int ret; | |
631 | uint32_t parameter_size = 0; | |
632 | long parameter[64]; | |
633 | char buf_cpy[128]; | |
634 | char *tmp_str; | |
635 | char *sub_str; | |
636 | const char delimiter[3] = {' ', '\n', '\0'}; | |
637 | uint32_t type; | |
638 | ||
639 | if (count > 127) | |
640 | return -EINVAL; | |
641 | ||
642 | if (*buf == 's') | |
643 | type = PP_OD_EDIT_SCLK_VDDC_TABLE; | |
644 | else if (*buf == 'm') | |
645 | type = PP_OD_EDIT_MCLK_VDDC_TABLE; | |
646 | else if(*buf == 'r') | |
647 | type = PP_OD_RESTORE_DEFAULT_TABLE; | |
648 | else if (*buf == 'c') | |
649 | type = PP_OD_COMMIT_DPM_TABLE; | |
d5bf2653 EQ |
650 | else if (!strncmp(buf, "vc", 2)) |
651 | type = PP_OD_EDIT_VDDC_CURVE; | |
e3933f26 RZ |
652 | else |
653 | return -EINVAL; | |
654 | ||
655 | memcpy(buf_cpy, buf, count+1); | |
656 | ||
657 | tmp_str = buf_cpy; | |
658 | ||
d5bf2653 EQ |
659 | if (type == PP_OD_EDIT_VDDC_CURVE) |
660 | tmp_str++; | |
e3933f26 RZ |
661 | while (isspace(*++tmp_str)); |
662 | ||
663 | while (tmp_str[0]) { | |
664 | sub_str = strsep(&tmp_str, delimiter); | |
665 | ret = kstrtol(sub_str, 0, ¶meter[parameter_size]); | |
666 | if (ret) | |
667 | return -EINVAL; | |
668 | parameter_size++; | |
669 | ||
670 | while (isspace(*tmp_str)) | |
671 | tmp_str++; | |
672 | } | |
673 | ||
e388cc47 LG |
674 | if (is_support_sw_smu(adev)) { |
675 | ret = smu_od_edit_dpm_table(&adev->smu, type, | |
676 | parameter, parameter_size); | |
e3933f26 | 677 | |
e388cc47 LG |
678 | if (ret) |
679 | return -EINVAL; | |
680 | } else { | |
681 | if (adev->powerplay.pp_funcs->odn_edit_dpm_table) | |
682 | ret = amdgpu_dpm_odn_edit_dpm_table(adev, type, | |
683 | parameter, parameter_size); | |
e3933f26 | 684 | |
e388cc47 | 685 | if (ret) |
e3933f26 | 686 | return -EINVAL; |
e388cc47 LG |
687 | |
688 | if (type == PP_OD_COMMIT_DPM_TABLE) { | |
689 | if (adev->powerplay.pp_funcs->dispatch_tasks) { | |
690 | amdgpu_dpm_dispatch_task(adev, | |
691 | AMD_PP_TASK_READJUST_POWER_STATE, | |
692 | NULL); | |
693 | return count; | |
694 | } else { | |
695 | return -EINVAL; | |
696 | } | |
e3933f26 RZ |
697 | } |
698 | } | |
699 | ||
700 | return count; | |
701 | } | |
702 | ||
703 | static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev, | |
704 | struct device_attribute *attr, | |
705 | char *buf) | |
706 | { | |
707 | struct drm_device *ddev = dev_get_drvdata(dev); | |
708 | struct amdgpu_device *adev = ddev->dev_private; | |
709 | uint32_t size = 0; | |
710 | ||
c4d74f53 LG |
711 | if (is_support_sw_smu(adev)) { |
712 | size = smu_print_clk_levels(&adev->smu, OD_SCLK, buf); | |
713 | size += smu_print_clk_levels(&adev->smu, OD_MCLK, buf+size); | |
714 | size += smu_print_clk_levels(&adev->smu, OD_VDDC_CURVE, buf+size); | |
715 | size += smu_print_clk_levels(&adev->smu, OD_RANGE, buf+size); | |
716 | return size; | |
717 | } else if (adev->powerplay.pp_funcs->print_clock_levels) { | |
e3933f26 RZ |
718 | size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf); |
719 | size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf+size); | |
d5bf2653 | 720 | size += amdgpu_dpm_print_clock_levels(adev, OD_VDDC_CURVE, buf+size); |
a3c991f9 | 721 | size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf+size); |
e3933f26 RZ |
722 | return size; |
723 | } else { | |
724 | return snprintf(buf, PAGE_SIZE, "\n"); | |
725 | } | |
726 | ||
727 | } | |
728 | ||
7ca881a8 EQ |
729 | /** |
730 | * DOC: ppfeatures | |
731 | * | |
732 | * The amdgpu driver provides a sysfs API for adjusting what powerplay | |
733 | * features to be enabled. The file ppfeatures is used for this. And | |
734 | * this is only available for Vega10 and later dGPUs. | |
735 | * | |
736 | * Reading back the file will show you the followings: | |
737 | * - Current ppfeature masks | |
738 | * - List of the all supported powerplay features with their naming, | |
739 | * bitmasks and enablement status('Y'/'N' means "enabled"/"disabled"). | |
740 | * | |
741 | * To manually enable or disable a specific feature, just set or clear | |
742 | * the corresponding bit from original ppfeature masks and input the | |
743 | * new ppfeature masks. | |
744 | */ | |
745 | static ssize_t amdgpu_set_ppfeature_status(struct device *dev, | |
746 | struct device_attribute *attr, | |
747 | const char *buf, | |
748 | size_t count) | |
749 | { | |
750 | struct drm_device *ddev = dev_get_drvdata(dev); | |
751 | struct amdgpu_device *adev = ddev->dev_private; | |
752 | uint64_t featuremask; | |
753 | int ret; | |
754 | ||
755 | ret = kstrtou64(buf, 0, &featuremask); | |
756 | if (ret) | |
757 | return -EINVAL; | |
758 | ||
759 | pr_debug("featuremask = 0x%llx\n", featuremask); | |
760 | ||
761 | if (adev->powerplay.pp_funcs->set_ppfeature_status) { | |
762 | ret = amdgpu_dpm_set_ppfeature_status(adev, featuremask); | |
763 | if (ret) | |
764 | return -EINVAL; | |
765 | } | |
766 | ||
767 | return count; | |
768 | } | |
769 | ||
770 | static ssize_t amdgpu_get_ppfeature_status(struct device *dev, | |
771 | struct device_attribute *attr, | |
772 | char *buf) | |
773 | { | |
774 | struct drm_device *ddev = dev_get_drvdata(dev); | |
775 | struct amdgpu_device *adev = ddev->dev_private; | |
776 | ||
777 | if (adev->powerplay.pp_funcs->get_ppfeature_status) | |
778 | return amdgpu_dpm_get_ppfeature_status(adev, buf); | |
779 | ||
780 | return snprintf(buf, PAGE_SIZE, "\n"); | |
781 | } | |
782 | ||
271dc908 | 783 | /** |
d7e28e2d EQ |
784 | * DOC: pp_dpm_sclk pp_dpm_mclk pp_dpm_socclk pp_dpm_fclk pp_dpm_dcefclk |
785 | * pp_dpm_pcie | |
271dc908 AD |
786 | * |
787 | * The amdgpu driver provides a sysfs API for adjusting what power levels | |
788 | * are enabled for a given power state. The files pp_dpm_sclk, pp_dpm_mclk, | |
d7e28e2d EQ |
789 | * pp_dpm_socclk, pp_dpm_fclk, pp_dpm_dcefclk and pp_dpm_pcie are used for |
790 | * this. | |
d7337ca2 | 791 | * |
d7e28e2d EQ |
792 | * pp_dpm_socclk and pp_dpm_dcefclk interfaces are only available for |
793 | * Vega10 and later ASICs. | |
828e37ef | 794 | * pp_dpm_fclk interface is only available for Vega20 and later ASICs. |
271dc908 AD |
795 | * |
796 | * Reading back the files will show you the available power levels within | |
797 | * the power state and the clock information for those levels. | |
798 | * | |
 * To manually adjust these states, first select manual using
 * power_dpm_force_performance_level.
 * Secondly, enter a new value for each level by inputting a string that
 * contains "echo xx xx xx > pp_dpm_sclk/mclk/pcie".
 * E.g., "echo 4 5 6 > pp_dpm_sclk" will enable sclk levels 4, 5, and 6.
d7e28e2d EQ |
804 | * |
805 | * NOTE: change to the dcefclk max dpm level is not supported now | |
271dc908 AD |
806 | */ |
807 | ||
f3898ea1 EH |
808 | static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev, |
809 | struct device_attribute *attr, | |
810 | char *buf) | |
811 | { | |
812 | struct drm_device *ddev = dev_get_drvdata(dev); | |
813 | struct amdgpu_device *adev = ddev->dev_private; | |
f3898ea1 | 814 | |
bb5a2bdf YT |
815 | if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev) && |
816 | adev->virt.ops->get_pp_clk) | |
817 | return adev->virt.ops->get_pp_clk(adev, PP_SCLK, buf); | |
818 | ||
dc8e3a0c | 819 | if (is_support_sw_smu(adev)) |
86ac8803 LG |
820 | return smu_print_clk_levels(&adev->smu, PP_SCLK, buf); |
821 | else if (adev->powerplay.pp_funcs->print_clock_levels) | |
cd4d7464 RZ |
822 | return amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf); |
823 | else | |
824 | return snprintf(buf, PAGE_SIZE, "\n"); | |
f3898ea1 EH |
825 | } |
826 | ||
4b4bd048 KC |
827 | /* |
828 | * Worst case: 32 bits individually specified, in octal at 12 characters | |
829 | * per line (+1 for \n). | |
830 | */ | |
831 | #define AMDGPU_MASK_BUF_MAX (32 * 13) | |
832 | ||
833 | static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask) | |
f3898ea1 | 834 | { |
f3898ea1 EH |
835 | int ret; |
836 | long level; | |
48edde39 | 837 | char *sub_str = NULL; |
838 | char *tmp; | |
4b4bd048 | 839 | char buf_cpy[AMDGPU_MASK_BUF_MAX + 1]; |
48edde39 | 840 | const char delimiter[3] = {' ', '\n', '\0'}; |
4b4bd048 | 841 | size_t bytes; |
f3898ea1 | 842 | |
4b4bd048 KC |
843 | *mask = 0; |
844 | ||
845 | bytes = min(count, sizeof(buf_cpy) - 1); | |
846 | memcpy(buf_cpy, buf, bytes); | |
847 | buf_cpy[bytes] = '\0'; | |
48edde39 | 848 | tmp = buf_cpy; |
849 | while (tmp[0]) { | |
4b4bd048 | 850 | sub_str = strsep(&tmp, delimiter); |
48edde39 | 851 | if (strlen(sub_str)) { |
852 | ret = kstrtol(sub_str, 0, &level); | |
4b4bd048 KC |
853 | if (ret) |
854 | return -EINVAL; | |
855 | *mask |= 1 << level; | |
48edde39 | 856 | } else |
857 | break; | |
f3898ea1 | 858 | } |
4b4bd048 KC |
859 | |
860 | return 0; | |
861 | } | |
862 | ||
863 | static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev, | |
864 | struct device_attribute *attr, | |
865 | const char *buf, | |
866 | size_t count) | |
867 | { | |
868 | struct drm_device *ddev = dev_get_drvdata(dev); | |
869 | struct amdgpu_device *adev = ddev->dev_private; | |
870 | int ret; | |
871 | uint32_t mask = 0; | |
872 | ||
873 | ret = amdgpu_read_mask(buf, count, &mask); | |
874 | if (ret) | |
875 | return ret; | |
876 | ||
7292fd7d LG |
877 | if (is_support_sw_smu(adev)) |
878 | ret = smu_force_clk_levels(&adev->smu, PP_SCLK, mask); | |
879 | else if (adev->powerplay.pp_funcs->force_clock_level) | |
241dbbb1 EQ |
880 | ret = amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask); |
881 | ||
882 | if (ret) | |
883 | return -EINVAL; | |
cd4d7464 | 884 | |
f3898ea1 EH |
885 | return count; |
886 | } | |
887 | ||
888 | static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev, | |
889 | struct device_attribute *attr, | |
890 | char *buf) | |
891 | { | |
892 | struct drm_device *ddev = dev_get_drvdata(dev); | |
893 | struct amdgpu_device *adev = ddev->dev_private; | |
f3898ea1 | 894 | |
dc8e3a0c | 895 | if (is_support_sw_smu(adev)) |
86ac8803 LG |
896 | return smu_print_clk_levels(&adev->smu, PP_MCLK, buf); |
897 | else if (adev->powerplay.pp_funcs->print_clock_levels) | |
cd4d7464 RZ |
898 | return amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf); |
899 | else | |
900 | return snprintf(buf, PAGE_SIZE, "\n"); | |
f3898ea1 EH |
901 | } |
902 | ||
903 | static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev, | |
904 | struct device_attribute *attr, | |
905 | const char *buf, | |
906 | size_t count) | |
907 | { | |
908 | struct drm_device *ddev = dev_get_drvdata(dev); | |
909 | struct amdgpu_device *adev = ddev->dev_private; | |
910 | int ret; | |
48edde39 | 911 | uint32_t mask = 0; |
f3898ea1 | 912 | |
4b4bd048 KC |
913 | ret = amdgpu_read_mask(buf, count, &mask); |
914 | if (ret) | |
915 | return ret; | |
48edde39 | 916 | |
7292fd7d LG |
917 | if (is_support_sw_smu(adev)) |
918 | ret = smu_force_clk_levels(&adev->smu, PP_MCLK, mask); | |
919 | else if (adev->powerplay.pp_funcs->force_clock_level) | |
241dbbb1 EQ |
920 | ret = amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask); |
921 | ||
922 | if (ret) | |
923 | return -EINVAL; | |
cd4d7464 | 924 | |
f3898ea1 EH |
925 | return count; |
926 | } | |
927 | ||
d7337ca2 EQ |
928 | static ssize_t amdgpu_get_pp_dpm_socclk(struct device *dev, |
929 | struct device_attribute *attr, | |
930 | char *buf) | |
931 | { | |
932 | struct drm_device *ddev = dev_get_drvdata(dev); | |
933 | struct amdgpu_device *adev = ddev->dev_private; | |
934 | ||
09676101 LG |
935 | if (is_support_sw_smu(adev)) |
936 | return smu_print_clk_levels(&adev->smu, PP_SOCCLK, buf); | |
937 | else if (adev->powerplay.pp_funcs->print_clock_levels) | |
d7337ca2 EQ |
938 | return amdgpu_dpm_print_clock_levels(adev, PP_SOCCLK, buf); |
939 | else | |
940 | return snprintf(buf, PAGE_SIZE, "\n"); | |
941 | } | |
942 | ||
943 | static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev, | |
944 | struct device_attribute *attr, | |
945 | const char *buf, | |
946 | size_t count) | |
947 | { | |
948 | struct drm_device *ddev = dev_get_drvdata(dev); | |
949 | struct amdgpu_device *adev = ddev->dev_private; | |
950 | int ret; | |
951 | uint32_t mask = 0; | |
952 | ||
953 | ret = amdgpu_read_mask(buf, count, &mask); | |
954 | if (ret) | |
955 | return ret; | |
956 | ||
4b77faaf LG |
957 | if (is_support_sw_smu(adev)) |
958 | ret = smu_force_clk_levels(&adev->smu, PP_SOCCLK, mask); | |
959 | else if (adev->powerplay.pp_funcs->force_clock_level) | |
d7337ca2 EQ |
960 | ret = amdgpu_dpm_force_clock_level(adev, PP_SOCCLK, mask); |
961 | ||
962 | if (ret) | |
963 | return -EINVAL; | |
964 | ||
965 | return count; | |
966 | } | |
967 | ||
828e37ef EQ |
968 | static ssize_t amdgpu_get_pp_dpm_fclk(struct device *dev, |
969 | struct device_attribute *attr, | |
970 | char *buf) | |
971 | { | |
972 | struct drm_device *ddev = dev_get_drvdata(dev); | |
973 | struct amdgpu_device *adev = ddev->dev_private; | |
974 | ||
09676101 LG |
975 | if (is_support_sw_smu(adev)) |
976 | return smu_print_clk_levels(&adev->smu, PP_FCLK, buf); | |
977 | else if (adev->powerplay.pp_funcs->print_clock_levels) | |
828e37ef EQ |
978 | return amdgpu_dpm_print_clock_levels(adev, PP_FCLK, buf); |
979 | else | |
980 | return snprintf(buf, PAGE_SIZE, "\n"); | |
981 | } | |
982 | ||
983 | static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev, | |
984 | struct device_attribute *attr, | |
985 | const char *buf, | |
986 | size_t count) | |
987 | { | |
988 | struct drm_device *ddev = dev_get_drvdata(dev); | |
989 | struct amdgpu_device *adev = ddev->dev_private; | |
990 | int ret; | |
991 | uint32_t mask = 0; | |
992 | ||
993 | ret = amdgpu_read_mask(buf, count, &mask); | |
994 | if (ret) | |
995 | return ret; | |
996 | ||
4b77faaf LG |
997 | if (is_support_sw_smu(adev)) |
998 | ret = smu_force_clk_levels(&adev->smu, PP_FCLK, mask); | |
999 | else if (adev->powerplay.pp_funcs->force_clock_level) | |
828e37ef EQ |
1000 | ret = amdgpu_dpm_force_clock_level(adev, PP_FCLK, mask); |
1001 | ||
1002 | if (ret) | |
1003 | return -EINVAL; | |
1004 | ||
1005 | return count; | |
1006 | } | |
1007 | ||
d7e28e2d EQ |
1008 | static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev, |
1009 | struct device_attribute *attr, | |
1010 | char *buf) | |
1011 | { | |
1012 | struct drm_device *ddev = dev_get_drvdata(dev); | |
1013 | struct amdgpu_device *adev = ddev->dev_private; | |
1014 | ||
09676101 LG |
1015 | if (is_support_sw_smu(adev)) |
1016 | return smu_print_clk_levels(&adev->smu, PP_DCEFCLK, buf); | |
1017 | else if (adev->powerplay.pp_funcs->print_clock_levels) | |
d7e28e2d EQ |
1018 | return amdgpu_dpm_print_clock_levels(adev, PP_DCEFCLK, buf); |
1019 | else | |
1020 | return snprintf(buf, PAGE_SIZE, "\n"); | |
1021 | } | |
1022 | ||
1023 | static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev, | |
1024 | struct device_attribute *attr, | |
1025 | const char *buf, | |
1026 | size_t count) | |
1027 | { | |
1028 | struct drm_device *ddev = dev_get_drvdata(dev); | |
1029 | struct amdgpu_device *adev = ddev->dev_private; | |
1030 | int ret; | |
1031 | uint32_t mask = 0; | |
1032 | ||
1033 | ret = amdgpu_read_mask(buf, count, &mask); | |
1034 | if (ret) | |
1035 | return ret; | |
1036 | ||
4b77faaf LG |
1037 | if (is_support_sw_smu(adev)) |
1038 | ret = smu_force_clk_levels(&adev->smu, PP_DCEFCLK, mask); | |
1039 | else if (adev->powerplay.pp_funcs->force_clock_level) | |
d7e28e2d EQ |
1040 | ret = amdgpu_dpm_force_clock_level(adev, PP_DCEFCLK, mask); |
1041 | ||
1042 | if (ret) | |
1043 | return -EINVAL; | |
1044 | ||
1045 | return count; | |
1046 | } | |
1047 | ||
f3898ea1 EH |
1048 | static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev, |
1049 | struct device_attribute *attr, | |
1050 | char *buf) | |
1051 | { | |
1052 | struct drm_device *ddev = dev_get_drvdata(dev); | |
1053 | struct amdgpu_device *adev = ddev->dev_private; | |
f3898ea1 | 1054 | |
dfbd1187 LG |
1055 | if (is_support_sw_smu(adev)) |
1056 | return smu_print_clk_levels(&adev->smu, PP_PCIE, buf); | |
1057 | else if (adev->powerplay.pp_funcs->print_clock_levels) | |
cd4d7464 RZ |
1058 | return amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf); |
1059 | else | |
1060 | return snprintf(buf, PAGE_SIZE, "\n"); | |
f3898ea1 EH |
1061 | } |
1062 | ||
1063 | static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev, | |
1064 | struct device_attribute *attr, | |
1065 | const char *buf, | |
1066 | size_t count) | |
1067 | { | |
1068 | struct drm_device *ddev = dev_get_drvdata(dev); | |
1069 | struct amdgpu_device *adev = ddev->dev_private; | |
1070 | int ret; | |
48edde39 | 1071 | uint32_t mask = 0; |
f3898ea1 | 1072 | |
4b4bd048 KC |
1073 | ret = amdgpu_read_mask(buf, count, &mask); |
1074 | if (ret) | |
1075 | return ret; | |
48edde39 | 1076 | |
dfbd1187 LG |
1077 | if (is_support_sw_smu(adev)) |
1078 | ret = smu_force_clk_levels(&adev->smu, PP_PCIE, mask); | |
1079 | else if (adev->powerplay.pp_funcs->force_clock_level) | |
241dbbb1 EQ |
1080 | ret = amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask); |
1081 | ||
1082 | if (ret) | |
1083 | return -EINVAL; | |
cd4d7464 | 1084 | |
f3898ea1 EH |
1085 | return count; |
1086 | } | |
1087 | ||
428bafa8 EH |
1088 | static ssize_t amdgpu_get_pp_sclk_od(struct device *dev, |
1089 | struct device_attribute *attr, | |
1090 | char *buf) | |
1091 | { | |
1092 | struct drm_device *ddev = dev_get_drvdata(dev); | |
1093 | struct amdgpu_device *adev = ddev->dev_private; | |
1094 | uint32_t value = 0; | |
1095 | ||
6d7c8302 LG |
1096 | if (is_support_sw_smu(adev)) |
1097 | value = smu_get_od_percentage(&(adev->smu), OD_SCLK); | |
1098 | else if (adev->powerplay.pp_funcs->get_sclk_od) | |
428bafa8 EH |
1099 | value = amdgpu_dpm_get_sclk_od(adev); |
1100 | ||
1101 | return snprintf(buf, PAGE_SIZE, "%d\n", value); | |
1102 | } | |
1103 | ||
1104 | static ssize_t amdgpu_set_pp_sclk_od(struct device *dev, | |
1105 | struct device_attribute *attr, | |
1106 | const char *buf, | |
1107 | size_t count) | |
1108 | { | |
1109 | struct drm_device *ddev = dev_get_drvdata(dev); | |
1110 | struct amdgpu_device *adev = ddev->dev_private; | |
1111 | int ret; | |
1112 | long int value; | |
1113 | ||
1114 | ret = kstrtol(buf, 0, &value); | |
1115 | ||
1116 | if (ret) { | |
1117 | count = -EINVAL; | |
1118 | goto fail; | |
1119 | } | |
1120 | ||
e9c5b46e LG |
1121 | if (is_support_sw_smu(adev)) { |
1122 | value = smu_set_od_percentage(&(adev->smu), OD_SCLK, (uint32_t)value); | |
cd4d7464 | 1123 | } else { |
e9c5b46e LG |
1124 | if (adev->powerplay.pp_funcs->set_sclk_od) |
1125 | amdgpu_dpm_set_sclk_od(adev, (uint32_t)value); | |
1126 | ||
1127 | if (adev->powerplay.pp_funcs->dispatch_tasks) { | |
1128 | amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL); | |
1129 | } else { | |
1130 | adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps; | |
1131 | amdgpu_pm_compute_clocks(adev); | |
1132 | } | |
8b2e574d | 1133 | } |
428bafa8 EH |
1134 | |
1135 | fail: | |
1136 | return count; | |
1137 | } | |
1138 | ||
f2bdc05f EH |
1139 | static ssize_t amdgpu_get_pp_mclk_od(struct device *dev, |
1140 | struct device_attribute *attr, | |
1141 | char *buf) | |
1142 | { | |
1143 | struct drm_device *ddev = dev_get_drvdata(dev); | |
1144 | struct amdgpu_device *adev = ddev->dev_private; | |
1145 | uint32_t value = 0; | |
1146 | ||
6d7c8302 LG |
1147 | if (is_support_sw_smu(adev)) |
1148 | value = smu_get_od_percentage(&(adev->smu), OD_MCLK); | |
1149 | else if (adev->powerplay.pp_funcs->get_mclk_od) | |
f2bdc05f | 1150 | value = amdgpu_dpm_get_mclk_od(adev); |
f2bdc05f EH |
1151 | |
1152 | return snprintf(buf, PAGE_SIZE, "%d\n", value); | |
1153 | } | |
1154 | ||
1155 | static ssize_t amdgpu_set_pp_mclk_od(struct device *dev, | |
1156 | struct device_attribute *attr, | |
1157 | const char *buf, | |
1158 | size_t count) | |
1159 | { | |
1160 | struct drm_device *ddev = dev_get_drvdata(dev); | |
1161 | struct amdgpu_device *adev = ddev->dev_private; | |
1162 | int ret; | |
1163 | long int value; | |
1164 | ||
1165 | ret = kstrtol(buf, 0, &value); | |
1166 | ||
1167 | if (ret) { | |
1168 | count = -EINVAL; | |
1169 | goto fail; | |
1170 | } | |
1171 | ||
e9c5b46e LG |
1172 | if (is_support_sw_smu(adev)) { |
1173 | value = smu_set_od_percentage(&(adev->smu), OD_MCLK, (uint32_t)value); | |
cd4d7464 | 1174 | } else { |
e9c5b46e LG |
1175 | if (adev->powerplay.pp_funcs->set_mclk_od) |
1176 | amdgpu_dpm_set_mclk_od(adev, (uint32_t)value); | |
1177 | ||
1178 | if (adev->powerplay.pp_funcs->dispatch_tasks) { | |
1179 | amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL); | |
1180 | } else { | |
1181 | adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps; | |
1182 | amdgpu_pm_compute_clocks(adev); | |
1183 | } | |
f2bdc05f EH |
1184 | } |
1185 | ||
1186 | fail: | |
1187 | return count; | |
1188 | } | |
1189 | ||
6b2576f5 AD |
1190 | /** |
1191 | * DOC: pp_power_profile_mode | |
1192 | * | |
1193 | * The amdgpu driver provides a sysfs API for adjusting the heuristics | |
1194 | * related to switching between power levels in a power state. The file | |
1195 | * pp_power_profile_mode is used for this. | |
1196 | * | |
1197 | * Reading this file outputs a list of all of the predefined power profiles | |
1198 | * and the relevant heuristics settings for that profile. | |
1199 | * | |
1200 | * To select a profile or create a custom profile, first select manual using | |
1201 | * power_dpm_force_performance_level. Writing the number of a predefined | |
1202 | * profile to pp_power_profile_mode will enable those heuristics. To | |
1203 | * create a custom set of heuristics, write a string of numbers to the file | |
1204 | * starting with the number of the custom profile along with a setting | |
1205 | * for each heuristic parameter. Due to differences across asic families | |
1206 | * the heuristic parameters vary from family to family. | |
1207 | * | |
1208 | */ | |
1209 | ||
37c5c4db RZ |
1210 | static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev, |
1211 | struct device_attribute *attr, | |
1212 | char *buf) | |
1213 | { | |
1214 | struct drm_device *ddev = dev_get_drvdata(dev); | |
1215 | struct amdgpu_device *adev = ddev->dev_private; | |
1216 | ||
16177fd0 CG |
1217 | if (is_support_sw_smu(adev)) |
1218 | return smu_get_power_profile_mode(&adev->smu, buf); | |
1219 | else if (adev->powerplay.pp_funcs->get_power_profile_mode) | |
37c5c4db RZ |
1220 | return amdgpu_dpm_get_power_profile_mode(adev, buf); |
1221 | ||
1222 | return snprintf(buf, PAGE_SIZE, "\n"); | |
1223 | } | |
1224 | ||
1225 | ||
/*
 * Select a predefined power profile, or (for PP_SMC_POWER_PROFILE_CUSTOM)
 * parse a whitespace-separated list of custom heuristic parameters that
 * follows the profile number. Returns count on success, -EINVAL otherwise.
 */
static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	int ret = 0xff;
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	uint32_t parameter_size = 0;
	long parameter[64];
	char *sub_str, buf_cpy[128];
	char *tmp_str;
	uint32_t i = 0;
	char tmp[2];
	long int profile_mode = 0;
	const char delimiter[3] = {' ', '\n', '\0'};

	/* The profile number is read from the first character only. */
	tmp[0] = *(buf);
	tmp[1] = '\0';
	ret = kstrtol(tmp, 0, &profile_mode);
	if (ret)
		goto fail;

	if (profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
		/* Bound the input so it fits buf_cpy[128]. */
		if (count < 2 || count > 127)
			return -EINVAL;
		/* Skip the profile digit plus any following whitespace;
		 * i counts only the skipped whitespace. */
		while (isspace(*++buf))
			i++;
		/* NOTE(review): count-i is one byte more than the input
		 * remaining after the digit and i spaces were consumed —
		 * relies on the sysfs page being NUL-padded; verify. */
		memcpy(buf_cpy, buf, count-i);
		tmp_str = buf_cpy;
		while (tmp_str[0]) {
			sub_str = strsep(&tmp_str, delimiter);
			ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
			if (ret) {
				count = -EINVAL;
				goto fail;
			}
			/* NOTE(review): parameter_size is not checked against
			 * ARRAY_SIZE(parameter); and if the input has no
			 * trailing delimiter, strsep leaves tmp_str == NULL,
			 * making *tmp_str below a NULL dereference — confirm
			 * inputs always end in '\n'. */
			parameter_size++;
			while (isspace(*tmp_str))
				tmp_str++;
		}
	}
	/* The profile number itself rides at the end of the parameter list. */
	parameter[parameter_size] = profile_mode;
	if (is_support_sw_smu(adev))
		ret = smu_set_power_profile_mode(&adev->smu, parameter, parameter_size);
	else if (adev->powerplay.pp_funcs->set_power_profile_mode)
		ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size);
	if (!ret)
		return count;
fail:
	return -EINVAL;
}
1278 | ||
b374d82d TSD |
1279 | /** |
1280 | * DOC: busy_percent | |
1281 | * | |
1282 | * The amdgpu driver provides a sysfs API for reading how busy the GPU | |
1283 | * is as a percentage. The file gpu_busy_percent is used for this. | |
1284 | * The SMU firmware computes a percentage of load based on the | |
1285 | * aggregate activity level in the IP cores. | |
1286 | */ | |
1287 | static ssize_t amdgpu_get_busy_percent(struct device *dev, | |
1288 | struct device_attribute *attr, | |
1289 | char *buf) | |
1290 | { | |
1291 | struct drm_device *ddev = dev_get_drvdata(dev); | |
1292 | struct amdgpu_device *adev = ddev->dev_private; | |
1293 | int r, value, size = sizeof(value); | |
1294 | ||
b374d82d TSD |
1295 | /* read the IP busy sensor */ |
1296 | r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD, | |
1297 | (void *)&value, &size); | |
4a5a2de6 | 1298 | |
b374d82d TSD |
1299 | if (r) |
1300 | return r; | |
1301 | ||
1302 | return snprintf(buf, PAGE_SIZE, "%d\n", value); | |
1303 | } | |
1304 | ||
b45e18ac KR |
1305 | /** |
1306 | * DOC: pcie_bw | |
1307 | * | |
1308 | * The amdgpu driver provides a sysfs API for estimating how much data | |
1309 | * has been received and sent by the GPU in the last second through PCIe. | |
1310 | * The file pcie_bw is used for this. | |
1311 | * The Perf counters count the number of received and sent messages and return | |
1312 | * those values, as well as the maximum payload size of a PCIe packet (mps). | |
1313 | * Note that it is not possible to easily and quickly obtain the size of each | |
1314 | * packet transmitted, so we output the max payload size (mps) to allow for | |
1315 | * quick estimation of the PCIe bandwidth usage | |
1316 | */ | |
1317 | static ssize_t amdgpu_get_pcie_bw(struct device *dev, | |
1318 | struct device_attribute *attr, | |
1319 | char *buf) | |
1320 | { | |
1321 | struct drm_device *ddev = dev_get_drvdata(dev); | |
1322 | struct amdgpu_device *adev = ddev->dev_private; | |
1323 | uint64_t count0, count1; | |
1324 | ||
1325 | amdgpu_asic_get_pcie_usage(adev, &count0, &count1); | |
1326 | return snprintf(buf, PAGE_SIZE, "%llu %llu %i\n", | |
1327 | count0, count1, pcie_get_mps(adev->pdev)); | |
1328 | } | |
1329 | ||
d38ceaf9 AD |
/* sysfs device attributes wiring the show/store handlers defined above. */
static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, amdgpu_get_dpm_state, amdgpu_set_dpm_state);
static DEVICE_ATTR(power_dpm_force_performance_level, S_IRUGO | S_IWUSR,
		amdgpu_get_dpm_forced_performance_level,
		amdgpu_set_dpm_forced_performance_level);
static DEVICE_ATTR(pp_num_states, S_IRUGO, amdgpu_get_pp_num_states, NULL);
static DEVICE_ATTR(pp_cur_state, S_IRUGO, amdgpu_get_pp_cur_state, NULL);
static DEVICE_ATTR(pp_force_state, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_force_state,
		amdgpu_set_pp_force_state);
static DEVICE_ATTR(pp_table, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_table,
		amdgpu_set_pp_table);
static DEVICE_ATTR(pp_dpm_sclk, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_dpm_sclk,
		amdgpu_set_pp_dpm_sclk);
static DEVICE_ATTR(pp_dpm_mclk, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_dpm_mclk,
		amdgpu_set_pp_dpm_mclk);
static DEVICE_ATTR(pp_dpm_socclk, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_dpm_socclk,
		amdgpu_set_pp_dpm_socclk);
static DEVICE_ATTR(pp_dpm_fclk, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_dpm_fclk,
		amdgpu_set_pp_dpm_fclk);
static DEVICE_ATTR(pp_dpm_dcefclk, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_dpm_dcefclk,
		amdgpu_set_pp_dpm_dcefclk);
static DEVICE_ATTR(pp_dpm_pcie, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_dpm_pcie,
		amdgpu_set_pp_dpm_pcie);
static DEVICE_ATTR(pp_sclk_od, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_sclk_od,
		amdgpu_set_pp_sclk_od);
static DEVICE_ATTR(pp_mclk_od, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_mclk_od,
		amdgpu_set_pp_mclk_od);
static DEVICE_ATTR(pp_power_profile_mode, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_power_profile_mode,
		amdgpu_set_pp_power_profile_mode);
static DEVICE_ATTR(pp_od_clk_voltage, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_od_clk_voltage,
		amdgpu_set_pp_od_clk_voltage);
static DEVICE_ATTR(gpu_busy_percent, S_IRUGO,
		amdgpu_get_busy_percent, NULL);
static DEVICE_ATTR(pcie_bw, S_IRUGO, amdgpu_get_pcie_bw, NULL);
static DEVICE_ATTR(ppfeatures, S_IRUGO | S_IWUSR,
		amdgpu_get_ppfeature_status,
		amdgpu_set_ppfeature_status);
e3933f26 | 1378 | |
d38ceaf9 AD |
1379 | static ssize_t amdgpu_hwmon_show_temp(struct device *dev, |
1380 | struct device_attribute *attr, | |
1381 | char *buf) | |
1382 | { | |
1383 | struct amdgpu_device *adev = dev_get_drvdata(dev); | |
0c67df48 | 1384 | struct drm_device *ddev = adev->ddev; |
71c9b9ad | 1385 | int r, temp, size = sizeof(temp); |
d38ceaf9 | 1386 | |
0c67df48 AD |
1387 | /* Can't get temperature when the card is off */ |
1388 | if ((adev->flags & AMD_IS_PX) && | |
1389 | (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) | |
1390 | return -EINVAL; | |
1391 | ||
71c9b9ad AD |
1392 | /* get the temperature */ |
1393 | r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP, | |
1394 | (void *)&temp, &size); | |
1395 | if (r) | |
1396 | return r; | |
d38ceaf9 AD |
1397 | |
1398 | return snprintf(buf, PAGE_SIZE, "%d\n", temp); | |
1399 | } | |
1400 | ||
1401 | static ssize_t amdgpu_hwmon_show_temp_thresh(struct device *dev, | |
1402 | struct device_attribute *attr, | |
1403 | char *buf) | |
1404 | { | |
1405 | struct amdgpu_device *adev = dev_get_drvdata(dev); | |
1406 | int hyst = to_sensor_dev_attr(attr)->index; | |
1407 | int temp; | |
1408 | ||
1409 | if (hyst) | |
1410 | temp = adev->pm.dpm.thermal.min_temp; | |
1411 | else | |
1412 | temp = adev->pm.dpm.thermal.max_temp; | |
1413 | ||
1414 | return snprintf(buf, PAGE_SIZE, "%d\n", temp); | |
1415 | } | |
1416 | ||
1417 | static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev, | |
1418 | struct device_attribute *attr, | |
1419 | char *buf) | |
1420 | { | |
1421 | struct amdgpu_device *adev = dev_get_drvdata(dev); | |
1422 | u32 pwm_mode = 0; | |
a76ff5af CG |
1423 | if (is_support_sw_smu(adev)) { |
1424 | pwm_mode = smu_get_fan_control_mode(&adev->smu); | |
1425 | } else { | |
1426 | if (!adev->powerplay.pp_funcs->get_fan_control_mode) | |
1427 | return -EINVAL; | |
d38ceaf9 | 1428 | |
a76ff5af CG |
1429 | pwm_mode = amdgpu_dpm_get_fan_control_mode(adev); |
1430 | } | |
d38ceaf9 | 1431 | |
aad22ca4 | 1432 | return sprintf(buf, "%i\n", pwm_mode); |
d38ceaf9 AD |
1433 | } |
1434 | ||
1435 | static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev, | |
1436 | struct device_attribute *attr, | |
1437 | const char *buf, | |
1438 | size_t count) | |
1439 | { | |
1440 | struct amdgpu_device *adev = dev_get_drvdata(dev); | |
1441 | int err; | |
1442 | int value; | |
1443 | ||
5ec36e2d AD |
1444 | /* Can't adjust fan when the card is off */ |
1445 | if ((adev->flags & AMD_IS_PX) && | |
1446 | (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON)) | |
1447 | return -EINVAL; | |
1448 | ||
a76ff5af CG |
1449 | if (is_support_sw_smu(adev)) { |
1450 | err = kstrtoint(buf, 10, &value); | |
1451 | if (err) | |
1452 | return err; | |
1453 | ||
1454 | smu_set_fan_control_mode(&adev->smu, value); | |
1455 | } else { | |
1456 | if (!adev->powerplay.pp_funcs->set_fan_control_mode) | |
1457 | return -EINVAL; | |
d38ceaf9 | 1458 | |
a76ff5af CG |
1459 | err = kstrtoint(buf, 10, &value); |
1460 | if (err) | |
1461 | return err; | |
d38ceaf9 | 1462 | |
a76ff5af CG |
1463 | amdgpu_dpm_set_fan_control_mode(adev, value); |
1464 | } | |
d38ceaf9 AD |
1465 | |
1466 | return count; | |
1467 | } | |
1468 | ||
/* hwmon pwm1_min: the minimum PWM duty cycle is fixed at 0. */
static ssize_t amdgpu_hwmon_get_pwm1_min(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	return sprintf(buf, "%i\n", 0);
}
1475 | ||
/* hwmon pwm1_max: the maximum PWM duty cycle is fixed at 255. */
static ssize_t amdgpu_hwmon_get_pwm1_max(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	return sprintf(buf, "%i\n", 255);
}
1482 | ||
1483 | static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev, | |
1484 | struct device_attribute *attr, | |
1485 | const char *buf, size_t count) | |
1486 | { | |
1487 | struct amdgpu_device *adev = dev_get_drvdata(dev); | |
1488 | int err; | |
1489 | u32 value; | |
b8a9c003 | 1490 | u32 pwm_mode; |
d38ceaf9 | 1491 | |
5ec36e2d AD |
1492 | /* Can't adjust fan when the card is off */ |
1493 | if ((adev->flags & AMD_IS_PX) && | |
1494 | (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON)) | |
1495 | return -EINVAL; | |
008a9524 CG |
1496 | if (is_support_sw_smu(adev)) |
1497 | pwm_mode = smu_get_fan_control_mode(&adev->smu); | |
1498 | else | |
1499 | pwm_mode = amdgpu_dpm_get_fan_control_mode(adev); | |
b8a9c003 RZ |
1500 | if (pwm_mode != AMD_FAN_CTRL_MANUAL) { |
1501 | pr_info("manual fan speed control should be enabled first\n"); | |
1502 | return -EINVAL; | |
1503 | } | |
1504 | ||
d38ceaf9 AD |
1505 | err = kstrtou32(buf, 10, &value); |
1506 | if (err) | |
1507 | return err; | |
1508 | ||
1509 | value = (value * 100) / 255; | |
1510 | ||
008a9524 CG |
1511 | if (is_support_sw_smu(adev)) { |
1512 | err = smu_set_fan_speed_percent(&adev->smu, value); | |
1513 | if (err) | |
1514 | return err; | |
1515 | } else if (adev->powerplay.pp_funcs->set_fan_speed_percent) { | |
cd4d7464 RZ |
1516 | err = amdgpu_dpm_set_fan_speed_percent(adev, value); |
1517 | if (err) | |
1518 | return err; | |
1519 | } | |
d38ceaf9 AD |
1520 | |
1521 | return count; | |
1522 | } | |
1523 | ||
1524 | static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev, | |
1525 | struct device_attribute *attr, | |
1526 | char *buf) | |
1527 | { | |
1528 | struct amdgpu_device *adev = dev_get_drvdata(dev); | |
1529 | int err; | |
cd4d7464 | 1530 | u32 speed = 0; |
d38ceaf9 | 1531 | |
5ec36e2d AD |
1532 | /* Can't adjust fan when the card is off */ |
1533 | if ((adev->flags & AMD_IS_PX) && | |
1534 | (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON)) | |
1535 | return -EINVAL; | |
1536 | ||
008a9524 CG |
1537 | if (is_support_sw_smu(adev)) { |
1538 | err = smu_get_fan_speed_percent(&adev->smu, &speed); | |
1539 | if (err) | |
1540 | return err; | |
1541 | } else if (adev->powerplay.pp_funcs->get_fan_speed_percent) { | |
cd4d7464 RZ |
1542 | err = amdgpu_dpm_get_fan_speed_percent(adev, &speed); |
1543 | if (err) | |
1544 | return err; | |
1545 | } | |
d38ceaf9 AD |
1546 | |
1547 | speed = (speed * 255) / 100; | |
1548 | ||
1549 | return sprintf(buf, "%i\n", speed); | |
1550 | } | |
1551 | ||
81c1514b GI |
1552 | static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev, |
1553 | struct device_attribute *attr, | |
1554 | char *buf) | |
1555 | { | |
1556 | struct amdgpu_device *adev = dev_get_drvdata(dev); | |
1557 | int err; | |
cd4d7464 | 1558 | u32 speed = 0; |
81c1514b | 1559 | |
5ec36e2d AD |
1560 | /* Can't adjust fan when the card is off */ |
1561 | if ((adev->flags & AMD_IS_PX) && | |
1562 | (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON)) | |
1563 | return -EINVAL; | |
1564 | ||
3ac4ffdd LG |
1565 | if (is_support_sw_smu(adev)) { |
1566 | err = smu_get_current_rpm(&adev->smu, &speed); | |
1567 | if (err) | |
1568 | return err; | |
1569 | } else if (adev->powerplay.pp_funcs->get_fan_speed_rpm) { | |
cd4d7464 RZ |
1570 | err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed); |
1571 | if (err) | |
1572 | return err; | |
1573 | } | |
81c1514b GI |
1574 | |
1575 | return sprintf(buf, "%i\n", speed); | |
1576 | } | |
1577 | ||
c2870527 RZ |
1578 | static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev, |
1579 | struct device_attribute *attr, | |
1580 | char *buf) | |
1581 | { | |
1582 | struct amdgpu_device *adev = dev_get_drvdata(dev); | |
1583 | u32 min_rpm = 0; | |
1584 | u32 size = sizeof(min_rpm); | |
1585 | int r; | |
1586 | ||
c2870527 RZ |
1587 | r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MIN_FAN_RPM, |
1588 | (void *)&min_rpm, &size); | |
1589 | if (r) | |
1590 | return r; | |
1591 | ||
1592 | return snprintf(buf, PAGE_SIZE, "%d\n", min_rpm); | |
1593 | } | |
1594 | ||
1595 | static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev, | |
1596 | struct device_attribute *attr, | |
1597 | char *buf) | |
1598 | { | |
1599 | struct amdgpu_device *adev = dev_get_drvdata(dev); | |
1600 | u32 max_rpm = 0; | |
1601 | u32 size = sizeof(max_rpm); | |
1602 | int r; | |
1603 | ||
c2870527 RZ |
1604 | r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MAX_FAN_RPM, |
1605 | (void *)&max_rpm, &size); | |
1606 | if (r) | |
1607 | return r; | |
1608 | ||
1609 | return snprintf(buf, PAGE_SIZE, "%d\n", max_rpm); | |
1610 | } | |
1611 | ||
/*
 * amdgpu_hwmon_get_fan1_target - hwmon "fan1_target" show callback.
 *
 * Reports the fan speed in RPM.  Note: like fan1_input this reads the
 * current RPM from the hardware; there is no separate stored target.
 */
static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	u32 rpm = 0;

	/* Can't adjust fan when the card is off */
	if ((adev->flags & AMD_IS_PX) &&
	    (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
		return -EINVAL;

	/* SW SMU asics query through the smu interface, others via powerplay */
	if (is_support_sw_smu(adev)) {
		err = smu_get_current_rpm(&adev->smu, &rpm);
		if (err)
			return err;
	} else if (adev->powerplay.pp_funcs->get_fan_speed_rpm) {
		err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm);
		if (err)
			return err;
	}

	return sprintf(buf, "%i\n", rpm);
}
1637 | ||
/*
 * amdgpu_hwmon_set_fan1_target - hwmon "fan1_target" store callback.
 *
 * Sets the fan speed in RPM.  Only permitted while the fan controller
 * is in manual mode; returns -ENODATA otherwise, matching hwmon
 * convention.  Returns @count on success or a negative errno.
 */
static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf, size_t count)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	u32 value;
	u32 pwm_mode;

	if (is_support_sw_smu(adev))
		pwm_mode = smu_get_fan_control_mode(&adev->smu);
	else
		pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);

	/* RPM can only be forced while under manual control */
	if (pwm_mode != AMD_FAN_CTRL_MANUAL)
		return -ENODATA;

	/* Can't adjust fan when the card is off */
	if ((adev->flags & AMD_IS_PX) &&
	    (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
		return -EINVAL;

	err = kstrtou32(buf, 10, &value);
	if (err)
		return err;

	if (is_support_sw_smu(adev)) {
		err = smu_set_fan_speed_rpm(&adev->smu, value);
		if (err)
			return err;
	} else if (adev->powerplay.pp_funcs->set_fan_speed_rpm) {
		err = amdgpu_dpm_set_fan_speed_rpm(adev, value);
		if (err)
			return err;
	}

	return count;
}
1676 | ||
1677 | static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev, | |
1678 | struct device_attribute *attr, | |
1679 | char *buf) | |
1680 | { | |
1681 | struct amdgpu_device *adev = dev_get_drvdata(dev); | |
1682 | u32 pwm_mode = 0; | |
1683 | ||
da5f18e8 CG |
1684 | if (is_support_sw_smu(adev)) { |
1685 | pwm_mode = smu_get_fan_control_mode(&adev->smu); | |
1686 | } else { | |
1687 | if (!adev->powerplay.pp_funcs->get_fan_control_mode) | |
1688 | return -EINVAL; | |
c2870527 | 1689 | |
da5f18e8 CG |
1690 | pwm_mode = amdgpu_dpm_get_fan_control_mode(adev); |
1691 | } | |
c2870527 RZ |
1692 | return sprintf(buf, "%i\n", pwm_mode == AMD_FAN_CTRL_AUTO ? 0 : 1); |
1693 | } | |
1694 | ||
/*
 * amdgpu_hwmon_set_fan1_enable - hwmon "fan1_enable" store callback.
 *
 * Accepts 0 (automatic fan control) or 1 (manual fan control); any
 * other value is rejected with -EINVAL.  Returns @count on success or
 * a negative errno.
 */
static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf,
					    size_t count)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	int value;
	u32 pwm_mode;

	/* Can't adjust fan when the card is off */
	if ((adev->flags & AMD_IS_PX) &&
	    (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
		return -EINVAL;

	err = kstrtoint(buf, 10, &value);
	if (err)
		return err;

	/* map the sysfs value onto the driver's fan control mode */
	if (value == 0)
		pwm_mode = AMD_FAN_CTRL_AUTO;
	else if (value == 1)
		pwm_mode = AMD_FAN_CTRL_MANUAL;
	else
		return -EINVAL;

	if (is_support_sw_smu(adev)) {
		smu_set_fan_control_mode(&adev->smu, pwm_mode);
	} else {
		if (!adev->powerplay.pp_funcs->set_fan_control_mode)
			return -EINVAL;
		amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
	}

	return count;
}
1732 | ||
2bd376bf AD |
/*
 * amdgpu_hwmon_show_vddgfx - hwmon "in0_input" show callback.
 *
 * Reports the GPU graphics voltage in millivolts via the DPM sensor
 * interface.
 */
static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	struct drm_device *ddev = adev->ddev;
	u32 vddgfx;
	int r, size = sizeof(vddgfx);

	/* Can't get voltage when the card is off */
	if ((adev->flags & AMD_IS_PX) &&
	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
		return -EINVAL;

	/* get the voltage */
	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX,
				   (void *)&vddgfx, &size);
	if (r)
		return r;

	return snprintf(buf, PAGE_SIZE, "%d\n", vddgfx);
}
1755 | ||
1756 | static ssize_t amdgpu_hwmon_show_vddgfx_label(struct device *dev, | |
1757 | struct device_attribute *attr, | |
1758 | char *buf) | |
1759 | { | |
1760 | return snprintf(buf, PAGE_SIZE, "vddgfx\n"); | |
1761 | } | |
1762 | ||
/*
 * amdgpu_hwmon_show_vddnb - hwmon "in1_input" show callback.
 *
 * Reports the northbridge voltage in millivolts.  The vddnb rail only
 * exists on APUs, so dGPUs are rejected up front.
 */
static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	struct drm_device *ddev = adev->ddev;
	u32 vddnb;
	int r, size = sizeof(vddnb);

	/* only APUs have vddnb */
	if (!(adev->flags & AMD_IS_APU))
		return -EINVAL;

	/* Can't get voltage when the card is off */
	if ((adev->flags & AMD_IS_PX) &&
	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
		return -EINVAL;

	/* get the voltage */
	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB,
				   (void *)&vddnb, &size);
	if (r)
		return r;

	return snprintf(buf, PAGE_SIZE, "%d\n", vddnb);
}
1789 | ||
1790 | static ssize_t amdgpu_hwmon_show_vddnb_label(struct device *dev, | |
1791 | struct device_attribute *attr, | |
1792 | char *buf) | |
1793 | { | |
1794 | return snprintf(buf, PAGE_SIZE, "vddnb\n"); | |
1795 | } | |
1796 | ||
2976fc26 AD |
/*
 * amdgpu_hwmon_show_power_avg - hwmon "power1_average" show callback.
 *
 * Reports the average GPU power consumption in microwatts.  The sensor
 * returns a fixed-point value: the upper 24 bits are integer watts and
 * the low 8 bits a fractional part (each count treated as ~1 mW here).
 */
static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	struct drm_device *ddev = adev->ddev;
	u32 query = 0;
	int r, size = sizeof(u32);
	unsigned uw;

	/* Can't get power when the card is off */
	if ((adev->flags & AMD_IS_PX) &&
	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
		return -EINVAL;

	/* query the average GPU power draw */
	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER,
				   (void *)&query, &size);
	if (r)
		return r;

	/* convert to microwatts */
	uw = (query >> 8) * 1000000 + (query & 0xff) * 1000;

	return snprintf(buf, PAGE_SIZE, "%u\n", uw);
}
1823 | ||
8d81bce7 RZ |
1824 | static ssize_t amdgpu_hwmon_show_power_cap_min(struct device *dev, |
1825 | struct device_attribute *attr, | |
1826 | char *buf) | |
1827 | { | |
1828 | return sprintf(buf, "%i\n", 0); | |
1829 | } | |
1830 | ||
1831 | static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev, | |
1832 | struct device_attribute *attr, | |
1833 | char *buf) | |
1834 | { | |
1835 | struct amdgpu_device *adev = dev_get_drvdata(dev); | |
1836 | uint32_t limit = 0; | |
1837 | ||
014c4440 CG |
1838 | if (is_support_sw_smu(adev)) { |
1839 | smu_get_power_limit(&adev->smu, &limit, true); | |
1840 | return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000); | |
1841 | } else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) { | |
8d81bce7 RZ |
1842 | adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, true); |
1843 | return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000); | |
1844 | } else { | |
1845 | return snprintf(buf, PAGE_SIZE, "\n"); | |
1846 | } | |
1847 | } | |
1848 | ||
1849 | static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev, | |
1850 | struct device_attribute *attr, | |
1851 | char *buf) | |
1852 | { | |
1853 | struct amdgpu_device *adev = dev_get_drvdata(dev); | |
1854 | uint32_t limit = 0; | |
1855 | ||
014c4440 CG |
1856 | if (is_support_sw_smu(adev)) { |
1857 | smu_get_power_limit(&adev->smu, &limit, false); | |
1858 | return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000); | |
1859 | } else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) { | |
8d81bce7 RZ |
1860 | adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, false); |
1861 | return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000); | |
1862 | } else { | |
1863 | return snprintf(buf, PAGE_SIZE, "\n"); | |
1864 | } | |
1865 | } | |
1866 | ||
1867 | ||
1868 | static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev, | |
1869 | struct device_attribute *attr, | |
1870 | const char *buf, | |
1871 | size_t count) | |
1872 | { | |
1873 | struct amdgpu_device *adev = dev_get_drvdata(dev); | |
1874 | int err; | |
1875 | u32 value; | |
1876 | ||
1877 | err = kstrtou32(buf, 10, &value); | |
1878 | if (err) | |
1879 | return err; | |
1880 | ||
1881 | value = value / 1000000; /* convert to Watt */ | |
014c4440 CG |
1882 | if (is_support_sw_smu(adev)) { |
1883 | adev->smu.funcs->set_power_limit(&adev->smu, value); | |
1884 | } else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->set_power_limit) { | |
8d81bce7 RZ |
1885 | err = adev->powerplay.pp_funcs->set_power_limit(adev->powerplay.pp_handle, value); |
1886 | if (err) | |
1887 | return err; | |
1888 | } else { | |
1889 | return -EINVAL; | |
1890 | } | |
1891 | ||
1892 | return count; | |
1893 | } | |
1894 | ||
d0948af7 AD |
/*
 * amdgpu_hwmon_show_sclk - hwmon "freq1_input" show callback.
 *
 * Reports the current gfx/compute engine clock in hertz.  The sensor
 * returns the clock in units of 10 kHz, hence the * 10 * 1000 scaling.
 */
static ssize_t amdgpu_hwmon_show_sclk(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	struct drm_device *ddev = adev->ddev;
	uint32_t sclk;
	int r, size = sizeof(sclk);

	/* Can't get clocks when the card is off */
	if ((adev->flags & AMD_IS_PX) &&
	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
		return -EINVAL;

	/* sanity check PP is enabled */
	if (!(adev->powerplay.pp_funcs &&
	      adev->powerplay.pp_funcs->read_sensor))
		return -EINVAL;

	/* get the sclk */
	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK,
				   (void *)&sclk, &size);
	if (r)
		return r;

	return snprintf(buf, PAGE_SIZE, "%d\n", sclk * 10 * 1000);
}
1922 | ||
1923 | static ssize_t amdgpu_hwmon_show_sclk_label(struct device *dev, | |
1924 | struct device_attribute *attr, | |
1925 | char *buf) | |
1926 | { | |
1927 | return snprintf(buf, PAGE_SIZE, "sclk\n"); | |
1928 | } | |
1929 | ||
/*
 * amdgpu_hwmon_show_mclk - hwmon "freq2_input" show callback.
 *
 * Reports the current memory clock in hertz.  The sensor returns the
 * clock in units of 10 kHz, hence the * 10 * 1000 scaling.
 */
static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	struct drm_device *ddev = adev->ddev;
	uint32_t mclk;
	int r, size = sizeof(mclk);

	/* Can't get clocks when the card is off */
	if ((adev->flags & AMD_IS_PX) &&
	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
		return -EINVAL;

	/* sanity check PP is enabled */
	if (!(adev->powerplay.pp_funcs &&
	      adev->powerplay.pp_funcs->read_sensor))
		return -EINVAL;

	/* get the mclk */
	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK,
				   (void *)&mclk, &size);
	if (r)
		return r;

	return snprintf(buf, PAGE_SIZE, "%d\n", mclk * 10 * 1000);
}
1957 | ||
1958 | static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev, | |
1959 | struct device_attribute *attr, | |
1960 | char *buf) | |
1961 | { | |
1962 | return snprintf(buf, PAGE_SIZE, "mclk\n"); | |
1963 | } | |
844c5419 AD |
1964 | |
1965 | /** | |
1966 | * DOC: hwmon | |
1967 | * | |
1968 | * The amdgpu driver exposes the following sensor interfaces: | |
dc85db25 | 1969 | * |
844c5419 | 1970 | * - GPU temperature (via the on-die sensor) |
dc85db25 | 1971 | * |
844c5419 | 1972 | * - GPU voltage |
dc85db25 | 1973 | * |
844c5419 | 1974 | * - Northbridge voltage (APUs only) |
dc85db25 | 1975 | * |
844c5419 | 1976 | * - GPU power |
dc85db25 | 1977 | * |
844c5419 AD |
1978 | * - GPU fan |
1979 | * | |
d0948af7 AD |
1980 | * - GPU gfx/compute engine clock |
1981 | * | |
1982 | * - GPU memory clock (dGPU only) | |
1983 | * | |
844c5419 | 1984 | * hwmon interfaces for GPU temperature: |
dc85db25 | 1985 | * |
844c5419 | 1986 | * - temp1_input: the on die GPU temperature in millidegrees Celsius |
dc85db25 | 1987 | * |
844c5419 | 1988 | * - temp1_crit: temperature critical max value in millidegrees Celsius |
dc85db25 | 1989 | * |
844c5419 AD |
1990 | * - temp1_crit_hyst: temperature hysteresis for critical limit in millidegrees Celsius |
1991 | * | |
1992 | * hwmon interfaces for GPU voltage: | |
dc85db25 | 1993 | * |
844c5419 | 1994 | * - in0_input: the voltage on the GPU in millivolts |
dc85db25 | 1995 | * |
844c5419 AD |
1996 | * - in1_input: the voltage on the Northbridge in millivolts |
1997 | * | |
1998 | * hwmon interfaces for GPU power: | |
dc85db25 | 1999 | * |
844c5419 | 2000 | * - power1_average: average power used by the GPU in microWatts |
dc85db25 | 2001 | * |
844c5419 | 2002 | * - power1_cap_min: minimum cap supported in microWatts |
dc85db25 | 2003 | * |
844c5419 | 2004 | * - power1_cap_max: maximum cap supported in microWatts |
dc85db25 | 2005 | * |
844c5419 AD |
2006 | * - power1_cap: selected power cap in microWatts |
2007 | * | |
2008 | * hwmon interfaces for GPU fan: | |
dc85db25 | 2009 | * |
844c5419 | 2010 | * - pwm1: pulse width modulation fan level (0-255) |
dc85db25 AD |
2011 | * |
2012 | * - pwm1_enable: pulse width modulation fan control method (0: no fan speed control, 1: manual fan speed control using pwm interface, 2: automatic fan speed control) | |
2013 | * | |
844c5419 | 2014 | * - pwm1_min: pulse width modulation fan control minimum level (0) |
dc85db25 | 2015 | * |
844c5419 | 2016 | * - pwm1_max: pulse width modulation fan control maximum level (255) |
dc85db25 | 2017 | * |
c2870527 RZ |
2018 | * - fan1_min: minimum fan speed. Unit: revolutions/min (RPM) |
2019 | * |
2020 | * - fan1_max: maximum fan speed. Unit: revolutions/min (RPM) |
2021 | * | |
844c5419 AD |
2022 | * - fan1_input: fan speed in RPM |
2023 | * | |
c2870527 RZ |
2024 | * - fan[1-*]_target: Desired fan speed Unit: revolution/min (RPM) |
2025 | * | |
2026 | * - fan[1-*]_enable: Enable or disable the sensors. 1: Enable, 0: Disable |
2027 | * | |
d0948af7 AD |
2028 | * hwmon interfaces for GPU clocks: |
2029 | * | |
2030 | * - freq1_input: the gfx/compute clock in hertz | |
2031 | * | |
2032 | * - freq2_input: the memory clock in hertz | |
2033 | * | |
844c5419 AD |
2034 | * You can use hwmon tools like sensors to view this information on your system. |
2035 | * | |
2036 | */ | |
2037 | ||
d38ceaf9 AD |
/*
 * hwmon sensor attribute definitions.  Read-only sensors expose only a
 * show callback; pwm1, pwm1_enable, fan1_target, fan1_enable and
 * power1_cap are additionally writable by root (S_IWUSR).
 */
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1);
static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1, amdgpu_hwmon_set_pwm1, 0);
static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1_enable, amdgpu_hwmon_set_pwm1_enable, 0);
static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, amdgpu_hwmon_get_pwm1_min, NULL, 0);
static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, amdgpu_hwmon_get_pwm1_max, NULL, 0);
static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, amdgpu_hwmon_get_fan1_input, NULL, 0);
static SENSOR_DEVICE_ATTR(fan1_min, S_IRUGO, amdgpu_hwmon_get_fan1_min, NULL, 0);
static SENSOR_DEVICE_ATTR(fan1_max, S_IRUGO, amdgpu_hwmon_get_fan1_max, NULL, 0);
static SENSOR_DEVICE_ATTR(fan1_target, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_target, amdgpu_hwmon_set_fan1_target, 0);
static SENSOR_DEVICE_ATTR(fan1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_enable, amdgpu_hwmon_set_fan1_enable, 0);
static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, amdgpu_hwmon_show_vddgfx, NULL, 0);
static SENSOR_DEVICE_ATTR(in0_label, S_IRUGO, amdgpu_hwmon_show_vddgfx_label, NULL, 0);
static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, amdgpu_hwmon_show_vddnb, NULL, 0);
static SENSOR_DEVICE_ATTR(in1_label, S_IRUGO, amdgpu_hwmon_show_vddnb_label, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 0);
static SENSOR_DEVICE_ATTR(freq1_input, S_IRUGO, amdgpu_hwmon_show_sclk, NULL, 0);
static SENSOR_DEVICE_ATTR(freq1_label, S_IRUGO, amdgpu_hwmon_show_sclk_label, NULL, 0);
static SENSOR_DEVICE_ATTR(freq2_input, S_IRUGO, amdgpu_hwmon_show_mclk, NULL, 0);
static SENSOR_DEVICE_ATTR(freq2_label, S_IRUGO, amdgpu_hwmon_show_mclk_label, NULL, 0);
d38ceaf9 AD |
2062 | |
/*
 * Full attribute list registered with the hwmon core; per-device
 * filtering happens in hwmon_attributes_visible().
 */
static struct attribute *hwmon_attributes[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp1_crit.dev_attr.attr,
	&sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
	&sensor_dev_attr_pwm1.dev_attr.attr,
	&sensor_dev_attr_pwm1_enable.dev_attr.attr,
	&sensor_dev_attr_pwm1_min.dev_attr.attr,
	&sensor_dev_attr_pwm1_max.dev_attr.attr,
	&sensor_dev_attr_fan1_input.dev_attr.attr,
	&sensor_dev_attr_fan1_min.dev_attr.attr,
	&sensor_dev_attr_fan1_max.dev_attr.attr,
	&sensor_dev_attr_fan1_target.dev_attr.attr,
	&sensor_dev_attr_fan1_enable.dev_attr.attr,
	&sensor_dev_attr_in0_input.dev_attr.attr,
	&sensor_dev_attr_in0_label.dev_attr.attr,
	&sensor_dev_attr_in1_input.dev_attr.attr,
	&sensor_dev_attr_in1_label.dev_attr.attr,
	&sensor_dev_attr_power1_average.dev_attr.attr,
	&sensor_dev_attr_power1_cap_max.dev_attr.attr,
	&sensor_dev_attr_power1_cap_min.dev_attr.attr,
	&sensor_dev_attr_power1_cap.dev_attr.attr,
	&sensor_dev_attr_freq1_input.dev_attr.attr,
	&sensor_dev_attr_freq1_label.dev_attr.attr,
	&sensor_dev_attr_freq2_input.dev_attr.attr,
	&sensor_dev_attr_freq2_label.dev_attr.attr,
	NULL
};
2090 | ||
/*
 * hwmon_attributes_visible - sysfs is_visible callback.
 *
 * Decides, per device, which hwmon attributes exist and with which
 * permissions.  Returning 0 hides the attribute entirely; otherwise
 * the (possibly reduced) mode is used.
 *
 * NOTE(review): the !is_support_sw_smu() branches dereference
 * adev->powerplay.pp_funcs without a NULL check, unlike the power-cap
 * callbacks above which guard pp_funcs first — confirm pp_funcs is
 * always non-NULL by the time the hwmon device is registered.
 */
static umode_t hwmon_attributes_visible(struct kobject *kobj,
					struct attribute *attr, int index)
{
	struct device *dev = kobj_to_dev(kobj);
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	umode_t effective_mode = attr->mode;

	/* Skip fan attributes if fan is not present */
	if (adev->pm.no_fan && (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
	    attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
	    attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	    attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
	    attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
	    attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
	    attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
	    attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
	    attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
		return 0;

	/* Skip fan attributes on APU */
	if ((adev->flags & AMD_IS_APU) &&
	    (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
		return 0;

	/* Skip limit attributes if DPM is not enabled */
	if (!adev->pm.dpm_enabled &&
	    (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
		return 0;

	if (!is_support_sw_smu(adev)) {
		/* mask fan attributes if we have no bindings for this asic to expose */
		if ((!adev->powerplay.pp_funcs->get_fan_speed_percent &&
		     attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
		    (!adev->powerplay.pp_funcs->get_fan_control_mode &&
		     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
			effective_mode &= ~S_IRUGO;

		if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
		     attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
		    (!adev->powerplay.pp_funcs->set_fan_control_mode &&
		     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
			effective_mode &= ~S_IWUSR;
	}

	/* APUs do not expose the power sensors/caps */
	if ((adev->flags & AMD_IS_APU) &&
	    (attr == &sensor_dev_attr_power1_average.dev_attr.attr ||
	     attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr||
	     attr == &sensor_dev_attr_power1_cap.dev_attr.attr))
		return 0;

	if (!is_support_sw_smu(adev)) {
		/* hide max/min values if we can't both query and manage the fan */
		if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
		     !adev->powerplay.pp_funcs->get_fan_speed_percent) &&
		    (!adev->powerplay.pp_funcs->set_fan_speed_rpm &&
		     !adev->powerplay.pp_funcs->get_fan_speed_rpm) &&
		    (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
		     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
			return 0;

		if ((!adev->powerplay.pp_funcs->set_fan_speed_rpm &&
		     !adev->powerplay.pp_funcs->get_fan_speed_rpm) &&
		    (attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
		     attr == &sensor_dev_attr_fan1_min.dev_attr.attr))
			return 0;
	}

	/* only APUs have vddnb */
	if (!(adev->flags & AMD_IS_APU) &&
	    (attr == &sensor_dev_attr_in1_input.dev_attr.attr ||
	     attr == &sensor_dev_attr_in1_label.dev_attr.attr))
		return 0;

	/* no mclk on APUs */
	if ((adev->flags & AMD_IS_APU) &&
	    (attr == &sensor_dev_attr_freq2_input.dev_attr.attr ||
	     attr == &sensor_dev_attr_freq2_label.dev_attr.attr))
		return 0;

	return effective_mode;
}
2191 | ||
/* Attribute group wiring: list + visibility filter, terminated array
 * of groups passed to hwmon registration. */
static const struct attribute_group hwmon_attrgroup = {
	.attrs = hwmon_attributes,
	.is_visible = hwmon_attributes_visible,
};

static const struct attribute_group *hwmon_groups[] = {
	&hwmon_attrgroup,
	NULL
};
2201 | ||
/*
 * amdgpu_dpm_thermal_work_handler - deferred thermal-event handler.
 *
 * Runs from the pm.dpm.thermal work item.  Reads the GPU temperature
 * and either enters the internal thermal power state (to cool down) or
 * restores the user-selected state, then recomputes clocks.
 */
void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device,
			     pm.dpm.thermal.work);
	/* switch to the thermal state */
	enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;
	int temp, size = sizeof(temp);

	if (!adev->pm.dpm_enabled)
		return;

	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP,
				    (void *)&temp, &size)) {
		/* temperature read back OK: only stay thermal if still hot */
		if (temp < adev->pm.dpm.thermal.min_temp)
			/* switch back the user state */
			dpm_state = adev->pm.dpm.user_state;
	} else {
		/* no temperature available: rely on the interrupt direction */
		if (adev->pm.dpm.thermal.high_to_low)
			/* switch back the user state */
			dpm_state = adev->pm.dpm.user_state;
	}
	mutex_lock(&adev->pm.mutex);
	if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
		adev->pm.dpm.thermal_active = true;
	else
		adev->pm.dpm.thermal_active = false;
	adev->pm.dpm.state = dpm_state;
	mutex_unlock(&adev->pm.mutex);

	amdgpu_pm_compute_clocks(adev);
}
2234 | ||
/*
 * amdgpu_dpm_pick_power_state - choose the best matching power state.
 *
 * Scans the parsed power-state table for a state matching @dpm_state,
 * honoring single-display-only restrictions.  If no state matches, the
 * request is progressively downgraded through fallback states (see the
 * second switch) and the search restarts.  Returns NULL only if even
 * the fallbacks find nothing.
 */
static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev,
						     enum amd_pm_state_type dpm_state)
{
	int i;
	struct amdgpu_ps *ps;
	u32 ui_class;
	bool single_display = (adev->pm.dpm.new_active_crtc_count < 2) ?
		true : false;

	/* check if the vblank period is too short to adjust the mclk */
	if (single_display && adev->powerplay.pp_funcs->vblank_too_short) {
		if (amdgpu_dpm_vblank_too_short(adev))
			single_display = false;
	}

	/* certain older asics have a separare 3D performance state,
	 * so try that first if the user selected performance
	 */
	if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
		dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
	/* balanced states don't exist at the moment */
	if (dpm_state == POWER_STATE_TYPE_BALANCED)
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;

restart_search:
	/* Pick the best power state based on current conditions */
	for (i = 0; i < adev->pm.dpm.num_ps; i++) {
		ps = &adev->pm.dpm.ps[i];
		ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
		switch (dpm_state) {
		/* user states */
		case POWER_STATE_TYPE_BATTERY:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_BALANCED:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_PERFORMANCE:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		/* internal states */
		case POWER_STATE_TYPE_INTERNAL_UVD:
			if (adev->pm.dpm.uvd_ps)
				return adev->pm.dpm.uvd_ps;
			else
				break;
		case POWER_STATE_TYPE_INTERNAL_UVD_SD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_BOOT:
			return adev->pm.dpm.boot_ps;
		case POWER_STATE_TYPE_INTERNAL_THERMAL:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ACPI:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ULV:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_3DPERF:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
				return ps;
			break;
		default:
			break;
		}
	}
	/* use a fallback state if we didn't match */
	switch (dpm_state) {
	case POWER_STATE_TYPE_INTERNAL_UVD_SD:
		dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_UVD_HD:
	case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
	case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
		if (adev->pm.dpm.uvd_ps) {
			return adev->pm.dpm.uvd_ps;
		} else {
			dpm_state = POWER_STATE_TYPE_PERFORMANCE;
			goto restart_search;
		}
	case POWER_STATE_TYPE_INTERNAL_THERMAL:
		dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_ACPI:
		dpm_state = POWER_STATE_TYPE_BATTERY;
		goto restart_search;
	case POWER_STATE_TYPE_BATTERY:
	case POWER_STATE_TYPE_BALANCED:
	case POWER_STATE_TYPE_INTERNAL_3DPERF:
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;
		goto restart_search;
	default:
		break;
	}

	return NULL;
}
2368 | ||
/*
 * amdgpu_dpm_change_power_state_locked - switch to the requested DPM state.
 * @adev: amdgpu device pointer
 *
 * Picks the best matching power state for the current state type, compares it
 * against the active one and, if different, performs the full
 * pre-set / set / post-set sequence.  Caller must hold adev->pm.mutex
 * (hence "_locked"); only used on the legacy (non-powerplay-task) path from
 * amdgpu_pm_compute_clocks().
 */
static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
{
	struct amdgpu_ps *ps;
	enum amd_pm_state_type dpm_state;
	int ret;
	bool equal = false;

	/* if dpm init failed */
	if (!adev->pm.dpm_enabled)
		return;

	if (adev->pm.dpm.user_state != adev->pm.dpm.state) {
		/* add other state override checks here */
		/* thermal/UVD overrides take priority over the user request */
		if ((!adev->pm.dpm.thermal_active) &&
		    (!adev->pm.dpm.uvd_active))
			adev->pm.dpm.state = adev->pm.dpm.user_state;
	}
	dpm_state = adev->pm.dpm.state;

	/* no matching state (even after fallbacks) -> nothing to do */
	ps = amdgpu_dpm_pick_power_state(adev, dpm_state);
	if (ps)
		adev->pm.dpm.requested_ps = ps;
	else
		return;

	if (amdgpu_dpm == 1 && adev->powerplay.pp_funcs->print_power_state) {
		printk("switching from power state:\n");
		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
		printk("switching to power state:\n");
		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps);
	}

	/* update whether vce is active */
	ps->vce_active = adev->pm.dpm.vce_active;
	if (adev->powerplay.pp_funcs->display_configuration_changed)
		amdgpu_dpm_display_configuration_changed(adev);

	ret = amdgpu_dpm_pre_set_power_state(adev);
	if (ret)
		return;

	/* treat a failed comparison as "states differ" so we still switch */
	if (adev->powerplay.pp_funcs->check_state_equal) {
		if (0 != amdgpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps, adev->pm.dpm.requested_ps, &equal))
			equal = false;
	}

	if (equal)
		return;

	amdgpu_dpm_set_power_state(adev);
	amdgpu_dpm_post_set_power_state(adev);

	/* commit the new display configuration */
	adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
	adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;

	if (adev->powerplay.pp_funcs->force_performance_level) {
		if (adev->pm.dpm.thermal_active) {
			enum amd_dpm_forced_level level = adev->pm.dpm.forced_level;
			/* force low perf level for thermal */
			amdgpu_dpm_force_performance_level(adev, AMD_DPM_FORCED_LEVEL_LOW);
			/* save the user's level so it is restored once thermal clears */
			adev->pm.dpm.forced_level = level;
		} else {
			/* otherwise, user selected level */
			amdgpu_dpm_force_performance_level(adev, adev->pm.dpm.forced_level);
		}
	}
}
2437 | ||
2438 | void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable) | |
2439 | { | |
72e91f37 KW |
2440 | int ret = 0; |
2441 | if (is_support_sw_smu(adev)) { | |
2442 | ret = smu_dpm_set_power_gate(&adev->smu, AMD_IP_BLOCK_TYPE_UVD, enable); | |
2443 | if (ret) | |
2444 | DRM_ERROR("[SW SMU]: dpm enable uvd failed, state = %s, ret = %d. \n", | |
2445 | enable ? "true" : "false", ret); | |
2446 | } else if (adev->powerplay.pp_funcs->set_powergating_by_smu) { | |
e95a14a9 TSD |
2447 | /* enable/disable UVD */ |
2448 | mutex_lock(&adev->pm.mutex); | |
b92c6287 | 2449 | amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable); |
e95a14a9 | 2450 | mutex_unlock(&adev->pm.mutex); |
d38ceaf9 | 2451 | } |
8ca606de GS |
2452 | /* enable/disable Low Memory PState for UVD (4k videos) */ |
2453 | if (adev->asic_type == CHIP_STONEY && | |
2454 | adev->uvd.decode_image_width >= WIDTH_4K) { | |
2455 | struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; | |
2456 | ||
2457 | if (hwmgr && hwmgr->hwmgr_func && | |
2458 | hwmgr->hwmgr_func->update_nbdpm_pstate) | |
2459 | hwmgr->hwmgr_func->update_nbdpm_pstate(hwmgr, | |
2460 | !enable, | |
2461 | true); | |
2462 | } | |
d38ceaf9 AD |
2463 | } |
2464 | ||
2465 | void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable) | |
2466 | { | |
72e91f37 KW |
2467 | int ret = 0; |
2468 | if (is_support_sw_smu(adev)) { | |
2469 | ret = smu_dpm_set_power_gate(&adev->smu, AMD_IP_BLOCK_TYPE_VCE, enable); | |
2470 | if (ret) | |
2471 | DRM_ERROR("[SW SMU]: dpm enable vce failed, state = %s, ret = %d. \n", | |
2472 | enable ? "true" : "false", ret); | |
2473 | } else if (adev->powerplay.pp_funcs->set_powergating_by_smu) { | |
e95a14a9 TSD |
2474 | /* enable/disable VCE */ |
2475 | mutex_lock(&adev->pm.mutex); | |
b92c6287 | 2476 | amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable); |
e95a14a9 | 2477 | mutex_unlock(&adev->pm.mutex); |
b7a07769 | 2478 | } |
d38ceaf9 AD |
2479 | } |
2480 | ||
2481 | void amdgpu_pm_print_power_states(struct amdgpu_device *adev) | |
2482 | { | |
2483 | int i; | |
2484 | ||
cd4d7464 | 2485 | if (adev->powerplay.pp_funcs->print_power_state == NULL) |
1b5708ff RZ |
2486 | return; |
2487 | ||
2488 | for (i = 0; i < adev->pm.dpm.num_ps; i++) | |
d38ceaf9 | 2489 | amdgpu_dpm_print_power_state(adev, &adev->pm.dpm.ps[i]); |
1b5708ff | 2490 | |
d38ceaf9 AD |
2491 | } |
2492 | ||
19290598 PL |
2493 | int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version) |
2494 | { | |
f3a5231c | 2495 | int r; |
19290598 PL |
2496 | |
2497 | if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->load_firmware) { | |
2498 | r = adev->powerplay.pp_funcs->load_firmware(adev->powerplay.pp_handle); | |
2499 | if (r) { | |
2500 | pr_err("smu firmware loading failed\n"); | |
2501 | return r; | |
2502 | } | |
2503 | *smu_version = adev->pm.fw_version; | |
2504 | } | |
f3a5231c | 2505 | return 0; |
19290598 PL |
2506 | } |
2507 | ||
d38ceaf9 AD |
2508 | int amdgpu_pm_sysfs_init(struct amdgpu_device *adev) |
2509 | { | |
45b35ee0 | 2510 | struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; |
d38ceaf9 AD |
2511 | int ret; |
2512 | ||
c86f5ebf AD |
2513 | if (adev->pm.sysfs_initialized) |
2514 | return 0; | |
2515 | ||
d2f52ac8 RZ |
2516 | if (adev->pm.dpm_enabled == 0) |
2517 | return 0; | |
2518 | ||
d38ceaf9 AD |
2519 | adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev, |
2520 | DRIVER_NAME, adev, | |
2521 | hwmon_groups); | |
2522 | if (IS_ERR(adev->pm.int_hwmon_dev)) { | |
2523 | ret = PTR_ERR(adev->pm.int_hwmon_dev); | |
2524 | dev_err(adev->dev, | |
2525 | "Unable to register hwmon device: %d\n", ret); | |
2526 | return ret; | |
2527 | } | |
2528 | ||
2529 | ret = device_create_file(adev->dev, &dev_attr_power_dpm_state); | |
2530 | if (ret) { | |
2531 | DRM_ERROR("failed to create device file for dpm state\n"); | |
2532 | return ret; | |
2533 | } | |
2534 | ret = device_create_file(adev->dev, &dev_attr_power_dpm_force_performance_level); | |
2535 | if (ret) { | |
2536 | DRM_ERROR("failed to create device file for dpm state\n"); | |
2537 | return ret; | |
2538 | } | |
f3898ea1 | 2539 | |
6d07fe7b RZ |
2540 | |
2541 | ret = device_create_file(adev->dev, &dev_attr_pp_num_states); | |
2542 | if (ret) { | |
2543 | DRM_ERROR("failed to create device file pp_num_states\n"); | |
2544 | return ret; | |
2545 | } | |
2546 | ret = device_create_file(adev->dev, &dev_attr_pp_cur_state); | |
2547 | if (ret) { | |
2548 | DRM_ERROR("failed to create device file pp_cur_state\n"); | |
2549 | return ret; | |
2550 | } | |
2551 | ret = device_create_file(adev->dev, &dev_attr_pp_force_state); | |
2552 | if (ret) { | |
2553 | DRM_ERROR("failed to create device file pp_force_state\n"); | |
2554 | return ret; | |
2555 | } | |
2556 | ret = device_create_file(adev->dev, &dev_attr_pp_table); | |
2557 | if (ret) { | |
2558 | DRM_ERROR("failed to create device file pp_table\n"); | |
2559 | return ret; | |
f3898ea1 | 2560 | } |
c85e299f EH |
2561 | |
2562 | ret = device_create_file(adev->dev, &dev_attr_pp_dpm_sclk); | |
2563 | if (ret) { | |
2564 | DRM_ERROR("failed to create device file pp_dpm_sclk\n"); | |
2565 | return ret; | |
2566 | } | |
2567 | ret = device_create_file(adev->dev, &dev_attr_pp_dpm_mclk); | |
2568 | if (ret) { | |
2569 | DRM_ERROR("failed to create device file pp_dpm_mclk\n"); | |
2570 | return ret; | |
2571 | } | |
d7337ca2 EQ |
2572 | if (adev->asic_type >= CHIP_VEGA10) { |
2573 | ret = device_create_file(adev->dev, &dev_attr_pp_dpm_socclk); | |
2574 | if (ret) { | |
2575 | DRM_ERROR("failed to create device file pp_dpm_socclk\n"); | |
2576 | return ret; | |
2577 | } | |
d7e28e2d EQ |
2578 | ret = device_create_file(adev->dev, &dev_attr_pp_dpm_dcefclk); |
2579 | if (ret) { | |
2580 | DRM_ERROR("failed to create device file pp_dpm_dcefclk\n"); | |
2581 | return ret; | |
2582 | } | |
d7337ca2 | 2583 | } |
828e37ef EQ |
2584 | if (adev->asic_type >= CHIP_VEGA20) { |
2585 | ret = device_create_file(adev->dev, &dev_attr_pp_dpm_fclk); | |
2586 | if (ret) { | |
2587 | DRM_ERROR("failed to create device file pp_dpm_fclk\n"); | |
2588 | return ret; | |
2589 | } | |
2590 | } | |
c85e299f EH |
2591 | ret = device_create_file(adev->dev, &dev_attr_pp_dpm_pcie); |
2592 | if (ret) { | |
2593 | DRM_ERROR("failed to create device file pp_dpm_pcie\n"); | |
2594 | return ret; | |
2595 | } | |
8b2e574d EH |
2596 | ret = device_create_file(adev->dev, &dev_attr_pp_sclk_od); |
2597 | if (ret) { | |
2598 | DRM_ERROR("failed to create device file pp_sclk_od\n"); | |
2599 | return ret; | |
2600 | } | |
f2bdc05f EH |
2601 | ret = device_create_file(adev->dev, &dev_attr_pp_mclk_od); |
2602 | if (ret) { | |
2603 | DRM_ERROR("failed to create device file pp_mclk_od\n"); | |
2604 | return ret; | |
2605 | } | |
37c5c4db RZ |
2606 | ret = device_create_file(adev->dev, |
2607 | &dev_attr_pp_power_profile_mode); | |
2608 | if (ret) { | |
2609 | DRM_ERROR("failed to create device file " | |
2610 | "pp_power_profile_mode\n"); | |
2611 | return ret; | |
2612 | } | |
3b94fb10 LG |
2613 | if ((is_support_sw_smu(adev) && adev->smu.od_enabled) || |
2614 | (!is_support_sw_smu(adev) && hwmgr->od_enabled)) { | |
45b35ee0 EQ |
2615 | ret = device_create_file(adev->dev, |
2616 | &dev_attr_pp_od_clk_voltage); | |
2617 | if (ret) { | |
2618 | DRM_ERROR("failed to create device file " | |
2619 | "pp_od_clk_voltage\n"); | |
2620 | return ret; | |
2621 | } | |
e3933f26 | 2622 | } |
b374d82d TSD |
2623 | ret = device_create_file(adev->dev, |
2624 | &dev_attr_gpu_busy_percent); | |
2625 | if (ret) { | |
2626 | DRM_ERROR("failed to create device file " | |
2627 | "gpu_busy_level\n"); | |
2628 | return ret; | |
2629 | } | |
b45e18ac | 2630 | /* PCIe Perf counters won't work on APU nodes */ |
0208a105 | 2631 | if (!(adev->flags & AMD_IS_APU)) { |
b45e18ac KR |
2632 | ret = device_create_file(adev->dev, &dev_attr_pcie_bw); |
2633 | if (ret) { | |
2634 | DRM_ERROR("failed to create device file pcie_bw\n"); | |
2635 | return ret; | |
2636 | } | |
2637 | } | |
d38ceaf9 AD |
2638 | ret = amdgpu_debugfs_pm_init(adev); |
2639 | if (ret) { | |
2640 | DRM_ERROR("Failed to register debugfs file for dpm!\n"); | |
2641 | return ret; | |
2642 | } | |
2643 | ||
7ca881a8 EQ |
2644 | if ((adev->asic_type >= CHIP_VEGA10) && |
2645 | !(adev->flags & AMD_IS_APU)) { | |
2646 | ret = device_create_file(adev->dev, | |
2647 | &dev_attr_ppfeatures); | |
2648 | if (ret) { | |
2649 | DRM_ERROR("failed to create device file " | |
2650 | "ppfeatures\n"); | |
2651 | return ret; | |
2652 | } | |
2653 | } | |
2654 | ||
c86f5ebf AD |
2655 | adev->pm.sysfs_initialized = true; |
2656 | ||
d38ceaf9 AD |
2657 | return 0; |
2658 | } | |
2659 | ||
2660 | void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev) | |
2661 | { | |
45b35ee0 EQ |
2662 | struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; |
2663 | ||
d2f52ac8 RZ |
2664 | if (adev->pm.dpm_enabled == 0) |
2665 | return; | |
2666 | ||
d38ceaf9 AD |
2667 | if (adev->pm.int_hwmon_dev) |
2668 | hwmon_device_unregister(adev->pm.int_hwmon_dev); | |
2669 | device_remove_file(adev->dev, &dev_attr_power_dpm_state); | |
2670 | device_remove_file(adev->dev, &dev_attr_power_dpm_force_performance_level); | |
6d07fe7b RZ |
2671 | |
2672 | device_remove_file(adev->dev, &dev_attr_pp_num_states); | |
2673 | device_remove_file(adev->dev, &dev_attr_pp_cur_state); | |
2674 | device_remove_file(adev->dev, &dev_attr_pp_force_state); | |
2675 | device_remove_file(adev->dev, &dev_attr_pp_table); | |
2676 | ||
c85e299f EH |
2677 | device_remove_file(adev->dev, &dev_attr_pp_dpm_sclk); |
2678 | device_remove_file(adev->dev, &dev_attr_pp_dpm_mclk); | |
d7e28e2d | 2679 | if (adev->asic_type >= CHIP_VEGA10) { |
d7337ca2 | 2680 | device_remove_file(adev->dev, &dev_attr_pp_dpm_socclk); |
d7e28e2d EQ |
2681 | device_remove_file(adev->dev, &dev_attr_pp_dpm_dcefclk); |
2682 | } | |
c85e299f | 2683 | device_remove_file(adev->dev, &dev_attr_pp_dpm_pcie); |
828e37ef EQ |
2684 | if (adev->asic_type >= CHIP_VEGA20) |
2685 | device_remove_file(adev->dev, &dev_attr_pp_dpm_fclk); | |
8b2e574d | 2686 | device_remove_file(adev->dev, &dev_attr_pp_sclk_od); |
f2bdc05f | 2687 | device_remove_file(adev->dev, &dev_attr_pp_mclk_od); |
37c5c4db RZ |
2688 | device_remove_file(adev->dev, |
2689 | &dev_attr_pp_power_profile_mode); | |
3b94fb10 LG |
2690 | if ((is_support_sw_smu(adev) && adev->smu.od_enabled) || |
2691 | (!is_support_sw_smu(adev) && hwmgr->od_enabled)) | |
45b35ee0 EQ |
2692 | device_remove_file(adev->dev, |
2693 | &dev_attr_pp_od_clk_voltage); | |
b374d82d | 2694 | device_remove_file(adev->dev, &dev_attr_gpu_busy_percent); |
0208a105 | 2695 | if (!(adev->flags & AMD_IS_APU)) |
b45e18ac | 2696 | device_remove_file(adev->dev, &dev_attr_pcie_bw); |
7ca881a8 EQ |
2697 | if ((adev->asic_type >= CHIP_VEGA10) && |
2698 | !(adev->flags & AMD_IS_APU)) | |
2699 | device_remove_file(adev->dev, &dev_attr_ppfeatures); | |
d38ceaf9 AD |
2700 | } |
2701 | ||
/*
 * amdgpu_pm_compute_clocks - re-evaluate clocks for the current display load.
 * @adev: amdgpu device pointer
 *
 * Called on display configuration changes.  Updates display bandwidth,
 * drains all ready rings' fences, then hands the new display config to
 * whichever power backend is active: SW SMU, powerplay dispatch_tasks,
 * or the legacy locked state-change path.
 */
void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
{
	int i = 0;

	if (!adev->pm.dpm_enabled)
		return;

	if (adev->mode_info.num_crtc)
		amdgpu_display_bandwidth_update(adev);

	/* wait for outstanding work on every ready ring before reclocking */
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (ring && ring->sched.ready)
			amdgpu_fence_wait_empty(ring);
	}

	if (is_support_sw_smu(adev)) {
		/* SW SMU path: serialize on the smu mutex */
		struct smu_context *smu = &adev->smu;
		struct smu_dpm_context *smu_dpm = &adev->smu.smu_dpm;
		mutex_lock(&(smu->mutex));
		smu_handle_task(&adev->smu,
				smu_dpm->dpm_level,
				AMD_PP_TASK_DISPLAY_CONFIG_CHANGE);
		mutex_unlock(&(smu->mutex));
	} else {
		if (adev->powerplay.pp_funcs->dispatch_tasks) {
			if (!amdgpu_device_has_dc_support(adev)) {
				/* non-DC: build the display config ourselves
				 * under pm.mutex before dispatching */
				mutex_lock(&adev->pm.mutex);
				amdgpu_dpm_get_active_displays(adev);
				adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count;
				adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
				adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
				/* we have issues with mclk switching with refresh rates over 120 hz on the non-DC code. */
				if (adev->pm.pm_display_cfg.vrefresh > 120)
					adev->pm.pm_display_cfg.min_vblank_time = 0;
				if (adev->powerplay.pp_funcs->display_configuration_change)
					adev->powerplay.pp_funcs->display_configuration_change(
								adev->powerplay.pp_handle,
								&adev->pm.pm_display_cfg);
				mutex_unlock(&adev->pm.mutex);
			}
			amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL);
		} else {
			/* legacy dpm: full state change under pm.mutex */
			mutex_lock(&adev->pm.mutex);
			amdgpu_dpm_get_active_displays(adev);
			amdgpu_dpm_change_power_state_locked(adev);
			mutex_unlock(&adev->pm.mutex);
		}
	}
}
2752 | ||
2753 | /* | |
2754 | * Debugfs info | |
2755 | */ | |
2756 | #if defined(CONFIG_DEBUG_FS) | |
2757 | ||
3de4ec57 TSD |
/*
 * amdgpu_debugfs_pm_info_pp - print powerplay sensor readings to debugfs.
 * @m: seq_file to print into
 * @adev: amdgpu device pointer
 *
 * Queries clocks, voltages, power, temperature, load, the SMC feature mask
 * and UVD/VCE power state via amdgpu_dpm_read_sensor().  Each line is only
 * printed when its sensor read succeeds.  Always returns 0.
 *
 * Note: "size" is an in/out argument to amdgpu_dpm_read_sensor(), so it is
 * deliberately re-initialized before reads whose width differs from the
 * preceding one — keep the ordering intact when modifying this function.
 */
static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *adev)
{
	uint32_t value;
	uint64_t value64;
	uint32_t query = 0;
	int size;

	/* GPU Clocks */
	size = sizeof(value);
	seq_printf(m, "GFX Clocks and Power:\n");
	/* clocks are reported in 10 kHz units, hence the /100 to MHz */
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK, (void *)&value, &size))
		seq_printf(m, "\t%u MHz (MCLK)\n", value/100);
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, (void *)&value, &size))
		seq_printf(m, "\t%u MHz (SCLK)\n", value/100);
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK, (void *)&value, &size))
		seq_printf(m, "\t%u MHz (PSTATE_SCLK)\n", value/100);
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK, (void *)&value, &size))
		seq_printf(m, "\t%u MHz (PSTATE_MCLK)\n", value/100);
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX, (void *)&value, &size))
		seq_printf(m, "\t%u mV (VDDGFX)\n", value);
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, (void *)&value, &size))
		seq_printf(m, "\t%u mV (VDDNB)\n", value);
	/* power sensor packs watts in the high bytes, fraction in the low byte */
	size = sizeof(uint32_t);
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER, (void *)&query, &size))
		seq_printf(m, "\t%u.%u W (average GPU)\n", query >> 8, query & 0xff);
	size = sizeof(value);
	seq_printf(m, "\n");

	/* GPU Temp */
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP, (void *)&value, &size))
		seq_printf(m, "GPU Temperature: %u C\n", value/1000);

	/* GPU Load */
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD, (void *)&value, &size))
		seq_printf(m, "GPU Load: %u %%\n", value);
	seq_printf(m, "\n");

	/* SMC feature mask (64-bit read into value64) */
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK, (void *)&value64, &size))
		seq_printf(m, "SMC Feature Mask: 0x%016llx\n", value64);

	/* UVD clocks — only queried when the UVD block is powered */
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) {
		if (!value) {
			seq_printf(m, "UVD: Disabled\n");
		} else {
			seq_printf(m, "UVD: Enabled\n");
			if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
				seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
			if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
				seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
		}
	}
	seq_printf(m, "\n");

	/* VCE clocks — only queried when the VCE block is powered */
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) {
		if (!value) {
			seq_printf(m, "VCE: Disabled\n");
		} else {
			seq_printf(m, "VCE: Enabled\n");
			if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size))
				seq_printf(m, "\t%u MHz (ECCLK)\n", value/100);
		}
	}

	return 0;
}
2826 | ||
a8503b15 HR |
2827 | static void amdgpu_parse_cg_state(struct seq_file *m, u32 flags) |
2828 | { | |
2829 | int i; | |
2830 | ||
2831 | for (i = 0; clocks[i].flag; i++) | |
2832 | seq_printf(m, "\t%s: %s\n", clocks[i].name, | |
2833 | (flags & clocks[i].flag) ? "On" : "Off"); | |
2834 | } | |
2835 | ||
d38ceaf9 AD |
2836 | static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data) |
2837 | { | |
2838 | struct drm_info_node *node = (struct drm_info_node *) m->private; | |
2839 | struct drm_device *dev = node->minor->dev; | |
2840 | struct amdgpu_device *adev = dev->dev_private; | |
0c67df48 | 2841 | struct drm_device *ddev = adev->ddev; |
6cb2d4e4 HR |
2842 | u32 flags = 0; |
2843 | ||
2990a1fc | 2844 | amdgpu_device_ip_get_clockgating_state(adev, &flags); |
6cb2d4e4 | 2845 | seq_printf(m, "Clock Gating Flags Mask: 0x%x\n", flags); |
a8503b15 HR |
2846 | amdgpu_parse_cg_state(m, flags); |
2847 | seq_printf(m, "\n"); | |
d38ceaf9 | 2848 | |
1b5708ff RZ |
2849 | if (!adev->pm.dpm_enabled) { |
2850 | seq_printf(m, "dpm not enabled\n"); | |
2851 | return 0; | |
2852 | } | |
0c67df48 AD |
2853 | if ((adev->flags & AMD_IS_PX) && |
2854 | (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) { | |
2855 | seq_printf(m, "PX asic powered off\n"); | |
4a2700c5 | 2856 | } else if (!is_support_sw_smu(adev) && adev->powerplay.pp_funcs->debugfs_print_current_performance_level) { |
d38ceaf9 | 2857 | mutex_lock(&adev->pm.mutex); |
cd4d7464 RZ |
2858 | if (adev->powerplay.pp_funcs->debugfs_print_current_performance_level) |
2859 | adev->powerplay.pp_funcs->debugfs_print_current_performance_level(adev, m); | |
d38ceaf9 AD |
2860 | else |
2861 | seq_printf(m, "Debugfs support not implemented for this asic\n"); | |
2862 | mutex_unlock(&adev->pm.mutex); | |
6d07fe7b RZ |
2863 | } else { |
2864 | return amdgpu_debugfs_pm_info_pp(m, adev); | |
d38ceaf9 AD |
2865 | } |
2866 | ||
2867 | return 0; | |
2868 | } | |
2869 | ||
/* debugfs entries registered by amdgpu_debugfs_pm_init(): a single
 * "amdgpu_pm_info" file rendered by amdgpu_debugfs_pm_info(). */
static const struct drm_info_list amdgpu_pm_info_list[] = {
	{"amdgpu_pm_info", amdgpu_debugfs_pm_info, 0, NULL},
};
2873 | #endif | |
2874 | ||
/*
 * amdgpu_debugfs_pm_init - register the PM debugfs entries.
 * @adev: amdgpu device pointer
 *
 * Registers amdgpu_pm_info_list when CONFIG_DEBUG_FS is enabled; otherwise
 * a no-op returning 0.
 */
static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_pm_info_list, ARRAY_SIZE(amdgpu_pm_info_list));
#else
	return 0;
#endif
}