/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "cikd.h"
#include "amdgpu_atombios.h"
#include "amdgpu_dpm.h"
#include "kv_dpm.h"
#include <linux/seq_file.h>

#include "smu/smu_7_0_0_d.h"
#include "smu/smu_7_0_0_sh_mask.h"

#include "gca/gfx_7_2_d.h"
#include "gca/gfx_7_2_sh_mask.h"

#define KV_MAX_DEEPSLEEP_DIVIDER_ID	5
#define KV_MINIMUM_ENGINE_CLOCK		800
#define SMC_RAM_END			0x40000
static void kv_dpm_set_dpm_funcs(struct amdgpu_device *adev);
static void kv_dpm_set_irq_funcs(struct amdgpu_device *adev);
static int kv_enable_nb_dpm(struct amdgpu_device *adev,
			    bool enable);
static void kv_init_graphics_levels(struct amdgpu_device *adev);
static int kv_calculate_ds_divider(struct amdgpu_device *adev);
static int kv_calculate_nbps_level_settings(struct amdgpu_device *adev);
static int kv_calculate_dpm_settings(struct amdgpu_device *adev);
static void kv_enable_new_levels(struct amdgpu_device *adev);
static void kv_program_nbps_index_settings(struct amdgpu_device *adev,
					   struct amdgpu_ps *new_rps);
static int kv_set_enabled_level(struct amdgpu_device *adev, u32 level);
static int kv_set_enabled_levels(struct amdgpu_device *adev);
static int kv_force_dpm_highest(struct amdgpu_device *adev);
static int kv_force_dpm_lowest(struct amdgpu_device *adev);
static void kv_apply_state_adjust_rules(struct amdgpu_device *adev,
					struct amdgpu_ps *new_rps,
					struct amdgpu_ps *old_rps);
static int kv_set_thermal_temperature_range(struct amdgpu_device *adev,
					    int min_temp, int max_temp);
static int kv_init_fps_limits(struct amdgpu_device *adev);

static void kv_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate);
static void kv_dpm_powergate_vce(struct amdgpu_device *adev, bool gate);
static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate);
static void kv_dpm_powergate_acp(struct amdgpu_device *adev, bool gate);
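
/*
 * VID conversion helpers: SMU graphics levels store voltages as VID codes.
 * When a vddc-vs-sclk dependency table is available it is used directly;
 * otherwise the BIOS-provided sumo_vid_mapping_table translates between
 * the 2-bit and 7-bit VID encodings.
 */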
static u32 kv_convert_vid2_to_vid7(struct amdgpu_device *adev,
				   struct sumo_vid_mapping_table *vid_mapping_table,
				   u32 vid_2bit)
{
	struct amdgpu_clock_voltage_dependency_table *vddc_sclk_table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
	u32 i;

	if (vddc_sclk_table && vddc_sclk_table->count) {
		if (vid_2bit < vddc_sclk_table->count)
			return vddc_sclk_table->entries[vid_2bit].v;
		else
			return vddc_sclk_table->entries[vddc_sclk_table->count - 1].v;
	} else {
		for (i = 0; i < vid_mapping_table->num_entries; i++) {
			if (vid_mapping_table->entries[i].vid_2bit == vid_2bit)
				return vid_mapping_table->entries[i].vid_7bit;
		}
		return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_7bit;
	}
}

static u32 kv_convert_vid7_to_vid2(struct amdgpu_device *adev,
				   struct sumo_vid_mapping_table *vid_mapping_table,
				   u32 vid_7bit)
{
	struct amdgpu_clock_voltage_dependency_table *vddc_sclk_table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
	u32 i;

	if (vddc_sclk_table && vddc_sclk_table->count) {
		for (i = 0; i < vddc_sclk_table->count; i++) {
			if (vddc_sclk_table->entries[i].v == vid_7bit)
				return i;
		}
		return vddc_sclk_table->count - 1;
	} else {
		for (i = 0; i < vid_mapping_table->num_entries; i++) {
			if (vid_mapping_table->entries[i].vid_7bit == vid_7bit)
				return vid_mapping_table->entries[i].vid_2bit;
		}
		return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_2bit;
	}
}
static void sumo_take_smu_control(struct amdgpu_device *adev, bool enable)
{
/* This bit selects who handles display phy powergating.
 * Clear the bit to let atom handle it.
 * Set it to let the driver handle it.
 * For now we just let atom handle it.
 */
#if 0
	u32 v = RREG32(mmDOUT_SCRATCH3);

	if (enable)
		v |= 0x4;
	else
		v &= 0xFFFFFFFB;

	WREG32(mmDOUT_SCRATCH3, v);
#endif
}
static void sumo_construct_sclk_voltage_mapping_table(struct amdgpu_device *adev,
						      struct sumo_sclk_voltage_mapping_table *sclk_voltage_mapping_table,
						      ATOM_AVAILABLE_SCLK_LIST *table)
{
	u32 i;
	u32 n = 0;
	u32 prev_sclk = 0;

	for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) {
		if (table[i].ulSupportedSCLK > prev_sclk) {
			sclk_voltage_mapping_table->entries[n].sclk_frequency =
				table[i].ulSupportedSCLK;
			sclk_voltage_mapping_table->entries[n].vid_2bit =
				table[i].usVoltageIndex;
			prev_sclk = table[i].ulSupportedSCLK;
			n++;
		}
	}

	sclk_voltage_mapping_table->num_max_dpm_entries = n;
}

static void sumo_construct_vid_mapping_table(struct amdgpu_device *adev,
					     struct sumo_vid_mapping_table *vid_mapping_table,
					     ATOM_AVAILABLE_SCLK_LIST *table)
{
	u32 i, j;

	for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) {
		if (table[i].ulSupportedSCLK != 0) {
			vid_mapping_table->entries[table[i].usVoltageIndex].vid_7bit =
				table[i].usVoltageID;
			vid_mapping_table->entries[table[i].usVoltageIndex].vid_2bit =
				table[i].usVoltageIndex;
		}
	}

	for (i = 0; i < SUMO_MAX_NUMBER_VOLTAGES; i++) {
		if (vid_mapping_table->entries[i].vid_7bit == 0) {
			for (j = i + 1; j < SUMO_MAX_NUMBER_VOLTAGES; j++) {
				if (vid_mapping_table->entries[j].vid_7bit != 0) {
					vid_mapping_table->entries[i] =
						vid_mapping_table->entries[j];
					vid_mapping_table->entries[j].vid_7bit = 0;
					break;
				}
			}

			if (j == SUMO_MAX_NUMBER_VOLTAGES)
				break;
		}
	}

	vid_mapping_table->num_entries = i;
}
static const struct kv_lcac_config_values sx_local_cac_cfg_kv[] =
{
	/* { block_id, signal_id, t } entries elided in this listing */
	{ 0xffffffff }
};

static const struct kv_lcac_config_values mc0_local_cac_cfg_kv[] =
{
	/* entries elided in this listing */
	{ 0xffffffff }
};

static const struct kv_lcac_config_values mc1_local_cac_cfg_kv[] =
{
	/* entries elided in this listing */
	{ 0xffffffff }
};

static const struct kv_lcac_config_values mc2_local_cac_cfg_kv[] =
{
	/* entries elided in this listing */
	{ 0xffffffff }
};

static const struct kv_lcac_config_values mc3_local_cac_cfg_kv[] =
{
	/* entries elided in this listing */
	{ 0xffffffff }
};

static const struct kv_lcac_config_values cpl_local_cac_cfg_kv[] =
{
	/* entries elided in this listing */
	{ 0xffffffff }
};

static const struct kv_lcac_config_reg sx0_cac_config_reg[] =
{
	{ 0xc0400d00, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg mc0_cac_config_reg[] =
{
	{ 0xc0400d30, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg mc1_cac_config_reg[] =
{
	{ 0xc0400d3c, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg mc2_cac_config_reg[] =
{
	{ 0xc0400d48, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg mc3_cac_config_reg[] =
{
	{ 0xc0400d54, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg cpl_cac_config_reg[] =
{
	{ 0xc0400d80, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};
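
/*
 * DIDT (di/dt) configuration list: each row is
 * { offset, mask, shift, value, type } and is applied to the DIDT
 * indirect register space by kv_program_pt_config_registers().
 * The list is terminated by an offset of 0xFFFFFFFF.
 */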
static const struct kv_pt_config_reg didt_config_kv[] =
{
	{ 0x10, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x10, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x10, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x10, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x11, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x11, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x11, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x11, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x12, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x12, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x12, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x12, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x2, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
	{ 0x2, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
	{ 0x2, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
	{ 0x1, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x1, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x0, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x30, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x30, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x30, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x30, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x31, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x31, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x31, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x31, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x32, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x32, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x32, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x32, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x22, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
	{ 0x22, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
	{ 0x22, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
	{ 0x21, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x21, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x20, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x50, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x50, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x50, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x50, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x51, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x51, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x51, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x51, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x52, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x52, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x52, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x52, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x42, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
	{ 0x42, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
	{ 0x42, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
	{ 0x41, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x41, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x40, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x70, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x70, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x70, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x70, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x71, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x71, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x71, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x71, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x72, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x72, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x72, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x72, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x62, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
	{ 0x62, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
	{ 0x62, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
	{ 0x61, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x61, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x60, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0xFFFFFFFF }
};
static struct kv_ps *kv_get_ps(struct amdgpu_ps *rps)
{
	struct kv_ps *ps = rps->ps_priv;

	return ps;
}

static struct kv_power_info *kv_get_pi(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = adev->pm.dpm.priv;

	return pi;
}
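
/*
 * Program the per-block local CAC (capacitance) tables into the SMC.
 * Each table entry carries a block id, a signal count and a threshold,
 * which are packed into the control register described by local_cac_reg.
 */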
static void kv_program_local_cac_table(struct amdgpu_device *adev,
				       const struct kv_lcac_config_values *local_cac_table,
				       const struct kv_lcac_config_reg *local_cac_reg)
{
	u32 i, count, data;
	const struct kv_lcac_config_values *values = local_cac_table;

	while (values->block_id != 0xffffffff) {
		count = values->signal_id;
		for (i = 0; i < count; i++) {
			data = ((values->block_id << local_cac_reg->block_shift) &
				local_cac_reg->block_mask);
			data |= ((i << local_cac_reg->signal_shift) &
				 local_cac_reg->signal_mask);
			data |= ((values->t << local_cac_reg->t_shift) &
				 local_cac_reg->t_mask);
			data |= ((1 << local_cac_reg->enable_shift) &
				 local_cac_reg->enable_mask);
			WREG32_SMC(local_cac_reg->cntl, data);
		}
		values++;
	}
}

static int kv_program_pt_config_registers(struct amdgpu_device *adev,
					  const struct kv_pt_config_reg *cac_config_regs)
{
	const struct kv_pt_config_reg *config_regs = cac_config_regs;
	u32 data;
	u32 cache = 0;

	if (config_regs == NULL)
		return -EINVAL;

	while (config_regs->offset != 0xFFFFFFFF) {
		if (config_regs->type == KV_CONFIGREG_CACHE) {
			cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
		} else {
			switch (config_regs->type) {
			case KV_CONFIGREG_SMC_IND:
				data = RREG32_SMC(config_regs->offset);
				break;
			case KV_CONFIGREG_DIDT_IND:
				data = RREG32_DIDT(config_regs->offset);
				break;
			default:
				data = RREG32(config_regs->offset);
				break;
			}

			data &= ~config_regs->mask;
			data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
			data |= cache;
			cache = 0;

			switch (config_regs->type) {
			case KV_CONFIGREG_SMC_IND:
				WREG32_SMC(config_regs->offset, data);
				break;
			case KV_CONFIGREG_DIDT_IND:
				WREG32_DIDT(config_regs->offset, data);
				break;
			default:
				WREG32(config_regs->offset, data);
				break;
			}
		}
		config_regs++;
	}

	return 0;
}
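
/*
 * DIDT enable/disable. Programming the config registers and flipping the
 * per-block (SQ/DB/TD/TCP) enable bits is done with the RLC in safe mode
 * so the GFX engine is quiesced while the registers change.
 */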
static void kv_do_enable_didt(struct amdgpu_device *adev, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 data;

	if (pi->caps_sq_ramping) {
		data = RREG32_DIDT(ixDIDT_SQ_CTRL0);
		if (enable)
			data |= DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
		else
			data &= ~DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
		WREG32_DIDT(ixDIDT_SQ_CTRL0, data);
	}

	if (pi->caps_db_ramping) {
		data = RREG32_DIDT(ixDIDT_DB_CTRL0);
		if (enable)
			data |= DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
		else
			data &= ~DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
		WREG32_DIDT(ixDIDT_DB_CTRL0, data);
	}

	if (pi->caps_td_ramping) {
		data = RREG32_DIDT(ixDIDT_TD_CTRL0);
		if (enable)
			data |= DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
		else
			data &= ~DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
		WREG32_DIDT(ixDIDT_TD_CTRL0, data);
	}

	if (pi->caps_tcp_ramping) {
		data = RREG32_DIDT(ixDIDT_TCP_CTRL0);
		if (enable)
			data |= DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
		else
			data &= ~DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
		WREG32_DIDT(ixDIDT_TCP_CTRL0, data);
	}
}

static int kv_enable_didt(struct amdgpu_device *adev, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret;

	if (pi->caps_sq_ramping ||
	    pi->caps_db_ramping ||
	    pi->caps_td_ramping ||
	    pi->caps_tcp_ramping) {
		adev->gfx.rlc.funcs->enter_safe_mode(adev);

		if (enable) {
			ret = kv_program_pt_config_registers(adev, didt_config_kv);
			if (ret) {
				adev->gfx.rlc.funcs->exit_safe_mode(adev);
				return ret;
			}
		}

		kv_do_enable_didt(adev, enable);

		adev->gfx.rlc.funcs->exit_safe_mode(adev);
	}

	return 0;
}
static void kv_initialize_hardware_cac_manager(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	if (pi->caps_cac) {
		WREG32_SMC(ixLCAC_SX0_OVR_SEL, 0);
		WREG32_SMC(ixLCAC_SX0_OVR_VAL, 0);
		kv_program_local_cac_table(adev, sx_local_cac_cfg_kv, sx0_cac_config_reg);

		WREG32_SMC(ixLCAC_MC0_OVR_SEL, 0);
		WREG32_SMC(ixLCAC_MC0_OVR_VAL, 0);
		kv_program_local_cac_table(adev, mc0_local_cac_cfg_kv, mc0_cac_config_reg);

		WREG32_SMC(ixLCAC_MC1_OVR_SEL, 0);
		WREG32_SMC(ixLCAC_MC1_OVR_VAL, 0);
		kv_program_local_cac_table(adev, mc1_local_cac_cfg_kv, mc1_cac_config_reg);

		WREG32_SMC(ixLCAC_MC2_OVR_SEL, 0);
		WREG32_SMC(ixLCAC_MC2_OVR_VAL, 0);
		kv_program_local_cac_table(adev, mc2_local_cac_cfg_kv, mc2_cac_config_reg);

		WREG32_SMC(ixLCAC_MC3_OVR_SEL, 0);
		WREG32_SMC(ixLCAC_MC3_OVR_VAL, 0);
		kv_program_local_cac_table(adev, mc3_local_cac_cfg_kv, mc3_cac_config_reg);

		WREG32_SMC(ixLCAC_CPL_OVR_SEL, 0);
		WREG32_SMC(ixLCAC_CPL_OVR_VAL, 0);
		kv_program_local_cac_table(adev, cpl_local_cac_cfg_kv, cpl_cac_config_reg);
	}
}

static int kv_enable_smc_cac(struct amdgpu_device *adev, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret = 0;

	if (pi->caps_cac) {
		if (enable) {
			ret = amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_EnableCac);
			if (ret)
				pi->cac_enabled = false;
			else
				pi->cac_enabled = true;
		} else if (pi->cac_enabled) {
			amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_DisableCac);
			pi->cac_enabled = false;
		}
	}

	return ret;
}
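
/*
 * The SMC firmware header tells the driver where the DPM table and the
 * soft register area live in SMC SRAM; all later table uploads are
 * offsets from dpm_table_start/soft_regs_start.
 */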
static int kv_process_firmware_header(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 tmp;
	int ret;

	ret = amdgpu_kv_read_smc_sram_dword(adev, SMU7_FIRMWARE_HEADER_LOCATION +
					    offsetof(SMU7_Firmware_Header, DpmTable),
					    &tmp, pi->sram_end);

	if (ret == 0)
		pi->dpm_table_start = tmp;

	ret = amdgpu_kv_read_smc_sram_dword(adev, SMU7_FIRMWARE_HEADER_LOCATION +
					    offsetof(SMU7_Firmware_Header, SoftRegisters),
					    &tmp, pi->sram_end);

	if (ret == 0)
		pi->soft_regs_start = tmp;

	return ret;
}

static int kv_enable_dpm_voltage_scaling(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret;

	pi->graphics_voltage_change_enable = 1;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, GraphicsVoltageChangeEnable),
					  &pi->graphics_voltage_change_enable,
					  sizeof(u8), pi->sram_end);

	return ret;
}

static int kv_set_dpm_interval(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret;

	pi->graphics_interval = 1;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, GraphicsInterval),
					  &pi->graphics_interval,
					  sizeof(u8), pi->sram_end);

	return ret;
}

static int kv_set_dpm_boot_state(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, GraphicsBootLevel),
					  &pi->graphics_boot_level,
					  sizeof(u8), pi->sram_end);

	return ret;
}
static void kv_program_vc(struct amdgpu_device *adev)
{
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0x3FFFC100);
}

static void kv_clear_vc(struct amdgpu_device *adev)
{
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0);
}

static int kv_set_divider_value(struct amdgpu_device *adev,
				u32 index, u32 sclk)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	struct atom_clock_dividers dividers;
	int ret;

	ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
						 sclk, false, &dividers);
	if (ret)
		return ret;

	pi->graphics_level[index].SclkDid = (u8)dividers.post_div;
	pi->graphics_level[index].SclkFrequency = cpu_to_be32(sclk);

	return 0;
}

static u16 kv_convert_8bit_index_to_voltage(struct amdgpu_device *adev,
					    u16 voltage)
{
	return 6200 - (voltage * 25);
}

static u16 kv_convert_2bit_index_to_voltage(struct amdgpu_device *adev,
					    u32 vid_2bit)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 vid_8bit = kv_convert_vid2_to_vid7(adev,
					       &pi->sys_info.vid_mapping_table,
					       vid_2bit);

	return kv_convert_8bit_index_to_voltage(adev, (u16)vid_8bit);
}

static int kv_set_vid(struct amdgpu_device *adev, u32 index, u32 vid)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	pi->graphics_level[index].VoltageDownH = (u8)pi->voltage_drop_t;
	pi->graphics_level[index].MinVddNb =
		cpu_to_be32(kv_convert_2bit_index_to_voltage(adev, vid));

	return 0;
}

static int kv_set_at(struct amdgpu_device *adev, u32 index, u32 at)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	pi->graphics_level[index].AT = cpu_to_be16((u16)at);

	return 0;
}
static void kv_dpm_power_level_enable(struct amdgpu_device *adev,
				      u32 index, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	pi->graphics_level[index].EnabledForActivity = enable ? 1 : 0;
}

static void kv_start_dpm(struct amdgpu_device *adev)
{
	u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT);

	tmp |= GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK;
	WREG32_SMC(ixGENERAL_PWRMGT, tmp);

	amdgpu_kv_smc_dpm_enable(adev, true);
}

static void kv_stop_dpm(struct amdgpu_device *adev)
{
	amdgpu_kv_smc_dpm_enable(adev, false);
}

static void kv_start_am(struct amdgpu_device *adev)
{
	u32 sclk_pwrmgt_cntl = RREG32_SMC(ixSCLK_PWRMGT_CNTL);

	sclk_pwrmgt_cntl &= ~(SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK |
			      SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK);
	sclk_pwrmgt_cntl |= SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN_MASK;

	WREG32_SMC(ixSCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl);
}

static void kv_reset_am(struct amdgpu_device *adev)
{
	u32 sclk_pwrmgt_cntl = RREG32_SMC(ixSCLK_PWRMGT_CNTL);

	sclk_pwrmgt_cntl |= (SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK |
			     SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK);

	WREG32_SMC(ixSCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl);
}

static int kv_freeze_sclk_dpm(struct amdgpu_device *adev, bool freeze)
{
	return amdgpu_kv_notify_message_to_smu(adev, freeze ?
					       PPSMC_MSG_SCLKDPM_FreezeLevel : PPSMC_MSG_SCLKDPM_UnfreezeLevel);
}

static int kv_force_lowest_valid(struct amdgpu_device *adev)
{
	return kv_force_dpm_lowest(adev);
}

static int kv_unforce_levels(struct amdgpu_device *adev)
{
	if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS)
		return amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_NoForcedLevel);
	else
		return kv_set_enabled_levels(adev);
}
static int kv_update_sclk_t(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 low_sclk_interrupt_t = 0;
	int ret = 0;

	if (pi->caps_sclk_throttle_low_notification) {
		low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t);

		ret = amdgpu_kv_copy_bytes_to_smc(adev,
						  pi->dpm_table_start +
						  offsetof(SMU7_Fusion_DpmTable, LowSclkInterruptT),
						  (u8 *)&low_sclk_interrupt_t,
						  sizeof(u32), pi->sram_end);
	}
	return ret;
}

static int kv_program_bootup_state(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 i;
	struct amdgpu_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;

	if (table && table->count) {
		for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
			if (table->entries[i].clk == pi->boot_pl.sclk)
				break;
		}

		pi->graphics_boot_level = (u8)i;
		kv_dpm_power_level_enable(adev, i, true);
	} else {
		struct sumo_sclk_voltage_mapping_table *table =
			&pi->sys_info.sclk_voltage_mapping_table;

		if (table->num_max_dpm_entries == 0)
			return -EINVAL;

		for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
			if (table->entries[i].sclk_frequency == pi->boot_pl.sclk)
				break;
		}

		pi->graphics_boot_level = (u8)i;
		kv_dpm_power_level_enable(adev, i, true);
	}
	return 0;
}

static int kv_enable_auto_thermal_throttling(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret;

	pi->graphics_therm_throttle_enable = 1;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, GraphicsThermThrottleEnable),
					  &pi->graphics_therm_throttle_enable,
					  sizeof(u8), pi->sram_end);

	return ret;
}

static int kv_upload_dpm_settings(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, GraphicsLevel),
					  (u8 *)&pi->graphics_level,
					  sizeof(SMU7_Fusion_GraphicsLevel) * SMU7_MAX_LEVELS_GRAPHICS,
					  pi->sram_end);

	if (ret)
		return ret;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, GraphicsDpmLevelCount),
					  &pi->graphics_dpm_level_count,
					  sizeof(u8), pi->sram_end);

	return ret;
}
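
/*
 * DFS bypass: clocks that land close to a small set of known frequencies
 * can take a DFS bypass path; kv_get_clk_bypass() picks the bypass
 * control value by proximity to those frequencies.
 */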
static u32 kv_get_clock_difference(u32 a, u32 b)
{
	return (a >= b) ? a - b : b - a;
}

static u32 kv_get_clk_bypass(struct amdgpu_device *adev, u32 clk)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 value;

	if (pi->caps_enable_dfs_bypass) {
		if (kv_get_clock_difference(clk, 40000) < 200)
			value = 3;
		else if (kv_get_clock_difference(clk, 30000) < 200)
			value = 2;
		else if (kv_get_clock_difference(clk, 20000) < 200)
			value = 7;
		else if (kv_get_clock_difference(clk, 15000) < 200)
			value = 6;
		else if (kv_get_clock_difference(clk, 10000) < 200)
			value = 8;
		else
			value = 0;
	} else {
		value = 0;
	}

	return value;
}
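
/*
 * Populate the UVD/VCE/SAMU/ACP level tables in SMC SRAM from the
 * clock/voltage dependency tables, computing a PLL post divider and,
 * where applicable, a bypass control value for each level.
 */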
static int kv_populate_uvd_table(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	struct amdgpu_uvd_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
	struct atom_clock_dividers dividers;
	int ret;
	u32 i;

	if (table == NULL || table->count == 0)
		return 0;

	pi->uvd_level_count = 0;
	for (i = 0; i < table->count; i++) {
		if (pi->high_voltage_t &&
		    (pi->high_voltage_t < table->entries[i].v))
			break;

		pi->uvd_level[i].VclkFrequency = cpu_to_be32(table->entries[i].vclk);
		pi->uvd_level[i].DclkFrequency = cpu_to_be32(table->entries[i].dclk);
		pi->uvd_level[i].MinVddNb = cpu_to_be16(table->entries[i].v);

		pi->uvd_level[i].VClkBypassCntl =
			(u8)kv_get_clk_bypass(adev, table->entries[i].vclk);
		pi->uvd_level[i].DClkBypassCntl =
			(u8)kv_get_clk_bypass(adev, table->entries[i].dclk);

		ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
							 table->entries[i].vclk, false, &dividers);
		if (ret)
			return ret;
		pi->uvd_level[i].VclkDivider = (u8)dividers.post_div;

		ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
							 table->entries[i].dclk, false, &dividers);
		if (ret)
			return ret;
		pi->uvd_level[i].DclkDivider = (u8)dividers.post_div;

		pi->uvd_level_count++;
	}

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, UvdLevelCount),
					  (u8 *)&pi->uvd_level_count,
					  sizeof(u8), pi->sram_end);
	if (ret)
		return ret;

	pi->uvd_interval = 1;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, UVDInterval),
					  &pi->uvd_interval,
					  sizeof(u8), pi->sram_end);
	if (ret)
		return ret;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, UvdLevel),
					  (u8 *)&pi->uvd_level,
					  sizeof(SMU7_Fusion_UvdLevel) * SMU7_MAX_LEVELS_UVD,
					  pi->sram_end);

	return ret;
}
static int kv_populate_vce_table(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret;
	u32 i;
	struct amdgpu_vce_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
	struct atom_clock_dividers dividers;

	if (table == NULL || table->count == 0)
		return 0;

	pi->vce_level_count = 0;
	for (i = 0; i < table->count; i++) {
		if (pi->high_voltage_t &&
		    pi->high_voltage_t < table->entries[i].v)
			break;

		pi->vce_level[i].Frequency = cpu_to_be32(table->entries[i].evclk);
		pi->vce_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);

		pi->vce_level[i].ClkBypassCntl =
			(u8)kv_get_clk_bypass(adev, table->entries[i].evclk);

		ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
							 table->entries[i].evclk, false, &dividers);
		if (ret)
			return ret;
		pi->vce_level[i].Divider = (u8)dividers.post_div;

		pi->vce_level_count++;
	}

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, VceLevelCount),
					  (u8 *)&pi->vce_level_count,
					  sizeof(u8), pi->sram_end);
	if (ret)
		return ret;

	pi->vce_interval = 1;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, VCEInterval),
					  (u8 *)&pi->vce_interval,
					  sizeof(u8), pi->sram_end);
	if (ret)
		return ret;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, VceLevel),
					  (u8 *)&pi->vce_level,
					  sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_VCE,
					  pi->sram_end);

	return ret;
}
static int kv_populate_samu_table(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	struct amdgpu_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
	struct atom_clock_dividers dividers;
	int ret;
	u32 i;

	if (table == NULL || table->count == 0)
		return 0;

	pi->samu_level_count = 0;
	for (i = 0; i < table->count; i++) {
		if (pi->high_voltage_t &&
		    pi->high_voltage_t < table->entries[i].v)
			break;

		pi->samu_level[i].Frequency = cpu_to_be32(table->entries[i].clk);
		pi->samu_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);

		pi->samu_level[i].ClkBypassCntl =
			(u8)kv_get_clk_bypass(adev, table->entries[i].clk);

		ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
							 table->entries[i].clk, false, &dividers);
		if (ret)
			return ret;
		pi->samu_level[i].Divider = (u8)dividers.post_div;

		pi->samu_level_count++;
	}

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, SamuLevelCount),
					  (u8 *)&pi->samu_level_count,
					  sizeof(u8), pi->sram_end);
	if (ret)
		return ret;

	pi->samu_interval = 1;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, SAMUInterval),
					  (u8 *)&pi->samu_interval,
					  sizeof(u8), pi->sram_end);
	if (ret)
		return ret;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, SamuLevel),
					  (u8 *)&pi->samu_level,
					  sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_SAMU,
					  pi->sram_end);
	if (ret)
		return ret;

	return ret;
}
static int kv_populate_acp_table(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	struct amdgpu_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
	struct atom_clock_dividers dividers;
	int ret;
	u32 i;

	if (table == NULL || table->count == 0)
		return 0;

	pi->acp_level_count = 0;
	for (i = 0; i < table->count; i++) {
		pi->acp_level[i].Frequency = cpu_to_be32(table->entries[i].clk);
		pi->acp_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);

		ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
							 table->entries[i].clk, false, &dividers);
		if (ret)
			return ret;
		pi->acp_level[i].Divider = (u8)dividers.post_div;

		pi->acp_level_count++;
	}

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, AcpLevelCount),
					  (u8 *)&pi->acp_level_count,
					  sizeof(u8), pi->sram_end);
	if (ret)
		return ret;

	pi->acp_interval = 1;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, ACPInterval),
					  (u8 *)&pi->acp_interval,
					  sizeof(u8), pi->sram_end);
	if (ret)
		return ret;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, AcpLevel),
					  (u8 *)&pi->acp_level,
					  sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_ACP,
					  pi->sram_end);
	if (ret)
		return ret;

	return ret;
}
static void kv_calculate_dfs_bypass_settings(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 i;
	struct amdgpu_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;

	if (table && table->count) {
		for (i = 0; i < pi->graphics_dpm_level_count; i++) {
			if (pi->caps_enable_dfs_bypass) {
				if (kv_get_clock_difference(table->entries[i].clk, 40000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 3;
				else if (kv_get_clock_difference(table->entries[i].clk, 30000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 2;
				else if (kv_get_clock_difference(table->entries[i].clk, 26600) < 200)
					pi->graphics_level[i].ClkBypassCntl = 7;
				else if (kv_get_clock_difference(table->entries[i].clk, 20000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 6;
				else if (kv_get_clock_difference(table->entries[i].clk, 10000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 8;
				else
					pi->graphics_level[i].ClkBypassCntl = 0;
			} else {
				pi->graphics_level[i].ClkBypassCntl = 0;
			}
		}
	} else {
		struct sumo_sclk_voltage_mapping_table *table =
			&pi->sys_info.sclk_voltage_mapping_table;
		for (i = 0; i < pi->graphics_dpm_level_count; i++) {
			if (pi->caps_enable_dfs_bypass) {
				if (kv_get_clock_difference(table->entries[i].sclk_frequency, 40000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 3;
				else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 30000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 2;
				else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 26600) < 200)
					pi->graphics_level[i].ClkBypassCntl = 7;
				else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 20000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 6;
				else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 10000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 8;
				else
					pi->graphics_level[i].ClkBypassCntl = 0;
			} else {
				pi->graphics_level[i].ClkBypassCntl = 0;
			}
		}
	}
}
static int kv_enable_ulv(struct amdgpu_device *adev, bool enable)
{
	return amdgpu_kv_notify_message_to_smu(adev, enable ?
					       PPSMC_MSG_EnableULV : PPSMC_MSG_DisableULV);
}

static void kv_reset_acp_boot_level(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	pi->acp_boot_level = 0xff;
}

static void kv_update_current_ps(struct amdgpu_device *adev,
				 struct amdgpu_ps *rps)
{
	struct kv_ps *new_ps = kv_get_ps(rps);
	struct kv_power_info *pi = kv_get_pi(adev);

	pi->current_rps = *rps;
	pi->current_ps = *new_ps;
	pi->current_rps.ps_priv = &pi->current_ps;
	adev->pm.dpm.current_ps = &pi->current_rps;
}

static void kv_update_requested_ps(struct amdgpu_device *adev,
				   struct amdgpu_ps *rps)
{
	struct kv_ps *new_ps = kv_get_ps(rps);
	struct kv_power_info *pi = kv_get_pi(adev);

	pi->requested_rps = *rps;
	pi->requested_ps = *new_ps;
	pi->requested_rps.ps_priv = &pi->requested_ps;
	adev->pm.dpm.requested_ps = &pi->requested_rps;
}
static void kv_dpm_enable_bapm(struct amdgpu_device *adev, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret;

	if (pi->bapm_enable) {
		ret = amdgpu_kv_smc_bapm_enable(adev, enable);
		if (ret)
			DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n");
	}
}
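
/*
 * Bring up DPM: parse the firmware header, build and upload the graphics
 * and multimedia level tables, then enable voltage scaling, ULV, DIDT and
 * CAC, and finally hook up the internal thermal interrupt if present.
 */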
static int kv_dpm_enable(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret;

	ret = kv_process_firmware_header(adev);
	if (ret) {
		DRM_ERROR("kv_process_firmware_header failed\n");
		return ret;
	}
	kv_init_fps_limits(adev);
	kv_init_graphics_levels(adev);
	ret = kv_program_bootup_state(adev);
	if (ret) {
		DRM_ERROR("kv_program_bootup_state failed\n");
		return ret;
	}
	kv_calculate_dfs_bypass_settings(adev);
	ret = kv_upload_dpm_settings(adev);
	if (ret) {
		DRM_ERROR("kv_upload_dpm_settings failed\n");
		return ret;
	}
	ret = kv_populate_uvd_table(adev);
	if (ret) {
		DRM_ERROR("kv_populate_uvd_table failed\n");
		return ret;
	}
	ret = kv_populate_vce_table(adev);
	if (ret) {
		DRM_ERROR("kv_populate_vce_table failed\n");
		return ret;
	}
	ret = kv_populate_samu_table(adev);
	if (ret) {
		DRM_ERROR("kv_populate_samu_table failed\n");
		return ret;
	}
	ret = kv_populate_acp_table(adev);
	if (ret) {
		DRM_ERROR("kv_populate_acp_table failed\n");
		return ret;
	}
	kv_program_vc(adev);
#if 0
	kv_initialize_hardware_cac_manager(adev);
#endif
	kv_start_am(adev);
	if (pi->enable_auto_thermal_throttling) {
		ret = kv_enable_auto_thermal_throttling(adev);
		if (ret) {
			DRM_ERROR("kv_enable_auto_thermal_throttling failed\n");
			return ret;
		}
	}
	ret = kv_enable_dpm_voltage_scaling(adev);
	if (ret) {
		DRM_ERROR("kv_enable_dpm_voltage_scaling failed\n");
		return ret;
	}
	ret = kv_set_dpm_interval(adev);
	if (ret) {
		DRM_ERROR("kv_set_dpm_interval failed\n");
		return ret;
	}
	ret = kv_set_dpm_boot_state(adev);
	if (ret) {
		DRM_ERROR("kv_set_dpm_boot_state failed\n");
		return ret;
	}
	ret = kv_enable_ulv(adev, true);
	if (ret) {
		DRM_ERROR("kv_enable_ulv failed\n");
		return ret;
	}
	kv_start_dpm(adev);
	ret = kv_enable_didt(adev, true);
	if (ret) {
		DRM_ERROR("kv_enable_didt failed\n");
		return ret;
	}
	ret = kv_enable_smc_cac(adev, true);
	if (ret) {
		DRM_ERROR("kv_enable_smc_cac failed\n");
		return ret;
	}

	kv_reset_acp_boot_level(adev);

	ret = amdgpu_kv_smc_bapm_enable(adev, false);
	if (ret) {
		DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n");
		return ret;
	}

	kv_update_current_ps(adev, adev->pm.dpm.boot_ps);

	if (adev->irq.installed &&
	    amdgpu_is_internal_thermal_sensor(adev->pm.int_thermal_type)) {
		ret = kv_set_thermal_temperature_range(adev, KV_TEMP_RANGE_MIN, KV_TEMP_RANGE_MAX);
		if (ret) {
			DRM_ERROR("kv_set_thermal_temperature_range failed\n");
			return ret;
		}
		amdgpu_irq_get(adev, &adev->pm.dpm.thermal.irq,
			       AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
		amdgpu_irq_get(adev, &adev->pm.dpm.thermal.irq,
			       AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);
	}

	return ret;
}
static void kv_dpm_disable(struct amdgpu_device *adev)
{
	amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
		       AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
	amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
		       AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);

	amdgpu_kv_smc_bapm_enable(adev, false);

	if (adev->asic_type == CHIP_MULLINS)
		kv_enable_nb_dpm(adev, false);

	/* powerup blocks */
	kv_dpm_powergate_acp(adev, false);
	kv_dpm_powergate_samu(adev, false);
	kv_dpm_powergate_vce(adev, false);
	kv_dpm_powergate_uvd(adev, false);

	kv_enable_smc_cac(adev, false);
	kv_enable_didt(adev, false);
	kv_clear_vc(adev);
	kv_stop_dpm(adev);
	kv_enable_ulv(adev, false);
	kv_reset_am(adev);

	kv_update_current_ps(adev, adev->pm.dpm.boot_ps);
}
static int kv_write_smc_soft_register(struct amdgpu_device *adev,
				      u16 reg_offset, u32 value)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	return amdgpu_kv_copy_bytes_to_smc(adev, pi->soft_regs_start + reg_offset,
					   (u8 *)&value, sizeof(u16), pi->sram_end);
}

#if 0
static int kv_read_smc_soft_register(struct amdgpu_device *adev,
				     u16 reg_offset, u32 *value)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	return amdgpu_kv_read_smc_sram_dword(adev, pi->soft_regs_start + reg_offset,
					     value, pi->sram_end);
}
#endif

static void kv_init_sclk_t(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	pi->low_sclk_interrupt_t = 0;
}

static int kv_init_fps_limits(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret = 0;

	if (pi->caps_fps) {
		u16 tmp;

		tmp = 45;
		pi->fps_high_t = cpu_to_be16(tmp);
		ret = amdgpu_kv_copy_bytes_to_smc(adev,
						  pi->dpm_table_start +
						  offsetof(SMU7_Fusion_DpmTable, FpsHighT),
						  (u8 *)&pi->fps_high_t,
						  sizeof(u16), pi->sram_end);

		tmp = 30;
		pi->fps_low_t = cpu_to_be16(tmp);

		ret = amdgpu_kv_copy_bytes_to_smc(adev,
						  pi->dpm_table_start +
						  offsetof(SMU7_Fusion_DpmTable, FpsLowT),
						  (u8 *)&pi->fps_low_t,
						  sizeof(u16), pi->sram_end);
	}
	return ret;
}
static void kv_init_powergate_state(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	pi->uvd_power_gated = false;
	pi->vce_power_gated = false;
	pi->samu_power_gated = false;
	pi->acp_power_gated = false;
}

static int kv_enable_uvd_dpm(struct amdgpu_device *adev, bool enable)
{
	return amdgpu_kv_notify_message_to_smu(adev, enable ?
					       PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable);
}

static int kv_enable_vce_dpm(struct amdgpu_device *adev, bool enable)
{
	return amdgpu_kv_notify_message_to_smu(adev, enable ?
					       PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable);
}

static int kv_enable_samu_dpm(struct amdgpu_device *adev, bool enable)
{
	return amdgpu_kv_notify_message_to_smu(adev, enable ?
					       PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable);
}

static int kv_enable_acp_dpm(struct amdgpu_device *adev, bool enable)
{
	return amdgpu_kv_notify_message_to_smu(adev, enable ?
					       PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable);
}
static int kv_update_uvd_dpm(struct amdgpu_device *adev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	struct amdgpu_uvd_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
	int ret;
	u32 mask;

	if (!gate) {
		if (table->count)
			pi->uvd_boot_level = table->count - 1;
		else
			pi->uvd_boot_level = 0;

		if (!pi->caps_uvd_dpm || pi->caps_stable_p_state) {
			mask = 1 << pi->uvd_boot_level;
		} else {
			mask = 0x1f;
		}

		ret = amdgpu_kv_copy_bytes_to_smc(adev,
						  pi->dpm_table_start +
						  offsetof(SMU7_Fusion_DpmTable, UvdBootLevel),
						  (uint8_t *)&pi->uvd_boot_level,
						  sizeof(u8), pi->sram_end);
		if (ret)
			return ret;

		amdgpu_kv_send_msg_to_smc_with_parameter(adev,
							 PPSMC_MSG_UVDDPM_SetEnabledMask,
							 mask);
	}

	return kv_enable_uvd_dpm(adev, !gate);
}

static u8 kv_get_vce_boot_level(struct amdgpu_device *adev, u32 evclk)
{
	u8 i;
	struct amdgpu_vce_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;

	for (i = 0; i < table->count; i++) {
		if (table->entries[i].evclk >= evclk)
			break;
	}

	return i;
}
static int kv_update_vce_dpm(struct amdgpu_device *adev,
			     struct amdgpu_ps *amdgpu_new_state,
			     struct amdgpu_ps *amdgpu_current_state)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	struct amdgpu_vce_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
	int ret;

	if (amdgpu_new_state->evclk > 0 && amdgpu_current_state->evclk == 0) {
		kv_dpm_powergate_vce(adev, false);
		if (pi->caps_stable_p_state)
			pi->vce_boot_level = table->count - 1;
		else
			pi->vce_boot_level = kv_get_vce_boot_level(adev, amdgpu_new_state->evclk);

		ret = amdgpu_kv_copy_bytes_to_smc(adev,
						  pi->dpm_table_start +
						  offsetof(SMU7_Fusion_DpmTable, VceBootLevel),
						  (u8 *)&pi->vce_boot_level,
						  sizeof(u8),
						  pi->sram_end);
		if (ret)
			return ret;

		if (pi->caps_stable_p_state)
			amdgpu_kv_send_msg_to_smc_with_parameter(adev,
								 PPSMC_MSG_VCEDPM_SetEnabledMask,
								 (1 << pi->vce_boot_level));
		kv_enable_vce_dpm(adev, true);
	} else if (amdgpu_new_state->evclk == 0 && amdgpu_current_state->evclk > 0) {
		kv_enable_vce_dpm(adev, false);
		kv_dpm_powergate_vce(adev, true);
	}

	return 0;
}
static int kv_update_samu_dpm(struct amdgpu_device *adev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	struct amdgpu_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
	int ret;

	if (!gate) {
		if (pi->caps_stable_p_state)
			pi->samu_boot_level = table->count - 1;
		else
			pi->samu_boot_level = 0;

		ret = amdgpu_kv_copy_bytes_to_smc(adev,
						  pi->dpm_table_start +
						  offsetof(SMU7_Fusion_DpmTable, SamuBootLevel),
						  (u8 *)&pi->samu_boot_level,
						  sizeof(u8),
						  pi->sram_end);
		if (ret)
			return ret;

		if (pi->caps_stable_p_state)
			amdgpu_kv_send_msg_to_smc_with_parameter(adev,
								 PPSMC_MSG_SAMUDPM_SetEnabledMask,
								 (1 << pi->samu_boot_level));
	}

	return kv_enable_samu_dpm(adev, !gate);
}

static u8 kv_get_acp_boot_level(struct amdgpu_device *adev)
{
	u8 i;
	struct amdgpu_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;

	for (i = 0; i < table->count; i++) {
		if (table->entries[i].clk >= 0) /* XXX */
			break;
	}

	if (i >= table->count)
		i = table->count - 1;

	return i;
}

static void kv_update_acp_boot_level(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u8 acp_boot_level;

	if (!pi->caps_stable_p_state) {
		acp_boot_level = kv_get_acp_boot_level(adev);
		if (acp_boot_level != pi->acp_boot_level) {
			pi->acp_boot_level = acp_boot_level;
			amdgpu_kv_send_msg_to_smc_with_parameter(adev,
								 PPSMC_MSG_ACPDPM_SetEnabledMask,
								 (1 << pi->acp_boot_level));
		}
	}
}
static int kv_update_acp_dpm(struct amdgpu_device *adev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	struct amdgpu_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
	int ret;

	if (!gate) {
		if (pi->caps_stable_p_state)
			pi->acp_boot_level = table->count - 1;
		else
			pi->acp_boot_level = kv_get_acp_boot_level(adev);

		ret = amdgpu_kv_copy_bytes_to_smc(adev,
						  pi->dpm_table_start +
						  offsetof(SMU7_Fusion_DpmTable, AcpBootLevel),
						  (u8 *)&pi->acp_boot_level,
						  sizeof(u8),
						  pi->sram_end);
		if (ret)
			return ret;

		if (pi->caps_stable_p_state)
			amdgpu_kv_send_msg_to_smc_with_parameter(adev,
								 PPSMC_MSG_ACPDPM_SetEnabledMask,
								 (1 << pi->acp_boot_level));
	}

	return kv_enable_acp_dpm(adev, !gate);
}
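
/*
 * Powergating helpers: each block (UVD/VCE/SAMU/ACP) is taken out of DPM
 * first, then powered off through the SMU when the corresponding
 * caps_*_pg flag is set; power-up runs the sequence in reverse.
 */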
static void kv_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret;

	pi->uvd_power_gated = gate;

	if (gate) {
		/* stop the UVD block */
		ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
						   AMD_PG_STATE_GATE);
		kv_update_uvd_dpm(adev, gate);
		if (pi->caps_uvd_pg)
			/* power off the UVD block */
			amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerOFF);
	} else {
		if (pi->caps_uvd_pg)
			/* power on the UVD block */
			amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerON);
		/* re-init the UVD block */
		kv_update_uvd_dpm(adev, gate);

		ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
						   AMD_PG_STATE_UNGATE);
	}
}

static void kv_dpm_powergate_vce(struct amdgpu_device *adev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	if (pi->vce_power_gated == gate)
		return;

	pi->vce_power_gated = gate;

	if (!pi->caps_vce_pg)
		return;

	if (gate)
		amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerOFF);
	else
		amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON);
}

static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	if (pi->samu_power_gated == gate)
		return;

	pi->samu_power_gated = gate;

	if (gate) {
		kv_update_samu_dpm(adev, true);
		if (pi->caps_samu_pg)
			amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_SAMPowerOFF);
	} else {
		if (pi->caps_samu_pg)
			amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_SAMPowerON);
		kv_update_samu_dpm(adev, false);
	}
}

static void kv_dpm_powergate_acp(struct amdgpu_device *adev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	if (pi->acp_power_gated == gate)
		return;

	if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS)
		return;

	pi->acp_power_gated = gate;

	if (gate) {
		kv_update_acp_dpm(adev, true);
		if (pi->caps_acp_pg)
			amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_ACPPowerOFF);
	} else {
		if (pi->caps_acp_pg)
			amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_ACPPowerON);
		kv_update_acp_dpm(adev, false);
	}
}
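
/*
 * Clamp the enabled DPM levels to the sclk range of the new power state:
 * lowest_valid is the first level fast enough for the state's low level,
 * highest_valid the last level not exceeding its high level.
 */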
static void kv_set_valid_clock_range(struct amdgpu_device *adev,
				     struct amdgpu_ps *new_rps)
{
	struct kv_ps *new_ps = kv_get_ps(new_rps);
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 i;
	struct amdgpu_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;

	if (table && table->count) {
		for (i = 0; i < pi->graphics_dpm_level_count; i++) {
			if ((table->entries[i].clk >= new_ps->levels[0].sclk) ||
			    (i == (pi->graphics_dpm_level_count - 1))) {
				pi->lowest_valid = i;
				break;
			}
		}

		for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
			if (table->entries[i].clk <= new_ps->levels[new_ps->num_levels - 1].sclk)
				break;
		}
		pi->highest_valid = i;

		if (pi->lowest_valid > pi->highest_valid) {
			if ((new_ps->levels[0].sclk - table->entries[pi->highest_valid].clk) >
			    (table->entries[pi->lowest_valid].clk - new_ps->levels[new_ps->num_levels - 1].sclk))
				pi->highest_valid = pi->lowest_valid;
			else
				pi->lowest_valid = pi->highest_valid;
		}
	} else {
		struct sumo_sclk_voltage_mapping_table *table =
			&pi->sys_info.sclk_voltage_mapping_table;

		for (i = 0; i < (int)pi->graphics_dpm_level_count; i++) {
			if (table->entries[i].sclk_frequency >= new_ps->levels[0].sclk ||
			    i == (int)(pi->graphics_dpm_level_count - 1)) {
				pi->lowest_valid = i;
				break;
			}
		}

		for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
			if (table->entries[i].sclk_frequency <=
			    new_ps->levels[new_ps->num_levels - 1].sclk)
				break;
		}
		pi->highest_valid = i;

		if (pi->lowest_valid > pi->highest_valid) {
			if ((new_ps->levels[0].sclk -
			     table->entries[pi->highest_valid].sclk_frequency) >
			    (table->entries[pi->lowest_valid].sclk_frequency -
			     new_ps->levels[new_ps->num_levels - 1].sclk))
				pi->highest_valid = pi->lowest_valid;
			else
				pi->lowest_valid = pi->highest_valid;
		}
	}
}
static int kv_update_dfs_bypass_settings(struct amdgpu_device *adev,
					 struct amdgpu_ps *new_rps)
{
	struct kv_ps *new_ps = kv_get_ps(new_rps);
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret = 0;
	u8 clk_bypass_cntl;

	if (pi->caps_enable_dfs_bypass) {
		clk_bypass_cntl = new_ps->need_dfs_bypass ?
			pi->graphics_level[pi->graphics_boot_level].ClkBypassCntl : 0;
		ret = amdgpu_kv_copy_bytes_to_smc(adev,
						  (pi->dpm_table_start +
						   offsetof(SMU7_Fusion_DpmTable, GraphicsLevel) +
						   (pi->graphics_boot_level * sizeof(SMU7_Fusion_GraphicsLevel)) +
						   offsetof(SMU7_Fusion_GraphicsLevel, ClkBypassCntl)),
						  &clk_bypass_cntl,
						  sizeof(u8), pi->sram_end);
	}

	return ret;
}

static int kv_enable_nb_dpm(struct amdgpu_device *adev,
			    bool enable)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret = 0;

	if (enable) {
		if (pi->enable_nb_dpm && !pi->nb_dpm_enabled) {
			ret = amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_NBDPM_Enable);
			if (ret == 0)
				pi->nb_dpm_enabled = true;
		}
	} else {
		if (pi->enable_nb_dpm && pi->nb_dpm_enabled) {
			ret = amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_NBDPM_Disable);
			if (ret == 0)
				pi->nb_dpm_enabled = false;
		}
	}

	return ret;
}
static int kv_dpm_force_performance_level(struct amdgpu_device *adev,
					  enum amd_dpm_forced_level level)
{
	int ret;

	if (level == AMD_DPM_FORCED_LEVEL_HIGH) {
		ret = kv_force_dpm_highest(adev);
		if (ret)
			return ret;
	} else if (level == AMD_DPM_FORCED_LEVEL_LOW) {
		ret = kv_force_dpm_lowest(adev);
		if (ret)
			return ret;
	} else if (level == AMD_DPM_FORCED_LEVEL_AUTO) {
		ret = kv_unforce_levels(adev);
		if (ret)
			return ret;
	}

	adev->pm.dpm.forced_level = level;

	return 0;
}

static int kv_dpm_pre_set_power_state(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps;
	struct amdgpu_ps *new_ps = &requested_ps;

	kv_update_requested_ps(adev, new_ps);

	kv_apply_state_adjust_rules(adev,
				    &pi->requested_rps,
				    &pi->current_rps);

	return 0;
}
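
/*
 * Apply the requested power state. Kabini/Mullins reprogram levels around
 * a forced-lowest window, while other parts freeze SCLK DPM during the
 * table update instead.
 */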
static int kv_dpm_set_power_state(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	struct amdgpu_ps *new_ps = &pi->requested_rps;
	struct amdgpu_ps *old_ps = &pi->current_rps;
	int ret;

	if (pi->bapm_enable) {
		ret = amdgpu_kv_smc_bapm_enable(adev, adev->pm.dpm.ac_power);
		if (ret) {
			DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n");
			return ret;
		}
	}

	if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) {
		if (pi->enable_dpm) {
			kv_set_valid_clock_range(adev, new_ps);
			kv_update_dfs_bypass_settings(adev, new_ps);
			ret = kv_calculate_ds_divider(adev);
			if (ret) {
				DRM_ERROR("kv_calculate_ds_divider failed\n");
				return ret;
			}
			kv_calculate_nbps_level_settings(adev);
			kv_calculate_dpm_settings(adev);
			kv_force_lowest_valid(adev);
			kv_enable_new_levels(adev);
			kv_upload_dpm_settings(adev);
			kv_program_nbps_index_settings(adev, new_ps);
			kv_unforce_levels(adev);
			kv_set_enabled_levels(adev);
			kv_force_lowest_valid(adev);
			kv_unforce_levels(adev);

			ret = kv_update_vce_dpm(adev, new_ps, old_ps);
			if (ret) {
				DRM_ERROR("kv_update_vce_dpm failed\n");
				return ret;
			}
			kv_update_sclk_t(adev);
			if (adev->asic_type == CHIP_MULLINS)
				kv_enable_nb_dpm(adev, true);
		}
	} else {
		if (pi->enable_dpm) {
			kv_set_valid_clock_range(adev, new_ps);
			kv_update_dfs_bypass_settings(adev, new_ps);
			ret = kv_calculate_ds_divider(adev);
			if (ret) {
				DRM_ERROR("kv_calculate_ds_divider failed\n");
				return ret;
			}
			kv_calculate_nbps_level_settings(adev);
			kv_calculate_dpm_settings(adev);
			kv_freeze_sclk_dpm(adev, true);
			kv_upload_dpm_settings(adev);
			kv_program_nbps_index_settings(adev, new_ps);
			kv_freeze_sclk_dpm(adev, false);
			kv_set_enabled_levels(adev);
			ret = kv_update_vce_dpm(adev, new_ps, old_ps);
			if (ret) {
				DRM_ERROR("kv_update_vce_dpm failed\n");
				return ret;
			}
			kv_update_acp_boot_level(adev);
			kv_update_sclk_t(adev);
			kv_enable_nb_dpm(adev, true);
		}
	}

	return 0;
}

static void kv_dpm_post_set_power_state(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	struct amdgpu_ps *new_ps = &pi->requested_rps;

	kv_update_current_ps(adev, new_ps);
}

static void kv_dpm_setup_asic(struct amdgpu_device *adev)
{
	sumo_take_smu_control(adev, true);
	kv_init_powergate_state(adev);
	kv_init_sclk_t(adev);
}
#if 0
static void kv_dpm_reset_asic(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) {
		kv_force_lowest_valid(adev);
		kv_init_graphics_levels(adev);
		kv_program_bootup_state(adev);
		kv_upload_dpm_settings(adev);
		kv_force_lowest_valid(adev);
		kv_unforce_levels(adev);
	} else {
		kv_init_graphics_levels(adev);
		kv_program_bootup_state(adev);
		kv_freeze_sclk_dpm(adev, true);
		kv_upload_dpm_settings(adev);
		kv_freeze_sclk_dpm(adev, false);
		kv_set_enabled_level(adev, pi->graphics_boot_level);
	}
}
#endif

static void kv_construct_max_power_limits_table(struct amdgpu_device *adev,
						struct amdgpu_clock_and_voltage_limits *table)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	if (pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries > 0) {
		int idx = pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries - 1;
		table->sclk =
			pi->sys_info.sclk_voltage_mapping_table.entries[idx].sclk_frequency;
		table->vddc =
			kv_convert_2bit_index_to_voltage(adev,
							 pi->sys_info.sclk_voltage_mapping_table.entries[idx].vid_2bit);
	}

	table->mclk = pi->sys_info.nbp_memory_clock[0];
}
static void kv_patch_voltage_values(struct amdgpu_device *adev)
{
	int i;
	struct amdgpu_uvd_clock_voltage_dependency_table *uvd_table =
		&adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
	struct amdgpu_vce_clock_voltage_dependency_table *vce_table =
		&adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
	struct amdgpu_clock_voltage_dependency_table *samu_table =
		&adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
	struct amdgpu_clock_voltage_dependency_table *acp_table =
		&adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;

	if (uvd_table->count) {
		for (i = 0; i < uvd_table->count; i++)
			uvd_table->entries[i].v =
				kv_convert_8bit_index_to_voltage(adev,
								 uvd_table->entries[i].v);
	}

	if (vce_table->count) {
		for (i = 0; i < vce_table->count; i++)
			vce_table->entries[i].v =
				kv_convert_8bit_index_to_voltage(adev,
								 vce_table->entries[i].v);
	}

	if (samu_table->count) {
		for (i = 0; i < samu_table->count; i++)
			samu_table->entries[i].v =
				kv_convert_8bit_index_to_voltage(adev,
								 samu_table->entries[i].v);
	}

	if (acp_table->count) {
		for (i = 0; i < acp_table->count; i++)
			acp_table->entries[i].v =
				kv_convert_8bit_index_to_voltage(adev,
								 acp_table->entries[i].v);
	}
}

static void kv_construct_boot_state(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	pi->boot_pl.sclk = pi->sys_info.bootup_sclk;
	pi->boot_pl.vddc_index = pi->sys_info.bootup_nb_voltage_index;
	pi->boot_pl.ds_divider_index = 0;
	pi->boot_pl.ss_divider_index = 0;
	pi->boot_pl.allow_gnb_slow = 1;
	pi->boot_pl.force_nbp_state = 0;
	pi->boot_pl.display_wm = 0;
	pi->boot_pl.vce_wm = 0;
}
static int kv_force_dpm_highest(struct amdgpu_device *adev)
{
	int ret;
	u32 enable_mask, i;

	ret = amdgpu_kv_dpm_get_enable_mask(adev, &enable_mask);
	if (ret)
		return ret;

	for (i = SMU7_MAX_LEVELS_GRAPHICS - 1; i > 0; i--) {
		if (enable_mask & (1 << i))
			break;
	}

	if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS)
		return amdgpu_kv_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DPM_ForceState, i);
	else
		return kv_set_enabled_level(adev, i);
}

static int kv_force_dpm_lowest(struct amdgpu_device *adev)
{
	int ret;
	u32 enable_mask, i;

	ret = amdgpu_kv_dpm_get_enable_mask(adev, &enable_mask);
	if (ret)
		return ret;

	for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) {
		if (enable_mask & (1 << i))
			break;
	}

	if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS)
		return amdgpu_kv_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DPM_ForceState, i);
	else
		return kv_set_enabled_level(adev, i);
}

static u8 kv_get_sleep_divider_id_from_clock(struct amdgpu_device *adev,
					     u32 sclk, u32 min_sclk_in_sr)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 i;
	u32 temp;
	u32 min = max(min_sclk_in_sr, (u32)KV_MINIMUM_ENGINE_CLOCK);

	if (sclk < min)
		return 0;

	if (!pi->caps_sclk_ds)
		return 0;

	for (i = KV_MAX_DEEPSLEEP_DIVIDER_ID; i > 0; i--) {
		temp = sclk >> i;
		if (temp >= min)
			break;
	}

	return (u8)i;
}
static int kv_get_high_voltage_limit(struct amdgpu_device *adev, int *limit)
{
        struct kv_power_info *pi = kv_get_pi(adev);
        struct amdgpu_clock_voltage_dependency_table *table =
                &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
        int i;

        if (table && table->count) {
                for (i = table->count - 1; i >= 0; i--) {
                        if (pi->high_voltage_t &&
                            (kv_convert_8bit_index_to_voltage(adev, table->entries[i].v) <=
                             pi->high_voltage_t)) {
                                *limit = i;
                                return 0;
                        }
                }
        } else {
                struct sumo_sclk_voltage_mapping_table *table =
                        &pi->sys_info.sclk_voltage_mapping_table;

                for (i = table->num_max_dpm_entries - 1; i >= 0; i--) {
                        if (pi->high_voltage_t &&
                            (kv_convert_2bit_index_to_voltage(adev, table->entries[i].vid_2bit) <=
                             pi->high_voltage_t)) {
                                *limit = i;
                                return 0;
                        }
                }
        }

        *limit = 0;
        return 0;
}

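/*
 * Massage the requested power state before it is programmed: pin the
 * VCE clocks while VCE is active, clamp every level's sclk to the
 * stable-p-state and high-voltage limits, and derive the NB p-state
 * hints from the active crtc count, memory clock and playback state.
 */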
static void kv_apply_state_adjust_rules(struct amdgpu_device *adev,
                                        struct amdgpu_ps *new_rps,
                                        struct amdgpu_ps *old_rps)
{
        struct kv_ps *ps = kv_get_ps(new_rps);
        struct kv_power_info *pi = kv_get_pi(adev);
        u32 min_sclk = 10000; /* ??? */
        u32 sclk, mclk = 0;
        int i, limit;
        bool force_high;
        struct amdgpu_clock_voltage_dependency_table *table =
                &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
        u32 stable_p_state_sclk = 0;
        struct amdgpu_clock_and_voltage_limits *max_limits =
                &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;

        if (new_rps->vce_active) {
                new_rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk;
                new_rps->ecclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].ecclk;
        } else {
                new_rps->evclk = 0;
                new_rps->ecclk = 0;
        }

        mclk = max_limits->mclk;
        sclk = min_sclk;

        if (pi->caps_stable_p_state) {
                stable_p_state_sclk = (max_limits->sclk * 75) / 100;

                for (i = table->count - 1; i >= 0; i--) {
                        if (stable_p_state_sclk >= table->entries[i].clk) {
                                stable_p_state_sclk = table->entries[i].clk;
                                break;
                        }
                }

                if (i > 0)
                        stable_p_state_sclk = table->entries[0].clk;

                sclk = stable_p_state_sclk;
        }

        if (new_rps->vce_active) {
                if (sclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk)
                        sclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk;
        }

        ps->need_dfs_bypass = true;

        for (i = 0; i < ps->num_levels; i++) {
                if (ps->levels[i].sclk < sclk)
                        ps->levels[i].sclk = sclk;
        }

        if (table && table->count) {
                for (i = 0; i < ps->num_levels; i++) {
                        if (pi->high_voltage_t &&
                            (pi->high_voltage_t <
                             kv_convert_8bit_index_to_voltage(adev, ps->levels[i].vddc_index))) {
                                kv_get_high_voltage_limit(adev, &limit);
                                ps->levels[i].sclk = table->entries[limit].clk;
                        }
                }
        } else {
                struct sumo_sclk_voltage_mapping_table *table =
                        &pi->sys_info.sclk_voltage_mapping_table;

                for (i = 0; i < ps->num_levels; i++) {
                        if (pi->high_voltage_t &&
                            (pi->high_voltage_t <
                             kv_convert_8bit_index_to_voltage(adev, ps->levels[i].vddc_index))) {
                                kv_get_high_voltage_limit(adev, &limit);
                                ps->levels[i].sclk = table->entries[limit].sclk_frequency;
                        }
                }
        }

        if (pi->caps_stable_p_state) {
                for (i = 0; i < ps->num_levels; i++) {
                        ps->levels[i].sclk = stable_p_state_sclk;
                }
        }

        pi->video_start = new_rps->dclk || new_rps->vclk ||
                          new_rps->evclk || new_rps->ecclk;

        if ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) ==
            ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
                pi->battery_state = true;
        else
                pi->battery_state = false;

        if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) {
                ps->dpm0_pg_nb_ps_lo = 0x1;
                ps->dpm0_pg_nb_ps_hi = 0x0;
                ps->dpmx_nb_ps_lo = 0x1;
                ps->dpmx_nb_ps_hi = 0x0;
        } else {
                ps->dpm0_pg_nb_ps_lo = 0x3;
                ps->dpm0_pg_nb_ps_hi = 0x0;
                ps->dpmx_nb_ps_lo = 0x3;
                ps->dpmx_nb_ps_hi = 0x0;

                if (pi->sys_info.nb_dpm_enable) {
                        force_high = (mclk >= pi->sys_info.nbp_memory_clock[3]) ||
                                pi->video_start || (adev->pm.dpm.new_active_crtc_count >= 3) ||
                                pi->disable_nb_ps3_in_battery;
                        ps->dpm0_pg_nb_ps_lo = force_high ? 0x2 : 0x3;
                        ps->dpm0_pg_nb_ps_hi = 0x2;
                        ps->dpmx_nb_ps_lo = force_high ? 0x2 : 0x3;
                        ps->dpmx_nb_ps_hi = 0x2;
                }
        }
}

static void kv_dpm_power_level_enabled_for_throttle(struct amdgpu_device *adev,
                                                    u32 index, bool enable)
{
        struct kv_power_info *pi = kv_get_pi(adev);

        pi->graphics_level[index].EnabledForThrottle = enable ? 1 : 0;
}

static int kv_calculate_ds_divider(struct amdgpu_device *adev)
{
        struct kv_power_info *pi = kv_get_pi(adev);
        u32 sclk_in_sr = 10000; /* ??? */
        u32 i;

        if (pi->lowest_valid > pi->highest_valid)
                return -EINVAL;

        for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
                pi->graphics_level[i].DeepSleepDivId =
                        kv_get_sleep_divider_id_from_clock(adev,
                                                           be32_to_cpu(pi->graphics_level[i].SclkFrequency),
                                                           sclk_in_sr);
        }
        return 0;
}

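/*
 * Decide, per graphics level, whether the north bridge may drop to a
 * slow p-state.  High memory demand (mclk at the NBP3 level), three
 * or more active crtcs, or video playback keep the NB fast.
 */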
static int kv_calculate_nbps_level_settings(struct amdgpu_device *adev)
{
        struct kv_power_info *pi = kv_get_pi(adev);
        u32 i;
        bool force_high;
        struct amdgpu_clock_and_voltage_limits *max_limits =
                &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
        u32 mclk = max_limits->mclk;

        if (pi->lowest_valid > pi->highest_valid)
                return -EINVAL;

        if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) {
                for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
                        pi->graphics_level[i].GnbSlow = 1;
                        pi->graphics_level[i].ForceNbPs1 = 0;
                        pi->graphics_level[i].UpH = 0;
                }

                if (!pi->sys_info.nb_dpm_enable)
                        return 0;

                force_high = ((mclk >= pi->sys_info.nbp_memory_clock[3]) ||
                              (adev->pm.dpm.new_active_crtc_count >= 3) || pi->video_start);

                if (force_high) {
                        for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
                                pi->graphics_level[i].GnbSlow = 0;
                } else {
                        if (pi->battery_state)
                                pi->graphics_level[0].ForceNbPs1 = 1;

                        pi->graphics_level[1].GnbSlow = 0;
                        pi->graphics_level[2].GnbSlow = 0;
                        pi->graphics_level[3].GnbSlow = 0;
                        pi->graphics_level[4].GnbSlow = 0;
                }
        } else {
                for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
                        pi->graphics_level[i].GnbSlow = 1;
                        pi->graphics_level[i].ForceNbPs1 = 0;
                        pi->graphics_level[i].UpH = 0;
                }

                if (pi->sys_info.nb_dpm_enable && pi->battery_state) {
                        pi->graphics_level[pi->lowest_valid].UpH = 0x28;
                        pi->graphics_level[pi->lowest_valid].GnbSlow = 0;
                        if (pi->lowest_valid != pi->highest_valid)
                                pi->graphics_level[pi->lowest_valid].ForceNbPs1 = 1;
                }
        }
        return 0;
}

static int kv_calculate_dpm_settings(struct amdgpu_device *adev)
{
        struct kv_power_info *pi = kv_get_pi(adev);
        u32 i;

        if (pi->lowest_valid > pi->highest_valid)
                return -EINVAL;

        for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
                pi->graphics_level[i].DisplayWatermark = (i == pi->highest_valid) ? 1 : 0;

        return 0;
}

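/*
 * Build the SMU graphics level table from the vbios sclk/voltage
 * dependency tables, stopping at the optional high-voltage cutoff.
 * All levels start out disabled; the valid range is enabled later
 * through the SMC enable mask.
 */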
static void kv_init_graphics_levels(struct amdgpu_device *adev)
{
        struct kv_power_info *pi = kv_get_pi(adev);
        u32 i;
        struct amdgpu_clock_voltage_dependency_table *table =
                &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;

        if (table && table->count) {
                u32 vid_2bit;

                pi->graphics_dpm_level_count = 0;
                for (i = 0; i < table->count; i++) {
                        if (pi->high_voltage_t &&
                            (pi->high_voltage_t <
                             kv_convert_8bit_index_to_voltage(adev, table->entries[i].v)))
                                break;

                        kv_set_divider_value(adev, i, table->entries[i].clk);
                        vid_2bit = kv_convert_vid7_to_vid2(adev,
                                                           &pi->sys_info.vid_mapping_table,
                                                           table->entries[i].v);
                        kv_set_vid(adev, i, vid_2bit);
                        kv_set_at(adev, i, pi->at[i]);
                        kv_dpm_power_level_enabled_for_throttle(adev, i, true);
                        pi->graphics_dpm_level_count++;
                }
        } else {
                struct sumo_sclk_voltage_mapping_table *table =
                        &pi->sys_info.sclk_voltage_mapping_table;

                pi->graphics_dpm_level_count = 0;
                for (i = 0; i < table->num_max_dpm_entries; i++) {
                        if (pi->high_voltage_t &&
                            pi->high_voltage_t <
                            kv_convert_2bit_index_to_voltage(adev, table->entries[i].vid_2bit))
                                break;

                        kv_set_divider_value(adev, i, table->entries[i].sclk_frequency);
                        kv_set_vid(adev, i, table->entries[i].vid_2bit);
                        kv_set_at(adev, i, pi->at[i]);
                        kv_dpm_power_level_enabled_for_throttle(adev, i, true);
                        pi->graphics_dpm_level_count++;
                }
        }

        for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++)
                kv_dpm_power_level_enable(adev, i, false);
}

static void kv_enable_new_levels(struct amdgpu_device *adev)
{
        struct kv_power_info *pi = kv_get_pi(adev);
        u32 i;

        for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) {
                if (i >= pi->lowest_valid && i <= pi->highest_valid)
                        kv_dpm_power_level_enable(adev, i, true);
        }
}

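/*
 * The SMC takes the set of usable graphics levels as a bitmask via
 * PPSMC_MSG_SCLKDPM_SetEnabledMask: a single bit to force one level,
 * or the whole lowest..highest valid range for normal operation.
 */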
static int kv_set_enabled_level(struct amdgpu_device *adev, u32 level)
{
        u32 new_mask = (1 << level);

        return amdgpu_kv_send_msg_to_smc_with_parameter(adev,
                                                        PPSMC_MSG_SCLKDPM_SetEnabledMask,
                                                        new_mask);
}

static int kv_set_enabled_levels(struct amdgpu_device *adev)
{
        struct kv_power_info *pi = kv_get_pi(adev);
        u32 i, new_mask = 0;

        for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
                new_mask |= (1 << i);

        return amdgpu_kv_send_msg_to_smc_with_parameter(adev,
                                                        PPSMC_MSG_SCLKDPM_SetEnabledMask,
                                                        new_mask);
}

static void kv_program_nbps_index_settings(struct amdgpu_device *adev,
                                           struct amdgpu_ps *new_rps)
{
        struct kv_ps *new_ps = kv_get_ps(new_rps);
        struct kv_power_info *pi = kv_get_pi(adev);
        u32 nbdpmconfig1;

        if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS)
                return;

        if (pi->sys_info.nb_dpm_enable) {
                nbdpmconfig1 = RREG32_SMC(ixNB_DPM_CONFIG_1);
                nbdpmconfig1 &= ~(NB_DPM_CONFIG_1__Dpm0PgNbPsLo_MASK |
                                  NB_DPM_CONFIG_1__Dpm0PgNbPsHi_MASK |
                                  NB_DPM_CONFIG_1__DpmXNbPsLo_MASK |
                                  NB_DPM_CONFIG_1__DpmXNbPsHi_MASK);
                nbdpmconfig1 |= (new_ps->dpm0_pg_nb_ps_lo << NB_DPM_CONFIG_1__Dpm0PgNbPsLo__SHIFT) |
                                (new_ps->dpm0_pg_nb_ps_hi << NB_DPM_CONFIG_1__Dpm0PgNbPsHi__SHIFT) |
                                (new_ps->dpmx_nb_ps_lo << NB_DPM_CONFIG_1__DpmXNbPsLo__SHIFT) |
                                (new_ps->dpmx_nb_ps_hi << NB_DPM_CONFIG_1__DpmXNbPsHi__SHIFT);
                WREG32_SMC(ixNB_DPM_CONFIG_1, nbdpmconfig1);
        }
}

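/*
 * Program the thermal interrupt thresholds.  The DIG_THERM_INTH/INTL
 * fields appear to encode degrees C with a +49 offset, which matches
 * the -49 conversion applied when the temperature is read back.
 */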
static int kv_set_thermal_temperature_range(struct amdgpu_device *adev,
                                            int min_temp, int max_temp)
{
        int low_temp = 0 * 1000;
        int high_temp = 255 * 1000;
        u32 tmp;

        if (low_temp < min_temp)
                low_temp = min_temp;
        if (high_temp > max_temp)
                high_temp = max_temp;
        if (high_temp < low_temp) {
                DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
                return -EINVAL;
        }

        tmp = RREG32_SMC(ixCG_THERMAL_INT_CTRL);
        tmp &= ~(CG_THERMAL_INT_CTRL__DIG_THERM_INTH_MASK |
                 CG_THERMAL_INT_CTRL__DIG_THERM_INTL_MASK);
        tmp |= ((49 + (high_temp / 1000)) << CG_THERMAL_INT_CTRL__DIG_THERM_INTH__SHIFT) |
               ((49 + (low_temp / 1000)) << CG_THERMAL_INT_CTRL__DIG_THERM_INTL__SHIFT);
        WREG32_SMC(ixCG_THERMAL_INT_CTRL, tmp);

        adev->pm.dpm.thermal.min_temp = low_temp;
        adev->pm.dpm.thermal.max_temp = high_temp;

        return 0;
}

union igp_info {
        struct _ATOM_INTEGRATED_SYSTEM_INFO info;
        struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
        struct _ATOM_INTEGRATED_SYSTEM_INFO_V5 info_5;
        struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
        struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
        struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8;
};

static int kv_parse_sys_info_table(struct amdgpu_device *adev)
{
        struct kv_power_info *pi = kv_get_pi(adev);
        struct amdgpu_mode_info *mode_info = &adev->mode_info;
        int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
        union igp_info *igp_info;
        u8 frev, crev;
        u16 data_offset;
        int i;

        if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
                                          &frev, &crev, &data_offset)) {
                igp_info = (union igp_info *)(mode_info->atom_context->bios +
                                              data_offset);

                if (crev != 8) {
                        DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
                        return -EINVAL;
                }
                pi->sys_info.bootup_sclk = le32_to_cpu(igp_info->info_8.ulBootUpEngineClock);
                pi->sys_info.bootup_uma_clk = le32_to_cpu(igp_info->info_8.ulBootUpUMAClock);
                pi->sys_info.bootup_nb_voltage_index =
                        le16_to_cpu(igp_info->info_8.usBootUpNBVoltage);
                if (igp_info->info_8.ucHtcTmpLmt == 0)
                        pi->sys_info.htc_tmp_lmt = 203;
                else
                        pi->sys_info.htc_tmp_lmt = igp_info->info_8.ucHtcTmpLmt;
                if (igp_info->info_8.ucHtcHystLmt == 0)
                        pi->sys_info.htc_hyst_lmt = 5;
                else
                        pi->sys_info.htc_hyst_lmt = igp_info->info_8.ucHtcHystLmt;
                if (pi->sys_info.htc_tmp_lmt <= pi->sys_info.htc_hyst_lmt) {
                        DRM_ERROR("The htcTmpLmt should be larger than htcHystLmt.\n");
                }

                if (le32_to_cpu(igp_info->info_8.ulSystemConfig) & (1 << 3))
                        pi->sys_info.nb_dpm_enable = true;
                else
                        pi->sys_info.nb_dpm_enable = false;

                for (i = 0; i < KV_NUM_NBPSTATES; i++) {
                        pi->sys_info.nbp_memory_clock[i] =
                                le32_to_cpu(igp_info->info_8.ulNbpStateMemclkFreq[i]);
                        pi->sys_info.nbp_n_clock[i] =
                                le32_to_cpu(igp_info->info_8.ulNbpStateNClkFreq[i]);
                }
                if (le32_to_cpu(igp_info->info_8.ulGPUCapInfo) &
                    SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS)
                        pi->caps_enable_dfs_bypass = true;

                sumo_construct_sclk_voltage_mapping_table(adev,
                                                          &pi->sys_info.sclk_voltage_mapping_table,
                                                          igp_info->info_8.sAvail_SCLK);

                sumo_construct_vid_mapping_table(adev,
                                                 &pi->sys_info.vid_mapping_table,
                                                 igp_info->info_8.sAvail_SCLK);

                kv_construct_max_power_limits_table(adev,
                                                    &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
        }
        return 0;
}

union power_info {
        struct _ATOM_POWERPLAY_INFO info;
        struct _ATOM_POWERPLAY_INFO_V2 info_2;
        struct _ATOM_POWERPLAY_INFO_V3 info_3;
        struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
        struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
        struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
};

union pplib_clock_info {
        struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
        struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
        struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
        struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
};

union pplib_power_state {
        struct _ATOM_PPLIB_STATE v1;
        struct _ATOM_PPLIB_STATE_V2 v2;
};

static void kv_patch_boot_state(struct amdgpu_device *adev,
                                struct kv_ps *ps)
{
        struct kv_power_info *pi = kv_get_pi(adev);

        ps->num_levels = 1;
        ps->levels[0] = pi->boot_pl;
}

static void kv_parse_pplib_non_clock_info(struct amdgpu_device *adev,
                                          struct amdgpu_ps *rps,
                                          struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
                                          u8 table_rev)
{
        struct kv_ps *ps = kv_get_ps(rps);

        rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
        rps->class = le16_to_cpu(non_clock_info->usClassification);
        rps->class2 = le16_to_cpu(non_clock_info->usClassification2);

        if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
                rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
                rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
        } else {
                rps->vclk = 0;
                rps->dclk = 0;
        }

        if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
                adev->pm.dpm.boot_ps = rps;
                kv_patch_boot_state(adev, ps);
        }
        if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
                adev->pm.dpm.uvd_ps = rps;
}

static void kv_parse_pplib_clock_info(struct amdgpu_device *adev,
                                      struct amdgpu_ps *rps, int index,
                                      union pplib_clock_info *clock_info)
{
        struct kv_power_info *pi = kv_get_pi(adev);
        struct kv_ps *ps = kv_get_ps(rps);
        struct kv_pl *pl = &ps->levels[index];
        u32 sclk;

        sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow);
        sclk |= clock_info->sumo.ucEngineClockHigh << 16;
        pl->sclk = sclk;
        pl->vddc_index = clock_info->sumo.vddcIndex;

        ps->num_levels = index + 1;

        if (pi->caps_sclk_ds) {
                pl->ds_divider_index = 5;
                pl->ss_divider_index = 5;
        }
}

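/*
 * Walk the vbios PPLib state array and build the driver's power state
 * list: one amdgpu_ps per entry, each with up to
 * SUMO_MAX_HARDWARE_POWERLEVELS levels pulled from the clock info
 * array, then fill in the engine clock for each VCE state.
 */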
static int kv_parse_power_table(struct amdgpu_device *adev)
{
        struct amdgpu_mode_info *mode_info = &adev->mode_info;
        struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
        union pplib_power_state *power_state;
        int i, j, k, non_clock_array_index, clock_array_index;
        union pplib_clock_info *clock_info;
        struct _StateArray *state_array;
        struct _ClockInfoArray *clock_info_array;
        struct _NonClockInfoArray *non_clock_info_array;
        union power_info *power_info;
        int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
        u16 data_offset;
        u8 frev, crev;
        u8 *power_state_offset;
        struct kv_ps *ps;

        if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
                                           &frev, &crev, &data_offset))
                return -EINVAL;
        power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

        amdgpu_add_thermal_controller(adev);

        state_array = (struct _StateArray *)
                (mode_info->atom_context->bios + data_offset +
                 le16_to_cpu(power_info->pplib.usStateArrayOffset));
        clock_info_array = (struct _ClockInfoArray *)
                (mode_info->atom_context->bios + data_offset +
                 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
        non_clock_info_array = (struct _NonClockInfoArray *)
                (mode_info->atom_context->bios + data_offset +
                 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));

        adev->pm.dpm.ps = kzalloc(sizeof(struct amdgpu_ps) *
                                  state_array->ucNumEntries, GFP_KERNEL);
        if (!adev->pm.dpm.ps)
                return -ENOMEM;
        power_state_offset = (u8 *)state_array->states;
        for (i = 0; i < state_array->ucNumEntries; i++) {
                u8 *idx;
                power_state = (union pplib_power_state *)power_state_offset;
                non_clock_array_index = power_state->v2.nonClockInfoIndex;
                non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
                        &non_clock_info_array->nonClockInfo[non_clock_array_index];
                ps = kzalloc(sizeof(struct kv_ps), GFP_KERNEL);
                if (ps == NULL) {
                        kfree(adev->pm.dpm.ps);
                        return -ENOMEM;
                }
                adev->pm.dpm.ps[i].ps_priv = ps;
                k = 0;
                idx = (u8 *)&power_state->v2.clockInfoIndex[0];
                for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
                        clock_array_index = idx[j];
                        if (clock_array_index >= clock_info_array->ucNumEntries)
                                continue;
                        if (k >= SUMO_MAX_HARDWARE_POWERLEVELS)
                                break;
                        clock_info = (union pplib_clock_info *)
                                ((u8 *)&clock_info_array->clockInfo[0] +
                                 (clock_array_index * clock_info_array->ucEntrySize));
                        kv_parse_pplib_clock_info(adev,
                                                  &adev->pm.dpm.ps[i], k,
                                                  clock_info);
                        k++;
                }
                kv_parse_pplib_non_clock_info(adev, &adev->pm.dpm.ps[i],
                                              non_clock_info,
                                              non_clock_info_array->ucEntrySize);
                power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
        }
        adev->pm.dpm.num_ps = state_array->ucNumEntries;

        /* fill in the vce power states */
        for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
                u32 sclk;
                clock_array_index = adev->pm.dpm.vce_states[i].clk_idx;
                clock_info = (union pplib_clock_info *)
                        &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
                sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow);
                sclk |= clock_info->sumo.ucEngineClockHigh << 16;
                adev->pm.dpm.vce_states[i].sclk = sclk;
                adev->pm.dpm.vce_states[i].mclk = 0;
        }

        return 0;
}

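/*
 * One-time dpm setup: allocate the private kv_power_info, choose the
 * feature caps (deep sleep, bapm, powergating, ...) from the module
 * parameters and pg_flags, then parse the vbios tables into driver
 * state.
 */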
static int kv_dpm_init(struct amdgpu_device *adev)
{
        struct kv_power_info *pi;
        int ret, i;

        pi = kzalloc(sizeof(struct kv_power_info), GFP_KERNEL);
        if (pi == NULL)
                return -ENOMEM;
        adev->pm.dpm.priv = pi;

        ret = amdgpu_get_platform_caps(adev);
        if (ret)
                return ret;

        ret = amdgpu_parse_extended_power_table(adev);
        if (ret)
                return ret;

        for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++)
                pi->at[i] = TRINITY_AT_DFLT;

        pi->sram_end = SMC_RAM_END;

        pi->enable_nb_dpm = true;

        pi->caps_power_containment = true;
        pi->caps_cac = true;
        pi->enable_didt = false;
        if (pi->enable_didt) {
                pi->caps_sq_ramping = true;
                pi->caps_db_ramping = true;
                pi->caps_td_ramping = true;
                pi->caps_tcp_ramping = true;
        }

        if (amdgpu_pp_feature_mask & SCLK_DEEP_SLEEP_MASK)
                pi->caps_sclk_ds = true;
        else
                pi->caps_sclk_ds = false;

        pi->enable_auto_thermal_throttling = true;
        pi->disable_nb_ps3_in_battery = false;
        if (amdgpu_bapm == 0)
                pi->bapm_enable = false;
        else
                pi->bapm_enable = true;
        pi->voltage_drop_t = 0;
        pi->caps_sclk_throttle_low_notification = false;
        pi->caps_fps = false; /* true? */
        pi->caps_uvd_pg = (adev->pg_flags & AMD_PG_SUPPORT_UVD) ? true : false;
        pi->caps_uvd_dpm = true;
        pi->caps_vce_pg = (adev->pg_flags & AMD_PG_SUPPORT_VCE) ? true : false;
        pi->caps_samu_pg = (adev->pg_flags & AMD_PG_SUPPORT_SAMU) ? true : false;
        pi->caps_acp_pg = (adev->pg_flags & AMD_PG_SUPPORT_ACP) ? true : false;
        pi->caps_stable_p_state = false;

        ret = kv_parse_sys_info_table(adev);
        if (ret)
                return ret;

        kv_patch_voltage_values(adev);
        kv_construct_boot_state(adev);

        ret = kv_parse_power_table(adev);
        if (ret)
                return ret;

        pi->enable_dpm = true;

        return 0;
}

static void
kv_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
                                               struct seq_file *m)
{
        struct kv_power_info *pi = kv_get_pi(adev);
        u32 current_index =
                (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
                 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >>
                TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT;
        u32 sclk, tmp;
        u16 vddc;

        if (current_index >= SMU__NUM_SCLK_DPM_STATE) {
                seq_printf(m, "invalid dpm profile %d\n", current_index);
        } else {
                sclk = be32_to_cpu(pi->graphics_level[current_index].SclkFrequency);
                tmp = (RREG32_SMC(ixSMU_VOLTAGE_STATUS) &
                       SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL_MASK) >>
                        SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL__SHIFT;
                vddc = kv_convert_8bit_index_to_voltage(adev, (u16)tmp);
                seq_printf(m, "uvd %sabled\n", pi->uvd_power_gated ? "dis" : "en");
                seq_printf(m, "vce %sabled\n", pi->vce_power_gated ? "dis" : "en");
                seq_printf(m, "power level %d sclk: %u vddc: %u\n",
                           current_index, sclk, vddc);
        }
}

static void
kv_dpm_print_power_state(struct amdgpu_device *adev,
                         struct amdgpu_ps *rps)
{
        int i;
        struct kv_ps *ps = kv_get_ps(rps);

        amdgpu_dpm_print_class_info(rps->class, rps->class2);
        amdgpu_dpm_print_cap_info(rps->caps);
        printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
        for (i = 0; i < ps->num_levels; i++) {
                struct kv_pl *pl = &ps->levels[i];
                printk("\t\tpower level %d sclk: %u vddc: %u\n",
                       i, pl->sclk,
                       kv_convert_8bit_index_to_voltage(adev, pl->vddc_index));
        }
        amdgpu_dpm_print_ps_status(adev, rps);
}

static void kv_dpm_fini(struct amdgpu_device *adev)
{
        int i;

        for (i = 0; i < adev->pm.dpm.num_ps; i++) {
                kfree(adev->pm.dpm.ps[i].ps_priv);
        }
        kfree(adev->pm.dpm.ps);
        kfree(adev->pm.dpm.priv);
        amdgpu_free_extended_power_table(adev);
}

static void kv_dpm_display_configuration_changed(struct amdgpu_device *adev)
{

}

static u32 kv_dpm_get_sclk(struct amdgpu_device *adev, bool low)
{
        struct kv_power_info *pi = kv_get_pi(adev);
        struct kv_ps *requested_state = kv_get_ps(&pi->requested_rps);

        if (low)
                return requested_state->levels[0].sclk;
        else
                return requested_state->levels[requested_state->num_levels - 1].sclk;
}

static u32 kv_dpm_get_mclk(struct amdgpu_device *adev, bool low)
{
        struct kv_power_info *pi = kv_get_pi(adev);

        return pi->sys_info.bootup_uma_clk;
}

/* get temperature in millidegrees */
static int kv_dpm_get_temp(struct amdgpu_device *adev)
{
        u32 temp;
        int actual_temp = 0;

        temp = RREG32_SMC(0xC0300E0C);

        if (temp)
                actual_temp = (temp / 8) - 49;
        else
                actual_temp = 0;

        actual_temp = actual_temp * 1000;

        return actual_temp;
}

static int kv_dpm_early_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        kv_dpm_set_dpm_funcs(adev);
        kv_dpm_set_irq_funcs(adev);

        return 0;
}

static int kv_dpm_late_init(void *handle)
{
        /* powerdown unused blocks for now */
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        int ret;

        if (!amdgpu_dpm)
                return 0;

        /* init the sysfs and debugfs files late */
        ret = amdgpu_pm_sysfs_init(adev);
        if (ret)
                return ret;

        kv_dpm_powergate_acp(adev, true);
        kv_dpm_powergate_samu(adev, true);

        return 0;
}

static int kv_dpm_sw_init(void *handle)
{
        int ret;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        ret = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 230,
                                &adev->pm.dpm.thermal.irq);
        if (ret)
                return ret;

        ret = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 231,
                                &adev->pm.dpm.thermal.irq);
        if (ret)
                return ret;

        /* default to balanced state */
        adev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
        adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
        adev->pm.dpm.forced_level = AMD_DPM_FORCED_LEVEL_AUTO;
        adev->pm.default_sclk = adev->clock.default_sclk;
        adev->pm.default_mclk = adev->clock.default_mclk;
        adev->pm.current_sclk = adev->clock.default_sclk;
        adev->pm.current_mclk = adev->clock.default_mclk;
        adev->pm.int_thermal_type = THERMAL_TYPE_NONE;

        if (amdgpu_dpm == 0)
                return 0;

        INIT_WORK(&adev->pm.dpm.thermal.work, amdgpu_dpm_thermal_work_handler);
        mutex_lock(&adev->pm.mutex);
        ret = kv_dpm_init(adev);
        if (ret)
                goto dpm_failed;
        adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
        if (amdgpu_dpm == 1)
                amdgpu_pm_print_power_states(adev);
        mutex_unlock(&adev->pm.mutex);
        DRM_INFO("amdgpu: dpm initialized\n");

        return 0;

dpm_failed:
        kv_dpm_fini(adev);
        mutex_unlock(&adev->pm.mutex);
        DRM_ERROR("amdgpu: dpm initialization failed\n");
        return ret;
}

static int kv_dpm_sw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        flush_work(&adev->pm.dpm.thermal.work);

        mutex_lock(&adev->pm.mutex);
        amdgpu_pm_sysfs_fini(adev);
        kv_dpm_fini(adev);
        mutex_unlock(&adev->pm.mutex);

        return 0;
}

static int kv_dpm_hw_init(void *handle)
{
        int ret;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (!amdgpu_dpm)
                return 0;

        mutex_lock(&adev->pm.mutex);
        kv_dpm_setup_asic(adev);
        ret = kv_dpm_enable(adev);
        if (ret)
                adev->pm.dpm_enabled = false;
        else
                adev->pm.dpm_enabled = true;
        mutex_unlock(&adev->pm.mutex);
        amdgpu_pm_compute_clocks(adev);

        return ret;
}

static int kv_dpm_hw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (adev->pm.dpm_enabled) {
                mutex_lock(&adev->pm.mutex);
                kv_dpm_disable(adev);
                mutex_unlock(&adev->pm.mutex);
        }

        return 0;
}

static int kv_dpm_suspend(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (adev->pm.dpm_enabled) {
                mutex_lock(&adev->pm.mutex);
                /* disable dpm */
                kv_dpm_disable(adev);
                /* reset the power state */
                adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
                mutex_unlock(&adev->pm.mutex);
        }
        return 0;
}

static int kv_dpm_resume(void *handle)
{
        int ret;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (adev->pm.dpm_enabled) {
                /* asic init will reset to the boot state */
                mutex_lock(&adev->pm.mutex);
                kv_dpm_setup_asic(adev);
                ret = kv_dpm_enable(adev);
                if (ret)
                        adev->pm.dpm_enabled = false;
                else
                        adev->pm.dpm_enabled = true;
                mutex_unlock(&adev->pm.mutex);
                if (adev->pm.dpm_enabled)
                        amdgpu_pm_compute_clocks(adev);
        }
        return 0;
}

static bool kv_dpm_is_idle(void *handle)
{
        return true;
}

static int kv_dpm_wait_for_idle(void *handle)
{
        return 0;
}

static int kv_dpm_soft_reset(void *handle)
{
        return 0;
}

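/*
 * Thermal interrupt enable/disable.  Note that the THERM_INTH/INTL
 * "MASK" bits act as enables here: they are set on
 * AMDGPU_IRQ_STATE_ENABLE and cleared on AMDGPU_IRQ_STATE_DISABLE.
 */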
static int kv_dpm_set_interrupt_state(struct amdgpu_device *adev,
                                      struct amdgpu_irq_src *src,
                                      unsigned type,
                                      enum amdgpu_interrupt_state state)
{
        u32 cg_thermal_int;

        switch (type) {
        case AMDGPU_THERMAL_IRQ_LOW_TO_HIGH:
                switch (state) {
                case AMDGPU_IRQ_STATE_DISABLE:
                        cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL);
                        cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
                        WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int);
                        break;
                case AMDGPU_IRQ_STATE_ENABLE:
                        cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL);
                        cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
                        WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int);
                        break;
                default:
                        break;
                }
                break;

        case AMDGPU_THERMAL_IRQ_HIGH_TO_LOW:
                switch (state) {
                case AMDGPU_IRQ_STATE_DISABLE:
                        cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL);
                        cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
                        WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int);
                        break;
                case AMDGPU_IRQ_STATE_ENABLE:
                        cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL);
                        cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
                        WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int);
                        break;
                default:
                        break;
                }
                break;

        default:
                break;
        }
        return 0;
}

static int kv_dpm_process_interrupt(struct amdgpu_device *adev,
                                    struct amdgpu_irq_src *source,
                                    struct amdgpu_iv_entry *entry)
{
        bool queue_thermal = false;

        if (entry == NULL)
                return -EINVAL;

        switch (entry->src_id) {
        case 230: /* thermal low to high */
                DRM_DEBUG("IH: thermal low to high\n");
                adev->pm.dpm.thermal.high_to_low = false;
                queue_thermal = true;
                break;
        case 231: /* thermal high to low */
                DRM_DEBUG("IH: thermal high to low\n");
                adev->pm.dpm.thermal.high_to_low = true;
                queue_thermal = true;
                break;
        default:
                break;
        }

        if (queue_thermal)
                schedule_work(&adev->pm.dpm.thermal.work);

        return 0;
}

static int kv_dpm_set_clockgating_state(void *handle,
                                        enum amd_clockgating_state state)
{
        return 0;
}

static int kv_dpm_set_powergating_state(void *handle,
                                        enum amd_powergating_state state)
{
        return 0;
}

static inline bool kv_are_power_levels_equal(const struct kv_pl *kv_cpl1,
                                             const struct kv_pl *kv_cpl2)
{
        return ((kv_cpl1->sclk == kv_cpl2->sclk) &&
                (kv_cpl1->vddc_index == kv_cpl2->vddc_index) &&
                (kv_cpl1->ds_divider_index == kv_cpl2->ds_divider_index) &&
                (kv_cpl1->force_nbp_state == kv_cpl2->force_nbp_state));
}

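/*
 * Two power states count as equal only when every performance level
 * matches; the UVD/VCE clocks are then used as a tie breaker so a
 * state switch is not skipped when only the multimedia clocks change.
 */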
static int kv_check_state_equal(struct amdgpu_device *adev,
                                struct amdgpu_ps *cps,
                                struct amdgpu_ps *rps,
                                bool *equal)
{
        struct kv_ps *kv_cps;
        struct kv_ps *kv_rps;
        int i;

        if (adev == NULL || cps == NULL || rps == NULL || equal == NULL)
                return -EINVAL;

        kv_cps = kv_get_ps(cps);
        kv_rps = kv_get_ps(rps);

        if (kv_cps == NULL) {
                *equal = false;
                return 0;
        }

        if (kv_cps->num_levels != kv_rps->num_levels) {
                *equal = false;
                return 0;
        }

        for (i = 0; i < kv_cps->num_levels; i++) {
                if (!kv_are_power_levels_equal(&(kv_cps->levels[i]),
                                               &(kv_rps->levels[i]))) {
                        *equal = false;
                        return 0;
                }
        }

        /* If all performance levels are the same try to use the UVD clocks to break the tie.*/
        *equal = ((cps->vclk == rps->vclk) && (cps->dclk == rps->dclk));
        *equal &= ((cps->evclk == rps->evclk) && (cps->ecclk == rps->ecclk));

        return 0;
}

static int kv_dpm_read_sensor(struct amdgpu_device *adev, int idx,
                              void *value, int *size)
{
        struct kv_power_info *pi = kv_get_pi(adev);
        uint32_t sclk;
        u32 pl_index =
                (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
                 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >>
                TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT;

        /* size must be at least 4 bytes for all sensors */
        if (*size < 4)
                return -EINVAL;

        switch (idx) {
        case AMDGPU_PP_SENSOR_GFX_SCLK:
                if (pl_index < SMU__NUM_SCLK_DPM_STATE) {
                        sclk = be32_to_cpu(
                                pi->graphics_level[pl_index].SclkFrequency);
                        *((uint32_t *)value) = sclk;
                        *size = 4;
                        return 0;
                }
                return -EINVAL;
        case AMDGPU_PP_SENSOR_GPU_TEMP:
                *((uint32_t *)value) = kv_dpm_get_temp(adev);
                *size = 4;
                return 0;
        default:
                return -EINVAL;
        }
}

const struct amd_ip_funcs kv_dpm_ip_funcs = {
        .name = "kv_dpm",
        .early_init = kv_dpm_early_init,
        .late_init = kv_dpm_late_init,
        .sw_init = kv_dpm_sw_init,
        .sw_fini = kv_dpm_sw_fini,
        .hw_init = kv_dpm_hw_init,
        .hw_fini = kv_dpm_hw_fini,
        .suspend = kv_dpm_suspend,
        .resume = kv_dpm_resume,
        .is_idle = kv_dpm_is_idle,
        .wait_for_idle = kv_dpm_wait_for_idle,
        .soft_reset = kv_dpm_soft_reset,
        .set_clockgating_state = kv_dpm_set_clockgating_state,
        .set_powergating_state = kv_dpm_set_powergating_state,
};

static const struct amdgpu_dpm_funcs kv_dpm_funcs = {
        .get_temperature = &kv_dpm_get_temp,
        .pre_set_power_state = &kv_dpm_pre_set_power_state,
        .set_power_state = &kv_dpm_set_power_state,
        .post_set_power_state = &kv_dpm_post_set_power_state,
        .display_configuration_changed = &kv_dpm_display_configuration_changed,
        .get_sclk = &kv_dpm_get_sclk,
        .get_mclk = &kv_dpm_get_mclk,
        .print_power_state = &kv_dpm_print_power_state,
        .debugfs_print_current_performance_level = &kv_dpm_debugfs_print_current_performance_level,
        .force_performance_level = &kv_dpm_force_performance_level,
        .powergate_uvd = &kv_dpm_powergate_uvd,
        .enable_bapm = &kv_dpm_enable_bapm,
        .get_vce_clock_state = amdgpu_get_vce_clock_state,
        .check_state_equal = kv_check_state_equal,
        .read_sensor = &kv_dpm_read_sensor,
};

static void kv_dpm_set_dpm_funcs(struct amdgpu_device *adev)
{
        if (adev->pm.funcs == NULL)
                adev->pm.funcs = &kv_dpm_funcs;
}

static const struct amdgpu_irq_src_funcs kv_dpm_irq_funcs = {
        .set = kv_dpm_set_interrupt_state,
        .process = kv_dpm_process_interrupt,
};

static void kv_dpm_set_irq_funcs(struct amdgpu_device *adev)
{
        adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST;
        adev->pm.dpm.thermal.irq.funcs = &kv_dpm_irq_funcs;
}