/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "drmP.h"
#include "radeon.h"
#include "cikd.h"
#include "r600_dpm.h"
#include "ci_dpm.h"
#include "atom.h"
#include <linux/seq_file.h>

#define MC_CG_ARB_FREQ_F0		0x0a
#define MC_CG_ARB_FREQ_F1		0x0b
#define MC_CG_ARB_FREQ_F2		0x0c
#define MC_CG_ARB_FREQ_F3		0x0d

#define SMC_RAM_END			0x40000

#define VOLTAGE_SCALE			4
#define VOLTAGE_VID_OFFSET_SCALE1	625
#define VOLTAGE_VID_OFFSET_SCALE2	100

static const struct ci_pt_defaults defaults_bonaire_xt =
{
	1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
	{ 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61 },
	{ 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
};

static const struct ci_pt_defaults defaults_bonaire_pro =
{
	1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x65062,
	{ 0x8C, 0x23F, 0x244, 0xA6, 0x83, 0x85, 0x86, 0x86, 0x83, 0xDB, 0xDB, 0xDA, 0x67, 0x60, 0x5F },
	{ 0x187, 0x193, 0x193, 0x1C7, 0x1D1, 0x1D1, 0x210, 0x219, 0x219, 0x266, 0x26C, 0x26C, 0x2C9, 0x2CB, 0x2CB }
};

static const struct ci_pt_defaults defaults_saturn_xt =
{
	1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x70000,
	{ 0x8C, 0x247, 0x249, 0xA6, 0x80, 0x81, 0x8B, 0x89, 0x86, 0xC9, 0xCA, 0xC9, 0x4D, 0x4D, 0x4D },
	{ 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 }
};

static const struct ci_pt_defaults defaults_saturn_pro =
{
	1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x30000,
	{ 0x96, 0x21D, 0x23B, 0xA1, 0x85, 0x87, 0x83, 0x84, 0x81, 0xE6, 0xE6, 0xE6, 0x71, 0x6A, 0x6A },
	{ 0x193, 0x19E, 0x19E, 0x1D2, 0x1DC, 0x1DC, 0x21A, 0x223, 0x223, 0x26E, 0x27E, 0x274, 0x2CF, 0x2D2, 0x2D2 }
};

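/*
 * DIDT (di/dt) register programming table. Each entry is
 * { offset, mask, shift, value, type }, applied to the indirect DIDT
 * register space; the offset groups (0x0x, 0x2x, 0x4x, 0x6x) appear to
 * correspond to the SQ, DB, TD and TCP throttling blocks toggled in
 * ci_do_enable_didt() below. The table ends with an 0xFFFFFFFF entry.
 */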
static const struct ci_pt_config_reg didt_config_ci[] =
{
	{ 0x10, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x10, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x10, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x10, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x2, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x2, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x2, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x1, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x1, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x0, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x22, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x22, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x22, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x21, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x21, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x20, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x42, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x42, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x42, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x41, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x41, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x40, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x62, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x62, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x62, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x61, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x61, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x60, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0xFFFFFFFF }
};

extern u8 rv770_get_memory_module_index(struct radeon_device *rdev);
extern int ni_copy_and_switch_arb_sets(struct radeon_device *rdev,
				       u32 arb_freq_src, u32 arb_freq_dest);
extern u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock);
extern u8 si_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode);
extern void si_trim_voltage_table_to_fit_state_table(struct radeon_device *rdev,
						     u32 max_voltage_steps,
						     struct atom_voltage_table *voltage_table);
extern void cik_enter_rlc_safe_mode(struct radeon_device *rdev);
extern void cik_exit_rlc_safe_mode(struct radeon_device *rdev);

static int ci_get_std_voltage_value_sidd(struct radeon_device *rdev,
					 struct atom_voltage_table_entry *voltage_table,
					 u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd);
static int ci_set_power_limit(struct radeon_device *rdev, u32 n);
static int ci_set_overdrive_target_tdp(struct radeon_device *rdev,
				       u32 target_tdp);
static int ci_update_uvd_dpm(struct radeon_device *rdev, bool gate);

static struct ci_power_info *ci_get_pi(struct radeon_device *rdev)
{
	struct ci_power_info *pi = rdev->pm.dpm.priv;

	return pi;
}

static struct ci_ps *ci_get_ps(struct radeon_ps *rps)
{
	struct ci_ps *ps = rps->ps_priv;

	return ps;
}

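/*
 * Select the PowerTune defaults matching the PCI device ID: Bonaire XT
 * parts (also the fallback), Bonaire Pro, Saturn XT and Saturn Pro each
 * get their own table. CAC and the SQ/DB/TD/TCP ramping features start
 * disabled; enabling power containment turns on CAC, BAPM, the TDC
 * limit and package power tracking.
 */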
static void ci_initialize_powertune_defaults(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	switch (rdev->pdev->device) {
	case 0x6650:
	case 0x6658:
	case 0x665C:
	default:
		pi->powertune_defaults = &defaults_bonaire_xt;
		break;
	case 0x6651:
	case 0x665D:
		pi->powertune_defaults = &defaults_bonaire_pro;
		break;
	case 0x6640:
		pi->powertune_defaults = &defaults_saturn_xt;
		break;
	case 0x6641:
		pi->powertune_defaults = &defaults_saturn_pro;
		break;
	}

	pi->dte_tj_offset = 0;

	pi->caps_power_containment = true;
	pi->caps_cac = false;
	pi->caps_sq_ramping = false;
	pi->caps_db_ramping = false;
	pi->caps_td_ramping = false;
	pi->caps_tcp_ramping = false;

	if (pi->caps_power_containment) {
		pi->caps_cac = true;
		pi->enable_bapm_feature = true;
		pi->enable_tdc_limit_feature = true;
		pi->enable_pkg_pwr_tracking_feature = true;
	}
}

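/*
 * Convert a VDDC value (mV) to an SVI VID code: VIDs count down from
 * 1.55 V (6200 / VOLTAGE_SCALE) in 6.25 mV steps (25 / VOLTAGE_SCALE).
 */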
static u8 ci_convert_to_vid(u16 vddc)
{
	return (6200 - (vddc * VOLTAGE_SCALE)) / 25;
}

static int ci_populate_bapm_vddc_vid_sidd(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
	u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
	u8 *hi2_vid = pi->smc_powertune_table.BapmVddCVidHiSidd2;
	u32 i;

	if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries == NULL)
		return -EINVAL;
	if (rdev->pm.dpm.dyn_state.cac_leakage_table.count > 8)
		return -EINVAL;
	if (rdev->pm.dpm.dyn_state.cac_leakage_table.count !=
	    rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count)
		return -EINVAL;

	for (i = 0; i < rdev->pm.dpm.dyn_state.cac_leakage_table.count; i++) {
		if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
			lo_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1);
			hi_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2);
			hi2_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3);
		} else {
			lo_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc);
			hi_vid[i] = ci_convert_to_vid((u16)rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage);
		}
	}
	return 0;
}

static int ci_populate_vddc_vid(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u8 *vid = pi->smc_powertune_table.VddCVid;
	u32 i;

	if (pi->vddc_voltage_table.count > 8)
		return -EINVAL;

	for (i = 0; i < pi->vddc_voltage_table.count; i++)
		vid[i] = ci_convert_to_vid(pi->vddc_voltage_table.entries[i].value);

	return 0;
}

static int ci_populate_svi_load_line(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;

	pi->smc_powertune_table.SviLoadLineEn = pt_defaults->svi_load_line_en;
	pi->smc_powertune_table.SviLoadLineVddC = pt_defaults->svi_load_line_vddc;
	pi->smc_powertune_table.SviLoadLineTrimVddC = 3;
	pi->smc_powertune_table.SviLoadLineOffsetVddC = 0;

	return 0;
}

static int ci_populate_tdc_limit(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
	u16 tdc_limit;

	tdc_limit = rdev->pm.dpm.dyn_state.cac_tdp_table->tdc * 256;
	pi->smc_powertune_table.TDC_VDDC_PkgLimit = cpu_to_be16(tdc_limit);
	pi->smc_powertune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
		pt_defaults->tdc_vddc_throttle_release_limit_perc;
	pi->smc_powertune_table.TDC_MAWt = pt_defaults->tdc_mawt;

	return 0;
}

static int ci_populate_dw8(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
	int ret;

	ret = ci_read_smc_sram_dword(rdev,
				     SMU7_FIRMWARE_HEADER_LOCATION +
				     offsetof(SMU7_Firmware_Header, PmFuseTable) +
				     offsetof(SMU7_Discrete_PmFuses, TdcWaterfallCtl),
				     (u32 *)&pi->smc_powertune_table.TdcWaterfallCtl,
				     pi->sram_end);
	if (ret)
		return -EINVAL;
	else
		pi->smc_powertune_table.TdcWaterfallCtl = pt_defaults->tdc_waterfall_ctl;

	return 0;
}

static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
	u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
	int i, min, max;

	min = max = hi_vid[0];
	for (i = 0; i < 8; i++) {
		if (0 != hi_vid[i]) {
			if (min > hi_vid[i])
				min = hi_vid[i];
			if (max < hi_vid[i])
				max = hi_vid[i];
		}

		if (0 != lo_vid[i]) {
			if (min > lo_vid[i])
				min = lo_vid[i];
			if (max < lo_vid[i])
				max = lo_vid[i];
		}
	}

	if ((min == 0) || (max == 0))
		return -EINVAL;
	pi->smc_powertune_table.GnbLPMLMaxVid = (u8)max;
	pi->smc_powertune_table.GnbLPMLMinVid = (u8)min;

	return 0;
}

static int ci_populate_bapm_vddc_base_leakage_sidd(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u16 hi_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd;
	u16 lo_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd;
	struct radeon_cac_tdp_table *cac_tdp_table =
		rdev->pm.dpm.dyn_state.cac_tdp_table;

	hi_sidd = cac_tdp_table->high_cac_leakage / 100 * 256;
	lo_sidd = cac_tdp_table->low_cac_leakage / 100 * 256;

	pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd = cpu_to_be16(hi_sidd);
	pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd = cpu_to_be16(lo_sidd);

	return 0;
}

static int ci_populate_bapm_parameters_in_dpm_table(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
	SMU7_Discrete_DpmTable *dpm_table = &pi->smc_state_table;
	struct radeon_cac_tdp_table *cac_tdp_table =
		rdev->pm.dpm.dyn_state.cac_tdp_table;
	struct radeon_ppm_table *ppm = rdev->pm.dpm.dyn_state.ppm_table;
	int i, j, k;
	const u16 *def1;
	const u16 *def2;

	dpm_table->DefaultTdp = cac_tdp_table->tdp * 256;
	dpm_table->TargetTdp = cac_tdp_table->configurable_tdp * 256;

	dpm_table->DTETjOffset = (u8)pi->dte_tj_offset;
	dpm_table->GpuTjMax =
		(u8)(pi->thermal_temp_setting.temperature_high / 1000);
	dpm_table->GpuTjHyst = 8;

	dpm_table->DTEAmbientTempBase = pt_defaults->dte_ambient_temp_base;

	if (ppm) {
		dpm_table->PPM_PkgPwrLimit = cpu_to_be16((u16)ppm->dgpu_tdp * 256 / 1000);
		dpm_table->PPM_TemperatureLimit = cpu_to_be16((u16)ppm->tj_max * 256);
	} else {
		dpm_table->PPM_PkgPwrLimit = cpu_to_be16(0);
		dpm_table->PPM_TemperatureLimit = cpu_to_be16(0);
	}

	dpm_table->BAPM_TEMP_GRADIENT = cpu_to_be32(pt_defaults->bapm_temp_gradient);
	def1 = pt_defaults->bapmti_r;
	def2 = pt_defaults->bapmti_rc;

	for (i = 0; i < SMU7_DTE_ITERATIONS; i++) {
		for (j = 0; j < SMU7_DTE_SOURCES; j++) {
			for (k = 0; k < SMU7_DTE_SINKS; k++) {
				dpm_table->BAPMTI_R[i][j][k] = cpu_to_be16(*def1);
				dpm_table->BAPMTI_RC[i][j][k] = cpu_to_be16(*def2);
				def1++;
				def2++;
			}
		}
	}

	return 0;
}

static int ci_populate_pm_base(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 pm_fuse_table_offset;
	int ret;

	if (pi->caps_power_containment) {
		ret = ci_read_smc_sram_dword(rdev,
					     SMU7_FIRMWARE_HEADER_LOCATION +
					     offsetof(SMU7_Firmware_Header, PmFuseTable),
					     &pm_fuse_table_offset, pi->sram_end);
		if (ret)
			return ret;
		ret = ci_populate_bapm_vddc_vid_sidd(rdev);
		if (ret)
			return ret;
		ret = ci_populate_vddc_vid(rdev);
		if (ret)
			return ret;
		ret = ci_populate_svi_load_line(rdev);
		if (ret)
			return ret;
		ret = ci_populate_tdc_limit(rdev);
		if (ret)
			return ret;
		ret = ci_populate_dw8(rdev);
		if (ret)
			return ret;
		ret = ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(rdev);
		if (ret)
			return ret;
		ret = ci_populate_bapm_vddc_base_leakage_sidd(rdev);
		if (ret)
			return ret;
		ret = ci_copy_bytes_to_smc(rdev, pm_fuse_table_offset,
					   (u8 *)&pi->smc_powertune_table,
					   sizeof(SMU7_Discrete_PmFuses), pi->sram_end);
		if (ret)
			return ret;
	}

	return 0;
}

static void ci_do_enable_didt(struct radeon_device *rdev, const bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 data;

	if (pi->caps_sq_ramping) {
		data = RREG32_DIDT(DIDT_SQ_CTRL0);
		if (enable)
			data |= DIDT_CTRL_EN;
		else
			data &= ~DIDT_CTRL_EN;
		WREG32_DIDT(DIDT_SQ_CTRL0, data);
	}

	if (pi->caps_db_ramping) {
		data = RREG32_DIDT(DIDT_DB_CTRL0);
		if (enable)
			data |= DIDT_CTRL_EN;
		else
			data &= ~DIDT_CTRL_EN;
		WREG32_DIDT(DIDT_DB_CTRL0, data);
	}

	if (pi->caps_td_ramping) {
		data = RREG32_DIDT(DIDT_TD_CTRL0);
		if (enable)
			data |= DIDT_CTRL_EN;
		else
			data &= ~DIDT_CTRL_EN;
		WREG32_DIDT(DIDT_TD_CTRL0, data);
	}

	if (pi->caps_tcp_ramping) {
		data = RREG32_DIDT(DIDT_TCP_CTRL0);
		if (enable)
			data |= DIDT_CTRL_EN;
		else
			data &= ~DIDT_CTRL_EN;
		WREG32_DIDT(DIDT_TCP_CTRL0, data);
	}
}

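/*
 * Walk a ci_pt_config_reg table: CACHE-type entries accumulate their
 * shifted values into a temporary word; the next non-cache entry does a
 * read-modify-write of its register (SMC-indirect, DIDT-indirect or
 * plain MMIO depending on type) with the cached bits OR'd in, then
 * clears the cache.
 */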
static int ci_program_pt_config_registers(struct radeon_device *rdev,
					  const struct ci_pt_config_reg *cac_config_regs)
{
	const struct ci_pt_config_reg *config_regs = cac_config_regs;
	u32 data;
	u32 cache = 0;

	if (config_regs == NULL)
		return -EINVAL;

	while (config_regs->offset != 0xFFFFFFFF) {
		if (config_regs->type == CISLANDS_CONFIGREG_CACHE) {
			cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
		} else {
			switch (config_regs->type) {
			case CISLANDS_CONFIGREG_SMC_IND:
				data = RREG32_SMC(config_regs->offset);
				break;
			case CISLANDS_CONFIGREG_DIDT_IND:
				data = RREG32_DIDT(config_regs->offset);
				break;
			default:
				data = RREG32(config_regs->offset << 2);
				break;
			}

			data &= ~config_regs->mask;
			data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
			data |= cache;

			switch (config_regs->type) {
			case CISLANDS_CONFIGREG_SMC_IND:
				WREG32_SMC(config_regs->offset, data);
				break;
			case CISLANDS_CONFIGREG_DIDT_IND:
				WREG32_DIDT(config_regs->offset, data);
				break;
			default:
				WREG32(config_regs->offset << 2, data);
				break;
			}
			cache = 0;
		}
		config_regs++;
	}
	return 0;
}

static int ci_enable_didt(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	int ret;

	if (pi->caps_sq_ramping || pi->caps_db_ramping ||
	    pi->caps_td_ramping || pi->caps_tcp_ramping) {
		cik_enter_rlc_safe_mode(rdev);

		if (enable) {
			ret = ci_program_pt_config_registers(rdev, didt_config_ci);
			if (ret) {
				cik_exit_rlc_safe_mode(rdev);
				return ret;
			}
		}

		ci_do_enable_didt(rdev, enable);

		cik_exit_rlc_safe_mode(rdev);
	}

	return 0;
}

static int ci_enable_power_containment(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;
	int ret = 0;

	if (enable) {
		pi->power_containment_features = 0;
		if (pi->caps_power_containment) {
			if (pi->enable_bapm_feature) {
				smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableDTE);
				if (smc_result != PPSMC_Result_OK)
					ret = -EINVAL;
				else
					pi->power_containment_features |= POWERCONTAINMENT_FEATURE_BAPM;
			}

			if (pi->enable_tdc_limit_feature) {
				smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_TDCLimitEnable);
				if (smc_result != PPSMC_Result_OK)
					ret = -EINVAL;
				else
					pi->power_containment_features |= POWERCONTAINMENT_FEATURE_TDCLimit;
			}

			if (pi->enable_pkg_pwr_tracking_feature) {
				smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PkgPwrLimitEnable);
				if (smc_result != PPSMC_Result_OK) {
					ret = -EINVAL;
				} else {
					struct radeon_cac_tdp_table *cac_tdp_table =
						rdev->pm.dpm.dyn_state.cac_tdp_table;
					u32 default_pwr_limit =
						(u32)(cac_tdp_table->maximum_power_delivery_limit * 256);

					pi->power_containment_features |= POWERCONTAINMENT_FEATURE_PkgPwrLimit;

					ci_set_power_limit(rdev, default_pwr_limit);
				}
			}
		}
	} else {
		if (pi->caps_power_containment && pi->power_containment_features) {
			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_TDCLimit)
				ci_send_msg_to_smc(rdev, PPSMC_MSG_TDCLimitDisable);

			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_BAPM)
				ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableDTE);

			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit)
				ci_send_msg_to_smc(rdev, PPSMC_MSG_PkgPwrLimitDisable);
			pi->power_containment_features = 0;
		}
	}

	return ret;
}

static int ci_enable_smc_cac(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;
	int ret = 0;

	if (pi->caps_cac) {
		if (enable) {
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableCac);
			if (smc_result != PPSMC_Result_OK) {
				ret = -EINVAL;
				pi->cac_enabled = false;
			} else {
				pi->cac_enabled = true;
			}
		} else if (pi->cac_enabled) {
			ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableCac);
			pi->cac_enabled = false;
		}
	}

	return ret;
}

static int ci_power_control_set_level(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_cac_tdp_table *cac_tdp_table =
		rdev->pm.dpm.dyn_state.cac_tdp_table;
	s32 adjust_percent;
	s32 target_tdp;
	int ret = 0;
	bool adjust_polarity = false; /* ??? */

	if (pi->caps_power_containment &&
	    (pi->power_containment_features & POWERCONTAINMENT_FEATURE_BAPM)) {
		adjust_percent = adjust_polarity ?
			rdev->pm.dpm.tdp_adjustment : (-1 * rdev->pm.dpm.tdp_adjustment);
		target_tdp = ((100 + adjust_percent) *
			      (s32)cac_tdp_table->configurable_tdp) / 100;
		target_tdp *= 256;

		ret = ci_set_overdrive_target_tdp(rdev, (u32)target_tdp);
	}

	return ret;
}

static void ci_dpm_powergate_uvd(struct radeon_device *rdev, bool gate)
{
	ci_update_uvd_dpm(rdev, gate);
}

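/*
 * Report whether the vblank period is too short for a safe memory clock
 * switch: GDDR5 boards need at least 450 us of vblank, other memory
 * types 300 us.
 */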
bool ci_dpm_vblank_too_short(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 vblank_time = r600_dpm_get_vblank_time(rdev);
	u32 switch_limit = pi->mem_gddr5 ? 450 : 300;

	if (vblank_time < switch_limit)
		return true;
	else
		return false;
}

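/*
 * Clamp the requested power state to what the platform allows: disable
 * mclk switching when more than one CRTC is active or the vblank is too
 * short, cap clocks at the DC maximums when on battery, and keep the
 * per-level sclk/mclk values monotonic.
 */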
static void ci_apply_state_adjust_rules(struct radeon_device *rdev,
					struct radeon_ps *rps)
{
	struct ci_ps *ps = ci_get_ps(rps);
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_clock_and_voltage_limits *max_limits;
	bool disable_mclk_switching;
	u32 sclk, mclk;
	int i;

	if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
	    ci_dpm_vblank_too_short(rdev))
		disable_mclk_switching = true;
	else
		disable_mclk_switching = false;

	if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
		pi->battery_state = true;
	else
		pi->battery_state = false;

	if (rdev->pm.dpm.ac_power)
		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
	else
		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

	if (rdev->pm.dpm.ac_power == false) {
		for (i = 0; i < ps->performance_level_count; i++) {
			if (ps->performance_levels[i].mclk > max_limits->mclk)
				ps->performance_levels[i].mclk = max_limits->mclk;
			if (ps->performance_levels[i].sclk > max_limits->sclk)
				ps->performance_levels[i].sclk = max_limits->sclk;
		}
	}

	/* XXX validate the min clocks required for display */

	if (disable_mclk_switching) {
		mclk = ps->performance_levels[ps->performance_level_count - 1].mclk;
		sclk = ps->performance_levels[0].sclk;
	} else {
		mclk = ps->performance_levels[0].mclk;
		sclk = ps->performance_levels[0].sclk;
	}

	ps->performance_levels[0].sclk = sclk;
	ps->performance_levels[0].mclk = mclk;

	if (ps->performance_levels[1].sclk < ps->performance_levels[0].sclk)
		ps->performance_levels[1].sclk = ps->performance_levels[0].sclk;

	if (disable_mclk_switching) {
		if (ps->performance_levels[0].mclk < ps->performance_levels[1].mclk)
			ps->performance_levels[0].mclk = ps->performance_levels[1].mclk;
	} else {
		if (ps->performance_levels[1].mclk < ps->performance_levels[0].mclk)
			ps->performance_levels[1].mclk = ps->performance_levels[0].mclk;
	}
}

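/*
 * Program the thermal interrupt thresholds; min_temp and max_temp are
 * in millidegrees Celsius, while the DIG_THERM fields take whole
 * degrees.
 */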
static int ci_set_thermal_temperature_range(struct radeon_device *rdev,
					    int min_temp, int max_temp)
{
	int low_temp = 0 * 1000;
	int high_temp = 255 * 1000;
	u32 tmp;

	if (low_temp < min_temp)
		low_temp = min_temp;
	if (high_temp > max_temp)
		high_temp = max_temp;
	if (high_temp < low_temp) {
		DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
		return -EINVAL;
	}

	tmp = RREG32_SMC(CG_THERMAL_INT);
	tmp &= ~(CI_DIG_THERM_INTH_MASK | CI_DIG_THERM_INTL_MASK);
	tmp |= CI_DIG_THERM_INTH(high_temp / 1000) |
		CI_DIG_THERM_INTL(low_temp / 1000);
	WREG32_SMC(CG_THERMAL_INT, tmp);

#if 0
	/* XXX: need to figure out how to handle this properly */
	tmp = RREG32_SMC(CG_THERMAL_CTRL);
	tmp &= DIG_THERM_DPM_MASK;
	tmp |= DIG_THERM_DPM(high_temp / 1000);
	WREG32_SMC(CG_THERMAL_CTRL, tmp);
#endif

	return 0;
}

#if 0
static int ci_read_smc_soft_register(struct radeon_device *rdev,
				     u16 reg_offset, u32 *value)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	return ci_read_smc_sram_dword(rdev,
				      pi->soft_regs_start + reg_offset,
				      value, pi->sram_end);
}
#endif

static int ci_write_smc_soft_register(struct radeon_device *rdev,
				      u16 reg_offset, u32 value)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	return ci_write_smc_sram_dword(rdev,
				       pi->soft_regs_start + reg_offset,
				       value, pi->sram_end);
}

static void ci_init_fps_limits(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	SMU7_Discrete_DpmTable *table = &pi->smc_state_table;

	if (pi->caps_fps) {
		u16 tmp;

		tmp = 45;
		table->FpsHighT = cpu_to_be16(tmp);

		tmp = 30;
		table->FpsLowT = cpu_to_be16(tmp);
	}
}

static int ci_update_sclk_t(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	int ret = 0;
	u32 low_sclk_interrupt_t = 0;

	if (pi->caps_sclk_throttle_low_notification) {
		low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t);

		ret = ci_copy_bytes_to_smc(rdev,
					   pi->dpm_table_start +
					   offsetof(SMU7_Discrete_DpmTable, LowSclkInterruptT),
					   (u8 *)&low_sclk_interrupt_t,
					   sizeof(u32), pi->sram_end);
	}

	return ret;
}

static void ci_get_leakage_voltages(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u16 leakage_id, virtual_voltage_id;
	u16 vddc, vddci;
	int i;

	pi->vddc_leakage.count = 0;
	pi->vddci_leakage.count = 0;

	if (radeon_atom_get_leakage_id_from_vbios(rdev, &leakage_id) == 0) {
		for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
			virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
			if (radeon_atom_get_leakage_vddc_based_on_leakage_params(rdev, &vddc, &vddci,
										 virtual_voltage_id,
										 leakage_id) == 0) {
				if (vddc != 0 && vddc != virtual_voltage_id) {
					pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
					pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
					pi->vddc_leakage.count++;
				}
				if (vddci != 0 && vddci != virtual_voltage_id) {
					pi->vddci_leakage.actual_voltage[pi->vddci_leakage.count] = vddci;
					pi->vddci_leakage.leakage_id[pi->vddci_leakage.count] = virtual_voltage_id;
					pi->vddci_leakage.count++;
				}
			}
		}
	}
}

static void ci_set_dpm_event_sources(struct radeon_device *rdev, u32 sources)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	bool want_thermal_protection;
	enum radeon_dpm_event_src dpm_event_src;
	u32 tmp;

	switch (sources) {
	case 0:
	default:
		want_thermal_protection = false;
		break;
	case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL):
		want_thermal_protection = true;
		dpm_event_src = RADEON_DPM_EVENT_SRC_DIGITAL;
		break;
	case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
		want_thermal_protection = true;
		dpm_event_src = RADEON_DPM_EVENT_SRC_EXTERNAL;
		break;
	case ((1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
	      (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL)):
		want_thermal_protection = true;
		dpm_event_src = RADEON_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL;
		break;
	}

	if (want_thermal_protection) {
#if 0
		/* XXX: need to figure out how to handle this properly */
		tmp = RREG32_SMC(CG_THERMAL_CTRL);
		tmp &= DPM_EVENT_SRC_MASK;
		tmp |= DPM_EVENT_SRC(dpm_event_src);
		WREG32_SMC(CG_THERMAL_CTRL, tmp);
#endif

		tmp = RREG32_SMC(GENERAL_PWRMGT);
		if (pi->thermal_protection)
			tmp &= ~THERMAL_PROTECTION_DIS;
		else
			tmp |= THERMAL_PROTECTION_DIS;
		WREG32_SMC(GENERAL_PWRMGT, tmp);
	} else {
		tmp = RREG32_SMC(GENERAL_PWRMGT);
		tmp |= THERMAL_PROTECTION_DIS;
		WREG32_SMC(GENERAL_PWRMGT, tmp);
	}
}

static void ci_enable_auto_throttle_source(struct radeon_device *rdev,
					   enum radeon_dpm_auto_throttle_src source,
					   bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	if (enable) {
		if (!(pi->active_auto_throttle_sources & (1 << source))) {
			pi->active_auto_throttle_sources |= 1 << source;
			ci_set_dpm_event_sources(rdev, pi->active_auto_throttle_sources);
		}
	} else {
		if (pi->active_auto_throttle_sources & (1 << source)) {
			pi->active_auto_throttle_sources &= ~(1 << source);
			ci_set_dpm_event_sources(rdev, pi->active_auto_throttle_sources);
		}
	}
}

static void ci_enable_vr_hot_gpio_interrupt(struct radeon_device *rdev)
{
	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT)
		ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableVRHotGPIOInterrupt);
}

static int ci_unfreeze_sclk_mclk_dpm(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;

	if (!pi->need_update_smu7_dpm_table)
		return 0;

	if ((!pi->sclk_dpm_key_disabled) &&
	    (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_SCLKDPM_UnfreezeLevel);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	if ((!pi->mclk_dpm_key_disabled) &&
	    (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_UnfreezeLevel);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	pi->need_update_smu7_dpm_table = 0;
	return 0;
}

static int ci_enable_sclk_mclk_dpm(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;

	if (enable) {
		if (!pi->sclk_dpm_key_disabled) {
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_DPM_Enable);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;
		}

		if (!pi->mclk_dpm_key_disabled) {
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_Enable);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;

			WREG32_P(MC_SEQ_CNTL_3, CAC_EN, ~CAC_EN);

			WREG32_SMC(LCAC_MC0_CNTL, 0x05);
			WREG32_SMC(LCAC_MC1_CNTL, 0x05);
			WREG32_SMC(LCAC_CPL_CNTL, 0x100005);

			udelay(10);

			WREG32_SMC(LCAC_MC0_CNTL, 0x400005);
			WREG32_SMC(LCAC_MC1_CNTL, 0x400005);
			WREG32_SMC(LCAC_CPL_CNTL, 0x500005);
		}
	} else {
		if (!pi->sclk_dpm_key_disabled) {
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_DPM_Disable);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;
		}

		if (!pi->mclk_dpm_key_disabled) {
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_Disable);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;
		}
	}

	return 0;
}

static int ci_start_dpm(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;
	int ret;
	u32 tmp;

	tmp = RREG32_SMC(GENERAL_PWRMGT);
	tmp |= GLOBAL_PWRMGT_EN;
	WREG32_SMC(GENERAL_PWRMGT, tmp);

	tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
	tmp |= DYNAMIC_PM_EN;
	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);

	ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, VoltageChangeTimeout), 0x1000);

	WREG32_P(BIF_LNCNT_RESET, 0, ~RESET_LNCNT_EN);

	smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Voltage_Cntl_Enable);
	if (smc_result != PPSMC_Result_OK)
		return -EINVAL;

	ret = ci_enable_sclk_mclk_dpm(rdev, true);
	if (ret)
		return ret;

	if (!pi->pcie_dpm_key_disabled) {
		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_Enable);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	return 0;
}

static int ci_freeze_sclk_mclk_dpm(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;

	if (!pi->need_update_smu7_dpm_table)
		return 0;

	if ((!pi->sclk_dpm_key_disabled) &&
	    (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_SCLKDPM_FreezeLevel);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	if ((!pi->mclk_dpm_key_disabled) &&
	    (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_FreezeLevel);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	return 0;
}

static int ci_stop_dpm(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;
	int ret;
	u32 tmp;

	tmp = RREG32_SMC(GENERAL_PWRMGT);
	tmp &= ~GLOBAL_PWRMGT_EN;
	WREG32_SMC(GENERAL_PWRMGT, tmp);

	tmp = RREG32(SCLK_PWRMGT_CNTL);
	tmp &= ~DYNAMIC_PM_EN;
	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);

	if (!pi->pcie_dpm_key_disabled) {
		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_Disable);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	ret = ci_enable_sclk_mclk_dpm(rdev, false);
	if (ret)
		return ret;

	smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Voltage_Cntl_Disable);
	if (smc_result != PPSMC_Result_OK)
		return -EINVAL;

	return 0;
}

static void ci_enable_sclk_control(struct radeon_device *rdev, bool enable)
{
	u32 tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);

	if (enable)
		tmp &= ~SCLK_PWRMGT_OFF;
	else
		tmp |= SCLK_PWRMGT_OFF;
	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
}

#if 0
static int ci_notify_hw_of_power_source(struct radeon_device *rdev,
					bool ac_power)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_cac_tdp_table *cac_tdp_table =
		rdev->pm.dpm.dyn_state.cac_tdp_table;
	u32 power_limit;

	if (ac_power)
		power_limit = (u32)(cac_tdp_table->maximum_power_delivery_limit * 256);
	else
		power_limit = (u32)(cac_tdp_table->battery_power_limit * 256);

	ci_set_power_limit(rdev, power_limit);

	if (pi->caps_automatic_dc_transition) {
		if (ac_power)
			ci_send_msg_to_smc(rdev, PPSMC_MSG_RunningOnAC);
		else
			ci_send_msg_to_smc(rdev, PPSMC_MSG_Remove_DC_Clamp);
	}

	return 0;
}
#endif

static PPSMC_Result ci_send_msg_to_smc_with_parameter(struct radeon_device *rdev,
						      PPSMC_Msg msg, u32 parameter)
{
	WREG32(SMC_MSG_ARG_0, parameter);
	return ci_send_msg_to_smc(rdev, msg);
}

static PPSMC_Result ci_send_msg_to_smc_return_parameter(struct radeon_device *rdev,
							PPSMC_Msg msg, u32 *parameter)
{
	PPSMC_Result smc_result;

	smc_result = ci_send_msg_to_smc(rdev, msg);

	if ((smc_result == PPSMC_Result_OK) && parameter)
		*parameter = RREG32(SMC_MSG_ARG_0);

	return smc_result;
}

static int ci_dpm_force_state_sclk(struct radeon_device *rdev, u32 n)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	if (!pi->sclk_dpm_key_disabled) {
		PPSMC_Result smc_result =
			ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, n);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	return 0;
}

static int ci_dpm_force_state_mclk(struct radeon_device *rdev, u32 n)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	if (!pi->mclk_dpm_key_disabled) {
		PPSMC_Result smc_result =
			ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_MCLKDPM_ForceState, n);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	return 0;
}

static int ci_dpm_force_state_pcie(struct radeon_device *rdev, u32 n)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	if (!pi->pcie_dpm_key_disabled) {
		PPSMC_Result smc_result =
			ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_PCIeDPM_ForceLevel, n);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	return 0;
}

static int ci_set_power_limit(struct radeon_device *rdev, u32 n)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit) {
		PPSMC_Result smc_result =
			ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_PkgPwrSetLimit, n);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	return 0;
}

static int ci_set_overdrive_target_tdp(struct radeon_device *rdev,
				       u32 target_tdp)
{
	PPSMC_Result smc_result =
		ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_OverDriveSetTargetTdp, target_tdp);
	if (smc_result != PPSMC_Result_OK)
		return -EINVAL;
	return 0;
}

static int ci_set_boot_state(struct radeon_device *rdev)
{
	return ci_enable_sclk_mclk_dpm(rdev, false);
}

static u32 ci_get_average_sclk_freq(struct radeon_device *rdev)
{
	u32 sclk_freq;
	PPSMC_Result smc_result =
		ci_send_msg_to_smc_return_parameter(rdev,
						    PPSMC_MSG_API_GetSclkFrequency,
						    &sclk_freq);
	if (smc_result != PPSMC_Result_OK)
		sclk_freq = 0;

	return sclk_freq;
}

static u32 ci_get_average_mclk_freq(struct radeon_device *rdev)
{
	u32 mclk_freq;
	PPSMC_Result smc_result =
		ci_send_msg_to_smc_return_parameter(rdev,
						    PPSMC_MSG_API_GetMclkFrequency,
						    &mclk_freq);
	if (smc_result != PPSMC_Result_OK)
		mclk_freq = 0;

	return mclk_freq;
}

static void ci_dpm_start_smc(struct radeon_device *rdev)
{
	int i;

	ci_program_jump_on_start(rdev);
	ci_start_smc_clock(rdev);
	ci_start_smc(rdev);
	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32_SMC(FIRMWARE_FLAGS) & INTERRUPTS_ENABLED)
			break;
	}
}

static void ci_dpm_stop_smc(struct radeon_device *rdev)
{
	ci_reset_smc(rdev);
	ci_stop_smc_clock(rdev);
}

static int ci_process_firmware_header(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 tmp;
	int ret;

	ret = ci_read_smc_sram_dword(rdev,
				     SMU7_FIRMWARE_HEADER_LOCATION +
				     offsetof(SMU7_Firmware_Header, DpmTable),
				     &tmp, pi->sram_end);
	if (ret)
		return ret;

	pi->dpm_table_start = tmp;

	ret = ci_read_smc_sram_dword(rdev,
				     SMU7_FIRMWARE_HEADER_LOCATION +
				     offsetof(SMU7_Firmware_Header, SoftRegisters),
				     &tmp, pi->sram_end);
	if (ret)
		return ret;

	pi->soft_regs_start = tmp;

	ret = ci_read_smc_sram_dword(rdev,
				     SMU7_FIRMWARE_HEADER_LOCATION +
				     offsetof(SMU7_Firmware_Header, mcRegisterTable),
				     &tmp, pi->sram_end);
	if (ret)
		return ret;

	pi->mc_reg_table_start = tmp;

	ret = ci_read_smc_sram_dword(rdev,
				     SMU7_FIRMWARE_HEADER_LOCATION +
				     offsetof(SMU7_Firmware_Header, FanTable),
				     &tmp, pi->sram_end);
	if (ret)
		return ret;

	pi->fan_table_start = tmp;

	ret = ci_read_smc_sram_dword(rdev,
				     SMU7_FIRMWARE_HEADER_LOCATION +
				     offsetof(SMU7_Firmware_Header, mcArbDramTimingTable),
				     &tmp, pi->sram_end);
	if (ret)
		return ret;

	pi->arb_table_start = tmp;

	return 0;
}

static void ci_read_clock_registers(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	pi->clock_registers.cg_spll_func_cntl =
		RREG32_SMC(CG_SPLL_FUNC_CNTL);
	pi->clock_registers.cg_spll_func_cntl_2 =
		RREG32_SMC(CG_SPLL_FUNC_CNTL_2);
	pi->clock_registers.cg_spll_func_cntl_3 =
		RREG32_SMC(CG_SPLL_FUNC_CNTL_3);
	pi->clock_registers.cg_spll_func_cntl_4 =
		RREG32_SMC(CG_SPLL_FUNC_CNTL_4);
	pi->clock_registers.cg_spll_spread_spectrum =
		RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM);
	pi->clock_registers.cg_spll_spread_spectrum_2 =
		RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM_2);
	pi->clock_registers.dll_cntl = RREG32(DLL_CNTL);
	pi->clock_registers.mclk_pwrmgt_cntl = RREG32(MCLK_PWRMGT_CNTL);
	pi->clock_registers.mpll_ad_func_cntl = RREG32(MPLL_AD_FUNC_CNTL);
	pi->clock_registers.mpll_dq_func_cntl = RREG32(MPLL_DQ_FUNC_CNTL);
	pi->clock_registers.mpll_func_cntl = RREG32(MPLL_FUNC_CNTL);
	pi->clock_registers.mpll_func_cntl_1 = RREG32(MPLL_FUNC_CNTL_1);
	pi->clock_registers.mpll_func_cntl_2 = RREG32(MPLL_FUNC_CNTL_2);
	pi->clock_registers.mpll_ss1 = RREG32(MPLL_SS1);
	pi->clock_registers.mpll_ss2 = RREG32(MPLL_SS2);
}

static void ci_init_sclk_t(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	pi->low_sclk_interrupt_t = 0;
}

static void ci_enable_thermal_protection(struct radeon_device *rdev,
					 bool enable)
{
	u32 tmp = RREG32_SMC(GENERAL_PWRMGT);

	if (enable)
		tmp &= ~THERMAL_PROTECTION_DIS;
	else
		tmp |= THERMAL_PROTECTION_DIS;
	WREG32_SMC(GENERAL_PWRMGT, tmp);
}

static void ci_enable_acpi_power_management(struct radeon_device *rdev)
{
	u32 tmp = RREG32_SMC(GENERAL_PWRMGT);

	tmp |= STATIC_PM_EN;

	WREG32_SMC(GENERAL_PWRMGT, tmp);
}

#if 0
static int ci_enter_ulp_state(struct radeon_device *rdev)
{

	WREG32(SMC_MESSAGE_0, PPSMC_MSG_SwitchToMinimumPower);

	udelay(25000);

	return 0;
}

static int ci_exit_ulp_state(struct radeon_device *rdev)
{
	int i;

	WREG32(SMC_MESSAGE_0, PPSMC_MSG_ResumeFromMinimumPower);

	udelay(7000);

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(SMC_RESP_0) == 1)
			break;
		udelay(1000);
	}

	return 0;
}
#endif

static int ci_notify_smc_display_change(struct radeon_device *rdev,
					bool has_display)
{
	PPSMC_Msg msg = has_display ? PPSMC_MSG_HasDisplay : PPSMC_MSG_NoDisplay;

	return (ci_send_msg_to_smc(rdev, msg) == PPSMC_Result_OK) ? 0 : -EINVAL;
}

static int ci_enable_ds_master_switch(struct radeon_device *rdev,
				      bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	if (enable) {
		if (pi->caps_sclk_ds) {
			if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_ON) != PPSMC_Result_OK)
				return -EINVAL;
		} else {
			if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
				return -EINVAL;
		}
	} else {
		if (pi->caps_sclk_ds) {
			if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
				return -EINVAL;
		}
	}

	return 0;
}

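/*
 * Program the display gap and report the frame timing to the SMC:
 * pre_vbi_time_in_us is the frame time less the vblank period and a
 * 200 us margin; CG_DISPLAY_GAP_CNTL2 gets that interval scaled by the
 * reference clock.
 */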
static void ci_program_display_gap(struct radeon_device *rdev)
{
	u32 tmp = RREG32_SMC(CG_DISPLAY_GAP_CNTL);
	u32 pre_vbi_time_in_us;
	u32 frame_time_in_us;
	u32 ref_clock = rdev->clock.spll.reference_freq;
	u32 refresh_rate = r600_dpm_get_vrefresh(rdev);
	u32 vblank_time = r600_dpm_get_vblank_time(rdev);

	tmp &= ~DISP_GAP_MASK;
	if (rdev->pm.dpm.new_active_crtc_count > 0)
		tmp |= DISP_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM);
	else
		tmp |= DISP_GAP(R600_PM_DISPLAY_GAP_IGNORE);
	WREG32_SMC(CG_DISPLAY_GAP_CNTL, tmp);

	if (refresh_rate == 0)
		refresh_rate = 60;
	if (vblank_time == 0xffffffff)
		vblank_time = 500;
	frame_time_in_us = 1000000 / refresh_rate;
	pre_vbi_time_in_us =
		frame_time_in_us - 200 - vblank_time;
	tmp = pre_vbi_time_in_us * (ref_clock / 100);

	WREG32_SMC(CG_DISPLAY_GAP_CNTL2, tmp);
	ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, PreVBlankGap), 0x64);
	ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us));

	ci_notify_smc_display_change(rdev, (rdev->pm.dpm.new_active_crtc_count == 1));
}

static void ci_enable_spread_spectrum(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 tmp;

	if (enable) {
		if (pi->caps_sclk_ss_support) {
			tmp = RREG32_SMC(GENERAL_PWRMGT);
			tmp |= DYN_SPREAD_SPECTRUM_EN;
			WREG32_SMC(GENERAL_PWRMGT, tmp);
		}
	} else {
		tmp = RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM);
		tmp &= ~SSEN;
		WREG32_SMC(CG_SPLL_SPREAD_SPECTRUM, tmp);

		tmp = RREG32_SMC(GENERAL_PWRMGT);
		tmp &= ~DYN_SPREAD_SPECTRUM_EN;
		WREG32_SMC(GENERAL_PWRMGT, tmp);
	}
}

static void ci_program_sstp(struct radeon_device *rdev)
{
	WREG32_SMC(CG_SSP, (SSTU(R600_SSTU_DFLT) | SST(R600_SST_DFLT)));
}

static void ci_enable_display_gap(struct radeon_device *rdev)
{
	u32 tmp = RREG32_SMC(CG_DISPLAY_GAP_CNTL);

	tmp &= ~(DISP_GAP_MASK | DISP_GAP_MCHG_MASK);
	tmp |= (DISP_GAP(R600_PM_DISPLAY_GAP_IGNORE) |
		DISP_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK));

	WREG32_SMC(CG_DISPLAY_GAP_CNTL, tmp);
}

static void ci_program_vc(struct radeon_device *rdev)
{
	u32 tmp;

	tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
	tmp &= ~(RESET_SCLK_CNT | RESET_BUSY_CNT);
	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);

	WREG32_SMC(CG_FTV_0, CISLANDS_VRC_DFLT0);
	WREG32_SMC(CG_FTV_1, CISLANDS_VRC_DFLT1);
	WREG32_SMC(CG_FTV_2, CISLANDS_VRC_DFLT2);
	WREG32_SMC(CG_FTV_3, CISLANDS_VRC_DFLT3);
	WREG32_SMC(CG_FTV_4, CISLANDS_VRC_DFLT4);
	WREG32_SMC(CG_FTV_5, CISLANDS_VRC_DFLT5);
	WREG32_SMC(CG_FTV_6, CISLANDS_VRC_DFLT6);
	WREG32_SMC(CG_FTV_7, CISLANDS_VRC_DFLT7);
}

static void ci_clear_vc(struct radeon_device *rdev)
{
	u32 tmp;

	tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
	tmp |= (RESET_SCLK_CNT | RESET_BUSY_CNT);
	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);

	WREG32_SMC(CG_FTV_0, 0);
	WREG32_SMC(CG_FTV_1, 0);
	WREG32_SMC(CG_FTV_2, 0);
	WREG32_SMC(CG_FTV_3, 0);
	WREG32_SMC(CG_FTV_4, 0);
	WREG32_SMC(CG_FTV_5, 0);
	WREG32_SMC(CG_FTV_6, 0);
	WREG32_SMC(CG_FTV_7, 0);
}

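/*
 * Wait for the SMC boot sequence to finish, then hold the SMC in reset
 * with its clock stopped while the microcode is loaded.
 */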
1570 static int ci_upload_firmware(struct radeon_device *rdev)
1571 {
1572 struct ci_power_info *pi = ci_get_pi(rdev);
1573 int i, ret;
1574
1575 for (i = 0; i < rdev->usec_timeout; i++) {
1576 if (RREG32_SMC(RCU_UC_EVENTS) & BOOT_SEQ_DONE)
1577 break;
1578 }
1579 WREG32_SMC(SMC_SYSCON_MISC_CNTL, 1);
1580
1581 ci_stop_smc_clock(rdev);
1582 ci_reset_smc(rdev);
1583
1584 ret = ci_load_smc_ucode(rdev, pi->sram_end);
1585
1586 return ret;
1587
1588 }
1589
1590 static int ci_get_svi2_voltage_table(struct radeon_device *rdev,
1591 struct radeon_clock_voltage_dependency_table *voltage_dependency_table,
1592 struct atom_voltage_table *voltage_table)
1593 {
1594 u32 i;
1595
1596 if (voltage_dependency_table == NULL)
1597 return -EINVAL;
1598
1599 voltage_table->mask_low = 0;
1600 voltage_table->phase_delay = 0;
1601
1602 voltage_table->count = voltage_dependency_table->count;
1603 for (i = 0; i < voltage_table->count; i++) {
1604 voltage_table->entries[i].value = voltage_dependency_table->entries[i].v;
1605 voltage_table->entries[i].smio_low = 0;
1606 }
1607
1608 return 0;
1609 }
1610
1611 static int ci_construct_voltage_tables(struct radeon_device *rdev)
1612 {
1613 struct ci_power_info *pi = ci_get_pi(rdev);
1614 int ret;
1615
1616 if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
1617 ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDC,
1618 VOLTAGE_OBJ_GPIO_LUT,
1619 &pi->vddc_voltage_table);
1620 if (ret)
1621 return ret;
1622 } else if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
1623 ret = ci_get_svi2_voltage_table(rdev,
1624 &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
1625 &pi->vddc_voltage_table);
1626 if (ret)
1627 return ret;
1628 }
1629
1630 if (pi->vddc_voltage_table.count > SMU7_MAX_LEVELS_VDDC)
1631 si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_VDDC,
1632 &pi->vddc_voltage_table);
1633
1634 if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
1635 ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDCI,
1636 VOLTAGE_OBJ_GPIO_LUT,
1637 &pi->vddci_voltage_table);
1638 if (ret)
1639 return ret;
1640 } else if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
1641 ret = ci_get_svi2_voltage_table(rdev,
1642 &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
1643 &pi->vddci_voltage_table);
1644 if (ret)
1645 return ret;
1646 }
1647
1648 if (pi->vddci_voltage_table.count > SMU7_MAX_LEVELS_VDDCI)
1649 si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_VDDCI,
1650 &pi->vddci_voltage_table);
1651
1652 if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
1653 ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_MVDDC,
1654 VOLTAGE_OBJ_GPIO_LUT,
1655 &pi->mvdd_voltage_table);
1656 if (ret)
1657 return ret;
1658 } else if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
1659 ret = ci_get_svi2_voltage_table(rdev,
1660 &rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
1661 &pi->mvdd_voltage_table);
1662 if (ret)
1663 return ret;
1664 }
1665
1666 if (pi->mvdd_voltage_table.count > SMU7_MAX_LEVELS_MVDD)
1667 si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_MVDD,
1668 &pi->mvdd_voltage_table);
1669
1670 return 0;
1671 }
1672
1673 static void ci_populate_smc_voltage_table(struct radeon_device *rdev,
1674 struct atom_voltage_table_entry *voltage_table,
1675 SMU7_Discrete_VoltageLevel *smc_voltage_table)
1676 {
1677 int ret;
1678
1679 ret = ci_get_std_voltage_value_sidd(rdev, voltage_table,
1680 &smc_voltage_table->StdVoltageHiSidd,
1681 &smc_voltage_table->StdVoltageLoSidd);
1682
1683 if (ret) {
1684 smc_voltage_table->StdVoltageHiSidd = voltage_table->value * VOLTAGE_SCALE;
1685 smc_voltage_table->StdVoltageLoSidd = voltage_table->value * VOLTAGE_SCALE;
1686 }
1687
1688 smc_voltage_table->Voltage = cpu_to_be16(voltage_table->value * VOLTAGE_SCALE);
1689 smc_voltage_table->StdVoltageHiSidd =
1690 cpu_to_be16(smc_voltage_table->StdVoltageHiSidd);
1691 smc_voltage_table->StdVoltageLoSidd =
1692 cpu_to_be16(smc_voltage_table->StdVoltageLoSidd);
1693 }
1694
1695 static int ci_populate_smc_vddc_table(struct radeon_device *rdev,
1696 SMU7_Discrete_DpmTable *table)
1697 {
1698 struct ci_power_info *pi = ci_get_pi(rdev);
1699 unsigned int count;
1700
1701 table->VddcLevelCount = pi->vddc_voltage_table.count;
1702 for (count = 0; count < table->VddcLevelCount; count++) {
1703 ci_populate_smc_voltage_table(rdev,
1704 &pi->vddc_voltage_table.entries[count],
1705 &table->VddcLevel[count]);
1706
1707 if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
1708 table->VddcLevel[count].Smio |=
1709 pi->vddc_voltage_table.entries[count].smio_low;
1710 else
1711 table->VddcLevel[count].Smio = 0;
1712 }
1713 table->VddcLevelCount = cpu_to_be32(table->VddcLevelCount);
1714
1715 return 0;
1716 }
1717
1718 static int ci_populate_smc_vddci_table(struct radeon_device *rdev,
1719 SMU7_Discrete_DpmTable *table)
1720 {
1721 unsigned int count;
1722 struct ci_power_info *pi = ci_get_pi(rdev);
1723
1724 table->VddciLevelCount = pi->vddci_voltage_table.count;
1725 for (count = 0; count < table->VddciLevelCount; count++) {
1726 ci_populate_smc_voltage_table(rdev,
1727 &pi->vddci_voltage_table.entries[count],
1728 &table->VddciLevel[count]);
1729
1730 if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
1731 table->VddciLevel[count].Smio |=
1732 pi->vddci_voltage_table.entries[count].smio_low;
1733 else
1734 table->VddciLevel[count].Smio = 0;
1735 }
1736 table->VddciLevelCount = cpu_to_be32(table->VddciLevelCount);
1737
1738 return 0;
1739 }
1740
1741 static int ci_populate_smc_mvdd_table(struct radeon_device *rdev,
1742 SMU7_Discrete_DpmTable *table)
1743 {
1744 struct ci_power_info *pi = ci_get_pi(rdev);
1745 unsigned int count;
1746
1747 table->MvddLevelCount = pi->mvdd_voltage_table.count;
1748 for (count = 0; count < table->MvddLevelCount; count++) {
1749 ci_populate_smc_voltage_table(rdev,
1750 &pi->mvdd_voltage_table.entries[count],
1751 &table->MvddLevel[count]);
1752
1753 if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
1754 table->MvddLevel[count].Smio |=
1755 pi->mvdd_voltage_table.entries[count].smio_low;
1756 else
1757 table->MvddLevel[count].Smio = 0;
1758 }
1759 table->MvddLevelCount = cpu_to_be32(table->MvddLevelCount);
1760
1761 return 0;
1762 }
1763
1764 static int ci_populate_smc_voltage_tables(struct radeon_device *rdev,
1765 SMU7_Discrete_DpmTable *table)
1766 {
1767 int ret;
1768
1769 ret = ci_populate_smc_vddc_table(rdev, table);
1770 if (ret)
1771 return ret;
1772
1773 ret = ci_populate_smc_vddci_table(rdev, table);
1774 if (ret)
1775 return ret;
1776
1777 ret = ci_populate_smc_mvdd_table(rdev, table);
1778 if (ret)
1779 return ret;
1780
1781 return 0;
1782 }
1783
1784 static int ci_populate_mvdd_value(struct radeon_device *rdev, u32 mclk,
1785 SMU7_Discrete_VoltageLevel *voltage)
1786 {
1787 struct ci_power_info *pi = ci_get_pi(rdev);
1788 u32 i = 0;
1789
1790 if (pi->mvdd_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
1791 for (i = 0; i < rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count; i++) {
1792 if (mclk <= rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries[i].clk) {
1793 voltage->Voltage = pi->mvdd_voltage_table.entries[i].value;
1794 break;
1795 }
1796 }
1797
1798 if (i >= rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count)
1799 return -EINVAL;
1800 }
1801
1802 return (pi->mvdd_control != CISLANDS_VOLTAGE_CONTROL_NONE) ? 0 : -EINVAL;
1803 }
1804
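/*
 * Derive the "standard" hi/lo SIDD leakage voltages for a VDDC value:
 * first look for an exact match in the vddc-vs-sclk dependency table,
 * then fall back to the first entry with a voltage >= the requested
 * one. The index is clamped to the last cac_leakage_table entry, and
 * both outputs default to the raw scaled VDDC value when no leakage
 * table exists.
 */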
1805 static int ci_get_std_voltage_value_sidd(struct radeon_device *rdev,
1806 struct atom_voltage_table_entry *voltage_table,
1807 u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd)
1808 {
1809 u16 v_index, idx;
1810 bool voltage_found = false;
1811 *std_voltage_hi_sidd = voltage_table->value * VOLTAGE_SCALE;
1812 *std_voltage_lo_sidd = voltage_table->value * VOLTAGE_SCALE;
1813
1814 if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries == NULL)
1815 return -EINVAL;
1816
1817 if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries) {
1818 for (v_index = 0; (u32)v_index < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
1819 if (voltage_table->value ==
1820 rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
1821 voltage_found = true;
1822 if ((u32)v_index < rdev->pm.dpm.dyn_state.cac_leakage_table.count)
1823 idx = v_index;
1824 else
1825 idx = rdev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
1826 *std_voltage_lo_sidd =
1827 rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
1828 *std_voltage_hi_sidd =
1829 rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
1830 break;
1831 }
1832 }
1833
1834 if (!voltage_found) {
1835 for (v_index = 0; (u32)v_index < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
1836 if (voltage_table->value <=
1837 rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
1838 voltage_found = true;
1839 if ((u32)v_index < rdev->pm.dpm.dyn_state.cac_leakage_table.count)
1840 idx = v_index;
1841 else
1842 idx = rdev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
1843 *std_voltage_lo_sidd =
1844 rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
1845 *std_voltage_hi_sidd =
1846 rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
1847 break;
1848 }
1849 }
1850 }
1851 }
1852
1853 return 0;
1854 }
1855
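/*
 * Select the phase-shedding entry for a clock: default to 1, then take
 * the index of the first limit the clock stays under. Note the result
 * is an index, not a count; level 0 is reachable when the clock is
 * below the first limit.
 */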
1856 static void ci_populate_phase_value_based_on_sclk(struct radeon_device *rdev,
1857 const struct radeon_phase_shedding_limits_table *limits,
1858 u32 sclk,
1859 u32 *phase_shedding)
1860 {
1861 unsigned int i;
1862
1863 *phase_shedding = 1;
1864
1865 for (i = 0; i < limits->count; i++) {
1866 if (sclk < limits->entries[i].sclk) {
1867 *phase_shedding = i;
1868 break;
1869 }
1870 }
1871 }
1872
1873 static void ci_populate_phase_value_based_on_mclk(struct radeon_device *rdev,
1874 const struct radeon_phase_shedding_limits_table *limits,
1875 u32 mclk,
1876 u32 *phase_shedding)
1877 {
1878 unsigned int i;
1879
1880 *phase_shedding = 1;
1881
1882 for (i = 0; i < limits->count; i++) {
1883 if (mclk < limits->entries[i].mclk) {
1884 *phase_shedding = i;
1885 break;
1886 }
1887 }
1888 }
1889
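/*
 * The dword at arb_table_start holds the active MC ARB frequency set
 * in its top byte; with MC_CG_ARB_FREQ_F1 (0x0b) the stored value
 * becomes 0x0B000000 | (old & 0x00FFFFFF).
 */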
1890 static int ci_init_arb_table_index(struct radeon_device *rdev)
1891 {
1892 struct ci_power_info *pi = ci_get_pi(rdev);
1893 u32 tmp;
1894 int ret;
1895
1896 ret = ci_read_smc_sram_dword(rdev, pi->arb_table_start,
1897 &tmp, pi->sram_end);
1898 if (ret)
1899 return ret;
1900
1901 tmp &= 0x00FFFFFF;
1902 tmp |= MC_CG_ARB_FREQ_F1 << 24;
1903
1904 return ci_write_smc_sram_dword(rdev, pi->arb_table_start,
1905 tmp, pi->sram_end);
1906 }
1907
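/*
 * Pick the voltage of the first table entry whose clock is >= the
 * requested clock, clamping to the highest entry. Illustrative values:
 * with entries {300 MHz/900 mV, 600/1000, 800/1100}, a request of
 * 500 MHz yields 1000 mV and a request of 900 MHz clamps to 1100 mV.
 */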
1908 static int ci_get_dependency_volt_by_clk(struct radeon_device *rdev,
1909 struct radeon_clock_voltage_dependency_table *allowed_clock_voltage_table,
1910 u32 clock, u32 *voltage)
1911 {
1912 u32 i = 0;
1913
1914 if (allowed_clock_voltage_table->count == 0)
1915 return -EINVAL;
1916
1917 for (i = 0; i < allowed_clock_voltage_table->count; i++) {
1918 if (allowed_clock_voltage_table->entries[i].clk >= clock) {
1919 *voltage = allowed_clock_voltage_table->entries[i].v;
1920 return 0;
1921 }
1922 }
1923
1924 *voltage = allowed_clock_voltage_table->entries[i-1].v;
1925
1926 return 0;
1927 }
1928
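/*
 * Find the largest deep-sleep divider id i such that sclk >> i still
 * meets the minimum engine clock. Illustrative: with sclk = 40000,
 * min = 2500 and a maximum divider id of 5, 40000 >> 5 = 1250 fails
 * but 40000 >> 4 = 2500 passes, so the id is 4.
 */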
1929 static u8 ci_get_sleep_divider_id_from_clock(struct radeon_device *rdev,
1930 u32 sclk, u32 min_sclk_in_sr)
1931 {
1932 u32 i;
1933 u32 tmp;
1934 u32 min = (min_sclk_in_sr > CISLAND_MINIMUM_ENGINE_CLOCK) ?
1935 min_sclk_in_sr : CISLAND_MINIMUM_ENGINE_CLOCK;
1936
1937 if (sclk < min)
1938 return 0;
1939
1940 for (i = CISLAND_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) {
1941 tmp = sclk / (1 << i);
1942 if (tmp >= min || i == 0)
1943 break;
1944 }
1945
1946 return (u8)i;
1947 }
1948
1949 static int ci_initial_switch_from_arb_f0_to_f1(struct radeon_device *rdev)
1950 {
1951 return ni_copy_and_switch_arb_sets(rdev, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
1952 }
1953
1954 static int ci_reset_to_default(struct radeon_device *rdev)
1955 {
1956 return (ci_send_msg_to_smc(rdev, PPSMC_MSG_ResetToDefaults) == PPSMC_Result_OK) ?
1957 0 : -EINVAL;
1958 }
1959
1960 static int ci_force_switch_to_arb_f0(struct radeon_device *rdev)
1961 {
1962 u32 tmp;
1963
1964 tmp = (RREG32_SMC(SMC_SCRATCH9) & 0x0000ff00) >> 8;
1965
1966 if (tmp == MC_CG_ARB_FREQ_F0)
1967 return 0;
1968
1969 return ni_copy_and_switch_arb_sets(rdev, tmp, MC_CG_ARB_FREQ_F0);
1970 }
1971
1972 static int ci_populate_memory_timing_parameters(struct radeon_device *rdev,
1973 u32 sclk,
1974 u32 mclk,
1975 SMU7_Discrete_MCArbDramTimingTableEntry *arb_regs)
1976 {
1977 u32 dram_timing;
1978 u32 dram_timing2;
1979 u32 burst_time;
1980
1981 radeon_atom_set_engine_dram_timings(rdev, sclk, mclk);
1982
1983 dram_timing = RREG32(MC_ARB_DRAM_TIMING);
1984 dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
1985 burst_time = RREG32(MC_ARB_BURST_TIME) & STATE0_MASK;
1986
1987 arb_regs->McArbDramTiming = cpu_to_be32(dram_timing);
1988 arb_regs->McArbDramTiming2 = cpu_to_be32(dram_timing2);
1989 arb_regs->McArbBurstTime = (u8)burst_time;
1990
1991 return 0;
1992 }
1993
1994 static int ci_do_program_memory_timing_parameters(struct radeon_device *rdev)
1995 {
1996 struct ci_power_info *pi = ci_get_pi(rdev);
1997 SMU7_Discrete_MCArbDramTimingTable arb_regs;
1998 u32 i, j;
1999 int ret = 0;
2000
2001 memset(&arb_regs, 0, sizeof(SMU7_Discrete_MCArbDramTimingTable));
2002
2003 for (i = 0; i < pi->dpm_table.sclk_table.count; i++) {
2004 for (j = 0; j < pi->dpm_table.mclk_table.count; j++) {
2005 ret = ci_populate_memory_timing_parameters(rdev,
2006 pi->dpm_table.sclk_table.dpm_levels[i].value,
2007 pi->dpm_table.mclk_table.dpm_levels[j].value,
2008 &arb_regs.entries[i][j]);
2009 if (ret)
2010 break;
2011 }
2012 }
2013
2014 if (ret == 0)
2015 ret = ci_copy_bytes_to_smc(rdev,
2016 pi->arb_table_start,
2017 (u8 *)&arb_regs,
2018 sizeof(SMU7_Discrete_MCArbDramTimingTable),
2019 pi->sram_end);
2020
2021 return ret;
2022 }
2023
2024 static int ci_program_memory_timing_parameters(struct radeon_device *rdev)
2025 {
2026 struct ci_power_info *pi = ci_get_pi(rdev);
2027
2028 if (pi->need_update_smu7_dpm_table == 0)
2029 return 0;
2030
2031 return ci_do_program_memory_timing_parameters(rdev);
2032 }
2033
2034 static void ci_populate_smc_initial_state(struct radeon_device *rdev,
2035 struct radeon_ps *radeon_boot_state)
2036 {
2037 struct ci_ps *boot_state = ci_get_ps(radeon_boot_state);
2038 struct ci_power_info *pi = ci_get_pi(rdev);
2039 u32 level = 0;
2040
2041 for (level = 0; level < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; level++) {
2042 if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[level].clk >=
2043 boot_state->performance_levels[0].sclk) {
2044 pi->smc_state_table.GraphicsBootLevel = level;
2045 break;
2046 }
2047 }
2048
2049 for (level = 0; level < rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.count; level++) {
2050 if (rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries[level].clk >=
2051 boot_state->performance_levels[0].mclk) {
2052 pi->smc_state_table.MemoryBootLevel = level;
2053 break;
2054 }
2055 }
2056 }
2057
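/*
 * Build a bitmask with bit n set when dpm_levels[n] is enabled, e.g.
 * levels {on, on, off, on} produce 0xB (0b1011). The else branch is
 * redundant (the freshly shifted-in bit is already zero) but harmless.
 */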
2058 static u32 ci_get_dpm_level_enable_mask_value(struct ci_single_dpm_table *dpm_table)
2059 {
2060 u32 i;
2061 u32 mask_value = 0;
2062
2063 for (i = dpm_table->count; i > 0; i--) {
2064 mask_value = mask_value << 1;
2065 if (dpm_table->dpm_levels[i-1].enabled)
2066 mask_value |= 0x1;
2067 else
2068 mask_value &= 0xFFFFFFFE;
2069 }
2070
2071 return mask_value;
2072 }
2073
2074 static void ci_populate_smc_link_level(struct radeon_device *rdev,
2075 SMU7_Discrete_DpmTable *table)
2076 {
2077 struct ci_power_info *pi = ci_get_pi(rdev);
2078 struct ci_dpm_table *dpm_table = &pi->dpm_table;
2079 u32 i;
2080
2081 for (i = 0; i < dpm_table->pcie_speed_table.count; i++) {
2082 table->LinkLevel[i].PcieGenSpeed =
2083 (u8)dpm_table->pcie_speed_table.dpm_levels[i].value;
2084 table->LinkLevel[i].PcieLaneCount =
2085 r600_encode_pci_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
2086 table->LinkLevel[i].EnabledForActivity = 1;
2087 table->LinkLevel[i].DownT = cpu_to_be32(5);
2088 table->LinkLevel[i].UpT = cpu_to_be32(30);
2089 }
2090
2091 pi->smc_state_table.LinkLevelCount = (u8)dpm_table->pcie_speed_table.count;
2092 pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
2093 ci_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
2094 }
2095
2096 static int ci_populate_smc_uvd_level(struct radeon_device *rdev,
2097 SMU7_Discrete_DpmTable *table)
2098 {
2099 u32 count;
2100 struct atom_clock_dividers dividers;
2101 int ret = -EINVAL;
2102
2103 table->UvdLevelCount =
2104 rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count;
2105
2106 for (count = 0; count < table->UvdLevelCount; count++) {
2107 table->UvdLevel[count].VclkFrequency =
2108 rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].vclk;
2109 table->UvdLevel[count].DclkFrequency =
2110 rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].dclk;
2111 table->UvdLevel[count].MinVddc =
2112 rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2113 table->UvdLevel[count].MinVddcPhases = 1;
2114
2115 ret = radeon_atom_get_clock_dividers(rdev,
2116 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2117 table->UvdLevel[count].VclkFrequency, false, &dividers);
2118 if (ret)
2119 return ret;
2120
2121 table->UvdLevel[count].VclkDivider = (u8)dividers.post_divider;
2122
2123 ret = radeon_atom_get_clock_dividers(rdev,
2124 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2125 table->UvdLevel[count].DclkFrequency, false, &dividers);
2126 if (ret)
2127 return ret;
2128
2129 table->UvdLevel[count].DclkDivider = (u8)dividers.post_divider;
2130
2131 table->UvdLevel[count].VclkFrequency = cpu_to_be32(table->UvdLevel[count].VclkFrequency);
2132 table->UvdLevel[count].DclkFrequency = cpu_to_be32(table->UvdLevel[count].DclkFrequency);
2133 table->UvdLevel[count].MinVddc = cpu_to_be16(table->UvdLevel[count].MinVddc);
2134 }
2135
2136 return ret;
2137 }
2138
2139 static int ci_populate_smc_vce_level(struct radeon_device *rdev,
2140 SMU7_Discrete_DpmTable *table)
2141 {
2142 u32 count;
2143 struct atom_clock_dividers dividers;
2144 int ret = -EINVAL;
2145
2146 table->VceLevelCount =
2147 rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count;
2148
2149 for (count = 0; count < table->VceLevelCount; count++) {
2150 table->VceLevel[count].Frequency =
2151 rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].evclk;
2152 table->VceLevel[count].MinVoltage =
2153 (u16)rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2154 table->VceLevel[count].MinPhases = 1;
2155
2156 ret = radeon_atom_get_clock_dividers(rdev,
2157 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2158 table->VceLevel[count].Frequency, false, &dividers);
2159 if (ret)
2160 return ret;
2161
2162 table->VceLevel[count].Divider = (u8)dividers.post_divider;
2163
2164 table->VceLevel[count].Frequency = cpu_to_be32(table->VceLevel[count].Frequency);
2165 table->VceLevel[count].MinVoltage = cpu_to_be16(table->VceLevel[count].MinVoltage);
2166 }
2167
2168 return ret;
2170 }
2171
2172 static int ci_populate_smc_acp_level(struct radeon_device *rdev,
2173 SMU7_Discrete_DpmTable *table)
2174 {
2175 u32 count;
2176 struct atom_clock_dividers dividers;
2177 int ret = -EINVAL;
2178
2179 table->AcpLevelCount = (u8)
2180 (rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count);
2181
2182 for (count = 0; count < table->AcpLevelCount; count++) {
2183 table->AcpLevel[count].Frequency =
2184 rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].clk;
2185 table->AcpLevel[count].MinVoltage =
2186 (u16)rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2187 table->AcpLevel[count].MinPhases = 1;
2188
2189 ret = radeon_atom_get_clock_dividers(rdev,
2190 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2191 table->AcpLevel[count].Frequency, false, &dividers);
2192 if (ret)
2193 return ret;
2194
2195 table->AcpLevel[count].Divider = (u8)dividers.post_divider;
2196
2197 table->AcpLevel[count].Frequency = cpu_to_be32(table->AcpLevel[count].Frequency);
2198 table->AcpLevel[count].MinVoltage = cpu_to_be16(table->AcpLevel[count].MinVoltage);
2199 }
2200
2201 return ret;
2202 }
2203
2204 static int ci_populate_smc_samu_level(struct radeon_device *rdev,
2205 SMU7_Discrete_DpmTable *table)
2206 {
2207 u32 count;
2208 struct atom_clock_dividers dividers;
2209 int ret = -EINVAL;
2210
2211 table->SamuLevelCount =
2212 rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count;
2213
2214 for (count = 0; count < table->SamuLevelCount; count++) {
2215 table->SamuLevel[count].Frequency =
2216 rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].clk;
2217 table->SamuLevel[count].MinVoltage =
2218 rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2219 table->SamuLevel[count].MinPhases = 1;
2220
2221 ret = radeon_atom_get_clock_dividers(rdev,
2222 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2223 table->SamuLevel[count].Frequency, false, &dividers);
2224 if (ret)
2225 return ret;
2226
2227 table->SamuLevel[count].Divider = (u8)dividers.post_divider;
2228
2229 table->SamuLevel[count].Frequency = cpu_to_be32(table->SamuLevel[count].Frequency);
2230 table->SamuLevel[count].MinVoltage = cpu_to_be16(table->SamuLevel[count].MinVoltage);
2231 }
2232
2233 return ret;
2234 }
2235
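/*
 * Memory PLL spread spectrum, as computed below (fnom is the nominal
 * VCO frequency, 4x or 2x the memory clock for GDDR5/DDR3):
 *   clks = ref * 5 / ss.rate
 *   clkv = (131 * ss.percentage * ss.rate / 100) * (fnom / ref)^2 / fnom
 * evaluated in integer arithmetic.
 */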
2236 static int ci_calculate_mclk_params(struct radeon_device *rdev,
2237 u32 memory_clock,
2238 SMU7_Discrete_MemoryLevel *mclk,
2239 bool strobe_mode,
2240 bool dll_state_on)
2241 {
2242 struct ci_power_info *pi = ci_get_pi(rdev);
2243 u32 dll_cntl = pi->clock_registers.dll_cntl;
2244 u32 mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
2245 u32 mpll_ad_func_cntl = pi->clock_registers.mpll_ad_func_cntl;
2246 u32 mpll_dq_func_cntl = pi->clock_registers.mpll_dq_func_cntl;
2247 u32 mpll_func_cntl = pi->clock_registers.mpll_func_cntl;
2248 u32 mpll_func_cntl_1 = pi->clock_registers.mpll_func_cntl_1;
2249 u32 mpll_func_cntl_2 = pi->clock_registers.mpll_func_cntl_2;
2250 u32 mpll_ss1 = pi->clock_registers.mpll_ss1;
2251 u32 mpll_ss2 = pi->clock_registers.mpll_ss2;
2252 struct atom_mpll_param mpll_param;
2253 int ret;
2254
2255 ret = radeon_atom_get_memory_pll_dividers(rdev, memory_clock, strobe_mode, &mpll_param);
2256 if (ret)
2257 return ret;
2258
2259 mpll_func_cntl &= ~BWCTRL_MASK;
2260 mpll_func_cntl |= BWCTRL(mpll_param.bwcntl);
2261
2262 mpll_func_cntl_1 &= ~(CLKF_MASK | CLKFRAC_MASK | VCO_MODE_MASK);
2263 mpll_func_cntl_1 |= CLKF(mpll_param.clkf) |
2264 CLKFRAC(mpll_param.clkfrac) | VCO_MODE(mpll_param.vco_mode);
2265
2266 mpll_ad_func_cntl &= ~YCLK_POST_DIV_MASK;
2267 mpll_ad_func_cntl |= YCLK_POST_DIV(mpll_param.post_div);
2268
2269 if (pi->mem_gddr5) {
2270 mpll_dq_func_cntl &= ~(YCLK_SEL_MASK | YCLK_POST_DIV_MASK);
2271 mpll_dq_func_cntl |= YCLK_SEL(mpll_param.yclk_sel) |
2272 YCLK_POST_DIV(mpll_param.post_div);
2273 }
2274
2275 if (pi->caps_mclk_ss_support) {
2276 struct radeon_atom_ss ss;
2277 u32 freq_nom;
2278 u32 tmp;
2279 u32 reference_clock = rdev->clock.mpll.reference_freq;
2280
2281 if (pi->mem_gddr5)
2282 freq_nom = memory_clock * 4;
2283 else
2284 freq_nom = memory_clock * 2;
2285
2286 tmp = (freq_nom / reference_clock);
2287 tmp = tmp * tmp;
2288 if (radeon_atombios_get_asic_ss_info(rdev, &ss,
2289 ASIC_INTERNAL_MEMORY_SS, freq_nom)) {
2290 u32 clks = reference_clock * 5 / ss.rate;
2291 u32 clkv = (u32)((((131 * ss.percentage * ss.rate) / 100) * tmp) / freq_nom);
2292
2293 mpll_ss1 &= ~CLKV_MASK;
2294 mpll_ss1 |= CLKV(clkv);
2295
2296 mpll_ss2 &= ~CLKS_MASK;
2297 mpll_ss2 |= CLKS(clks);
2298 }
2299 }
2300
2301 mclk_pwrmgt_cntl &= ~DLL_SPEED_MASK;
2302 mclk_pwrmgt_cntl |= DLL_SPEED(mpll_param.dll_speed);
2303
2304 if (dll_state_on)
2305 mclk_pwrmgt_cntl |= MRDCK0_PDNB | MRDCK1_PDNB;
2306 else
2307 mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);
2308
2309 mclk->MclkFrequency = memory_clock;
2310 mclk->MpllFuncCntl = mpll_func_cntl;
2311 mclk->MpllFuncCntl_1 = mpll_func_cntl_1;
2312 mclk->MpllFuncCntl_2 = mpll_func_cntl_2;
2313 mclk->MpllAdFuncCntl = mpll_ad_func_cntl;
2314 mclk->MpllDqFuncCntl = mpll_dq_func_cntl;
2315 mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl;
2316 mclk->DllCntl = dll_cntl;
2317 mclk->MpllSs1 = mpll_ss1;
2318 mclk->MpllSs2 = mpll_ss2;
2319
2320 return 0;
2321 }
2322
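/*
 * A memory level's features are gated on thresholds from ci_power_info:
 * stutter mode needs mclk <= mclk_stutter_mode_threshold, UVD idle,
 * display stutter already enabled and at most two active CRTCs; strobe
 * mode needs mclk <= mclk_strobe_mode_threshold; on GDDR5 the EDC
 * read/write features kick in above their respective thresholds.
 */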
2323 static int ci_populate_single_memory_level(struct radeon_device *rdev,
2324 u32 memory_clock,
2325 SMU7_Discrete_MemoryLevel *memory_level)
2326 {
2327 struct ci_power_info *pi = ci_get_pi(rdev);
2328 int ret;
2329 bool dll_state_on;
2330
2331 if (rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries) {
2332 ret = ci_get_dependency_volt_by_clk(rdev,
2333 &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
2334 memory_clock, &memory_level->MinVddc);
2335 if (ret)
2336 return ret;
2337 }
2338
2339 if (rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries) {
2340 ret = ci_get_dependency_volt_by_clk(rdev,
2341 &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
2342 memory_clock, &memory_level->MinVddci);
2343 if (ret)
2344 return ret;
2345 }
2346
2347 if (rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries) {
2348 ret = ci_get_dependency_volt_by_clk(rdev,
2349 &rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
2350 memory_clock, &memory_level->MinMvdd);
2351 if (ret)
2352 return ret;
2353 }
2354
2355 memory_level->MinVddcPhases = 1;
2356
2357 if (pi->vddc_phase_shed_control)
2358 ci_populate_phase_value_based_on_mclk(rdev,
2359 &rdev->pm.dpm.dyn_state.phase_shedding_limits_table,
2360 memory_clock,
2361 &memory_level->MinVddcPhases);
2362
2363 memory_level->EnabledForThrottle = 1;
2364 memory_level->EnabledForActivity = 1;
2365 memory_level->UpH = 0;
2366 memory_level->DownH = 100;
2367 memory_level->VoltageDownH = 0;
2368 memory_level->ActivityLevel = (u16)pi->mclk_activity_target;
2369
2370 memory_level->StutterEnable = false;
2371 memory_level->StrobeEnable = false;
2372 memory_level->EdcReadEnable = false;
2373 memory_level->EdcWriteEnable = false;
2374 memory_level->RttEnable = false;
2375
2376 memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
2377
2378 if (pi->mclk_stutter_mode_threshold &&
2379 (memory_clock <= pi->mclk_stutter_mode_threshold) &&
2380 !pi->uvd_enabled &&
2381 (RREG32(DPG_PIPE_STUTTER_CONTROL) & STUTTER_ENABLE) &&
2382 (rdev->pm.dpm.new_active_crtc_count <= 2))
2383 memory_level->StutterEnable = true;
2384
2385 if (pi->mclk_strobe_mode_threshold &&
2386 (memory_clock <= pi->mclk_strobe_mode_threshold))
2387 memory_level->StrobeEnable = true;
2388
2389 if (pi->mem_gddr5) {
2390 memory_level->StrobeRatio =
2391 si_get_mclk_frequency_ratio(memory_clock, memory_level->StrobeEnable);
2392 if (pi->mclk_edc_enable_threshold &&
2393 (memory_clock > pi->mclk_edc_enable_threshold))
2394 memory_level->EdcReadEnable = true;
2395
2396 if (pi->mclk_edc_wr_enable_threshold &&
2397 (memory_clock > pi->mclk_edc_wr_enable_threshold))
2398 memory_level->EdcWriteEnable = true;
2399
2400 if (memory_level->StrobeEnable) {
2401 if (si_get_mclk_frequency_ratio(memory_clock, true) >=
2402 ((RREG32(MC_SEQ_MISC7) >> 16) & 0xf))
2403 dll_state_on = (RREG32(MC_SEQ_MISC5) >> 1) & 0x1;
2404 else
2405 dll_state_on = (RREG32(MC_SEQ_MISC6) >> 1) & 0x1;
2406 } else {
2407 dll_state_on = pi->dll_default_on;
2408 }
2409 } else {
2410 memory_level->StrobeRatio = si_get_ddr3_mclk_frequency_ratio(memory_clock);
2411 dll_state_on = (RREG32(MC_SEQ_MISC5) >> 1) & 0x1;
2412 }
2413
2414 ret = ci_calculate_mclk_params(rdev, memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);
2415 if (ret)
2416 return ret;
2417
2418 memory_level->MinVddc = cpu_to_be32(memory_level->MinVddc * VOLTAGE_SCALE);
2419 memory_level->MinVddcPhases = cpu_to_be32(memory_level->MinVddcPhases);
2420 memory_level->MinVddci = cpu_to_be32(memory_level->MinVddci * VOLTAGE_SCALE);
2421 memory_level->MinMvdd = cpu_to_be32(memory_level->MinMvdd * VOLTAGE_SCALE);
2422
2423 memory_level->MclkFrequency = cpu_to_be32(memory_level->MclkFrequency);
2424 memory_level->ActivityLevel = cpu_to_be16(memory_level->ActivityLevel);
2425 memory_level->MpllFuncCntl = cpu_to_be32(memory_level->MpllFuncCntl);
2426 memory_level->MpllFuncCntl_1 = cpu_to_be32(memory_level->MpllFuncCntl_1);
2427 memory_level->MpllFuncCntl_2 = cpu_to_be32(memory_level->MpllFuncCntl_2);
2428 memory_level->MpllAdFuncCntl = cpu_to_be32(memory_level->MpllAdFuncCntl);
2429 memory_level->MpllDqFuncCntl = cpu_to_be32(memory_level->MpllDqFuncCntl);
2430 memory_level->MclkPwrmgtCntl = cpu_to_be32(memory_level->MclkPwrmgtCntl);
2431 memory_level->DllCntl = cpu_to_be32(memory_level->DllCntl);
2432 memory_level->MpllSs1 = cpu_to_be32(memory_level->MpllSs1);
2433 memory_level->MpllSs2 = cpu_to_be32(memory_level->MpllSs2);
2434
2435 return 0;
2436 }
2437
2438 static int ci_populate_smc_acpi_level(struct radeon_device *rdev,
2439 SMU7_Discrete_DpmTable *table)
2440 {
2441 struct ci_power_info *pi = ci_get_pi(rdev);
2442 struct atom_clock_dividers dividers;
2443 SMU7_Discrete_VoltageLevel voltage_level;
2444 u32 spll_func_cntl = pi->clock_registers.cg_spll_func_cntl;
2445 u32 spll_func_cntl_2 = pi->clock_registers.cg_spll_func_cntl_2;
2446 u32 dll_cntl = pi->clock_registers.dll_cntl;
2447 u32 mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
2448 int ret;
2449
2450 table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
2451
2452 if (pi->acpi_vddc)
2453 table->ACPILevel.MinVddc = cpu_to_be32(pi->acpi_vddc * VOLTAGE_SCALE);
2454 else
2455 table->ACPILevel.MinVddc = cpu_to_be32(pi->min_vddc_in_pp_table * VOLTAGE_SCALE);
2456
2457 table->ACPILevel.MinVddcPhases = pi->vddc_phase_shed_control ? 0 : 1;
2458
2459 table->ACPILevel.SclkFrequency = rdev->clock.spll.reference_freq;
2460
2461 ret = radeon_atom_get_clock_dividers(rdev,
2462 COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
2463 table->ACPILevel.SclkFrequency, false, &dividers);
2464 if (ret)
2465 return ret;
2466
2467 table->ACPILevel.SclkDid = (u8)dividers.post_divider;
2468 table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
2469 table->ACPILevel.DeepSleepDivId = 0;
2470
2471 spll_func_cntl &= ~SPLL_PWRON;
2472 spll_func_cntl |= SPLL_RESET;
2473
2474 spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
2475 spll_func_cntl_2 |= SCLK_MUX_SEL(4);
2476
2477 table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
2478 table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
2479 table->ACPILevel.CgSpllFuncCntl3 = pi->clock_registers.cg_spll_func_cntl_3;
2480 table->ACPILevel.CgSpllFuncCntl4 = pi->clock_registers.cg_spll_func_cntl_4;
2481 table->ACPILevel.SpllSpreadSpectrum = pi->clock_registers.cg_spll_spread_spectrum;
2482 table->ACPILevel.SpllSpreadSpectrum2 = pi->clock_registers.cg_spll_spread_spectrum_2;
2483 table->ACPILevel.CcPwrDynRm = 0;
2484 table->ACPILevel.CcPwrDynRm1 = 0;
2485
2486 table->ACPILevel.Flags = cpu_to_be32(table->ACPILevel.Flags);
2487 table->ACPILevel.MinVddcPhases = cpu_to_be32(table->ACPILevel.MinVddcPhases);
2488 table->ACPILevel.SclkFrequency = cpu_to_be32(table->ACPILevel.SclkFrequency);
2489 table->ACPILevel.CgSpllFuncCntl = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl);
2490 table->ACPILevel.CgSpllFuncCntl2 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl2);
2491 table->ACPILevel.CgSpllFuncCntl3 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl3);
2492 table->ACPILevel.CgSpllFuncCntl4 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl4);
2493 table->ACPILevel.SpllSpreadSpectrum = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum);
2494 table->ACPILevel.SpllSpreadSpectrum2 = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum2);
2495 table->ACPILevel.CcPwrDynRm = cpu_to_be32(table->ACPILevel.CcPwrDynRm);
2496 table->ACPILevel.CcPwrDynRm1 = cpu_to_be32(table->ACPILevel.CcPwrDynRm1);
2497
2498 table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc;
2499 table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;
2500
2501 if (pi->vddci_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
2502 if (pi->acpi_vddci)
2503 table->MemoryACPILevel.MinVddci =
2504 cpu_to_be32(pi->acpi_vddci * VOLTAGE_SCALE);
2505 else
2506 table->MemoryACPILevel.MinVddci =
2507 cpu_to_be32(pi->min_vddci_in_pp_table * VOLTAGE_SCALE);
2508 }
2509
2510 if (ci_populate_mvdd_value(rdev, 0, &voltage_level))
2511 table->MemoryACPILevel.MinMvdd = 0;
2512 else
2513 table->MemoryACPILevel.MinMvdd =
2514 cpu_to_be32(voltage_level.Voltage * VOLTAGE_SCALE);
2515
2516 mclk_pwrmgt_cntl |= MRDCK0_RESET | MRDCK1_RESET;
2517 mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);
2518
2519 dll_cntl &= ~(MRDCK0_BYPASS | MRDCK1_BYPASS);
2520
2521 table->MemoryACPILevel.DllCntl = cpu_to_be32(dll_cntl);
2522 table->MemoryACPILevel.MclkPwrmgtCntl = cpu_to_be32(mclk_pwrmgt_cntl);
2523 table->MemoryACPILevel.MpllAdFuncCntl =
2524 cpu_to_be32(pi->clock_registers.mpll_ad_func_cntl);
2525 table->MemoryACPILevel.MpllDqFuncCntl =
2526 cpu_to_be32(pi->clock_registers.mpll_dq_func_cntl);
2527 table->MemoryACPILevel.MpllFuncCntl =
2528 cpu_to_be32(pi->clock_registers.mpll_func_cntl);
2529 table->MemoryACPILevel.MpllFuncCntl_1 =
2530 cpu_to_be32(pi->clock_registers.mpll_func_cntl_1);
2531 table->MemoryACPILevel.MpllFuncCntl_2 =
2532 cpu_to_be32(pi->clock_registers.mpll_func_cntl_2);
2533 table->MemoryACPILevel.MpllSs1 = cpu_to_be32(pi->clock_registers.mpll_ss1);
2534 table->MemoryACPILevel.MpllSs2 = cpu_to_be32(pi->clock_registers.mpll_ss2);
2535
2536 table->MemoryACPILevel.EnabledForThrottle = 0;
2537 table->MemoryACPILevel.EnabledForActivity = 0;
2538 table->MemoryACPILevel.UpH = 0;
2539 table->MemoryACPILevel.DownH = 100;
2540 table->MemoryACPILevel.VoltageDownH = 0;
2541 table->MemoryACPILevel.ActivityLevel =
2542 cpu_to_be16((u16)pi->mclk_activity_target);
2543
2544 table->MemoryACPILevel.StutterEnable = false;
2545 table->MemoryACPILevel.StrobeEnable = false;
2546 table->MemoryACPILevel.EdcReadEnable = false;
2547 table->MemoryACPILevel.EdcWriteEnable = false;
2548 table->MemoryACPILevel.RttEnable = false;
2549
2550 return 0;
2551 }
2552
2554 static int ci_enable_ulv(struct radeon_device *rdev, bool enable)
2555 {
2556 struct ci_power_info *pi = ci_get_pi(rdev);
2557 struct ci_ulv_parm *ulv = &pi->ulv;
2558
2559 if (ulv->supported) {
2560 if (enable)
2561 return (ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableULV) == PPSMC_Result_OK) ?
2562 0 : -EINVAL;
2563 else
2564 return (ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableULV) == PPSMC_Result_OK) ?
2565 0 : -EINVAL;
2566 }
2567
2568 return 0;
2569 }
2570
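/*
 * For SVID2 parts the ULV offset is converted from mV to VID steps via
 * VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1 = 100 / 625,
 * i.e. one step per 6.25 mV. Illustrative: entries[0].v = 900 and
 * ulv_voltage = 850 give a 50 mV offset and a VddcOffsetVid of 8.
 */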
2571 static int ci_populate_ulv_level(struct radeon_device *rdev,
2572 SMU7_Discrete_Ulv *state)
2573 {
2574 struct ci_power_info *pi = ci_get_pi(rdev);
2575 u16 ulv_voltage = rdev->pm.dpm.backbias_response_time;
2576
2577 state->CcPwrDynRm = 0;
2578 state->CcPwrDynRm1 = 0;
2579
2580 if (ulv_voltage == 0) {
2581 pi->ulv.supported = false;
2582 return 0;
2583 }
2584
2585 if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
2586 if (ulv_voltage > rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
2587 state->VddcOffset = 0;
2588 else
2589 state->VddcOffset =
2590 rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage;
2591 } else {
2592 if (ulv_voltage > rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
2593 state->VddcOffsetVid = 0;
2594 else
2595 state->VddcOffsetVid = (u8)
2596 ((rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage) *
2597 VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
2598 }
2599 state->VddcPhase = pi->vddc_phase_shed_control ? 0 : 1;
2600
2601 state->CcPwrDynRm = cpu_to_be32(state->CcPwrDynRm);
2602 state->CcPwrDynRm1 = cpu_to_be32(state->CcPwrDynRm1);
2603 state->VddcOffset = cpu_to_be16(state->VddcOffset);
2604
2605 return 0;
2606 }
2607
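/*
 * Engine PLL spread spectrum, as computed below:
 *   clk_s = ref * 5 / ((1 + ref_div) * ss.rate)
 *   clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000)
 */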
2608 static int ci_calculate_sclk_params(struct radeon_device *rdev,
2609 u32 engine_clock,
2610 SMU7_Discrete_GraphicsLevel *sclk)
2611 {
2612 struct ci_power_info *pi = ci_get_pi(rdev);
2613 struct atom_clock_dividers dividers;
2614 u32 spll_func_cntl_3 = pi->clock_registers.cg_spll_func_cntl_3;
2615 u32 spll_func_cntl_4 = pi->clock_registers.cg_spll_func_cntl_4;
2616 u32 cg_spll_spread_spectrum = pi->clock_registers.cg_spll_spread_spectrum;
2617 u32 cg_spll_spread_spectrum_2 = pi->clock_registers.cg_spll_spread_spectrum_2;
2618 u32 reference_clock = rdev->clock.spll.reference_freq;
2619 u32 reference_divider;
2620 u32 fbdiv;
2621 int ret;
2622
2623 ret = radeon_atom_get_clock_dividers(rdev,
2624 COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
2625 engine_clock, false, &dividers);
2626 if (ret)
2627 return ret;
2628
2629 reference_divider = 1 + dividers.ref_div;
2630 fbdiv = dividers.fb_div & 0x3FFFFFF;
2631
2632 spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK;
2633 spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv);
2634 spll_func_cntl_3 |= SPLL_DITHEN;
2635
2636 if (pi->caps_sclk_ss_support) {
2637 struct radeon_atom_ss ss;
2638 u32 vco_freq = engine_clock * dividers.post_div;
2639
2640 if (radeon_atombios_get_asic_ss_info(rdev, &ss,
2641 ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
2642 u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
2643 u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000);
2644
2645 cg_spll_spread_spectrum &= ~CLK_S_MASK;
2646 cg_spll_spread_spectrum |= CLK_S(clk_s);
2647 cg_spll_spread_spectrum |= SSEN;
2648
2649 cg_spll_spread_spectrum_2 &= ~CLK_V_MASK;
2650 cg_spll_spread_spectrum_2 |= CLK_V(clk_v);
2651 }
2652 }
2653
2654 sclk->SclkFrequency = engine_clock;
2655 sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
2656 sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
2657 sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
2658 sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
2659 sclk->SclkDid = (u8)dividers.post_divider;
2660
2661 return 0;
2662 }
2663
2664 static int ci_populate_single_graphic_level(struct radeon_device *rdev,
2665 u32 engine_clock,
2666 u16 sclk_activity_level_t,
2667 SMU7_Discrete_GraphicsLevel *graphic_level)
2668 {
2669 struct ci_power_info *pi = ci_get_pi(rdev);
2670 int ret;
2671
2672 ret = ci_calculate_sclk_params(rdev, engine_clock, graphic_level);
2673 if (ret)
2674 return ret;
2675
2676 ret = ci_get_dependency_volt_by_clk(rdev,
2677 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
2678 engine_clock, &graphic_level->MinVddc);
2679 if (ret)
2680 return ret;
2681
2682 graphic_level->SclkFrequency = engine_clock;
2683
2684 graphic_level->Flags = 0;
2685 graphic_level->MinVddcPhases = 1;
2686
2687 if (pi->vddc_phase_shed_control)
2688 ci_populate_phase_value_based_on_sclk(rdev,
2689 &rdev->pm.dpm.dyn_state.phase_shedding_limits_table,
2690 engine_clock,
2691 &graphic_level->MinVddcPhases);
2692
2693 graphic_level->ActivityLevel = sclk_activity_level_t;
2694
2695 graphic_level->CcPwrDynRm = 0;
2696 graphic_level->CcPwrDynRm1 = 0;
2697 graphic_level->EnabledForActivity = 1;
2698 graphic_level->EnabledForThrottle = 1;
2699 graphic_level->UpH = 0;
2700 graphic_level->DownH = 0;
2701 graphic_level->VoltageDownH = 0;
2702 graphic_level->PowerThrottle = 0;
2703
2704 if (pi->caps_sclk_ds)
2705 graphic_level->DeepSleepDivId = ci_get_sleep_divider_id_from_clock(rdev,
2706 engine_clock,
2707 CISLAND_MINIMUM_ENGINE_CLOCK);
2708
2709 graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
2710
2711 graphic_level->Flags = cpu_to_be32(graphic_level->Flags);
2712 graphic_level->MinVddc = cpu_to_be32(graphic_level->MinVddc * VOLTAGE_SCALE);
2713 graphic_level->MinVddcPhases = cpu_to_be32(graphic_level->MinVddcPhases);
2714 graphic_level->SclkFrequency = cpu_to_be32(graphic_level->SclkFrequency);
2715 graphic_level->ActivityLevel = cpu_to_be16(graphic_level->ActivityLevel);
2716 graphic_level->CgSpllFuncCntl3 = cpu_to_be32(graphic_level->CgSpllFuncCntl3);
2717 graphic_level->CgSpllFuncCntl4 = cpu_to_be32(graphic_level->CgSpllFuncCntl4);
2718 graphic_level->SpllSpreadSpectrum = cpu_to_be32(graphic_level->SpllSpreadSpectrum);
2719 graphic_level->SpllSpreadSpectrum2 = cpu_to_be32(graphic_level->SpllSpreadSpectrum2);
2720 graphic_level->CcPwrDynRm = cpu_to_be32(graphic_level->CcPwrDynRm);
2721 graphic_level->CcPwrDynRm1 = cpu_to_be32(graphic_level->CcPwrDynRm1);
2722
2723 return 0;
2724 }
2725
2726 static int ci_populate_all_graphic_levels(struct radeon_device *rdev)
2727 {
2728 struct ci_power_info *pi = ci_get_pi(rdev);
2729 struct ci_dpm_table *dpm_table = &pi->dpm_table;
2730 u32 level_array_address = pi->dpm_table_start +
2731 offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
2732 u32 level_array_size = sizeof(SMU7_Discrete_GraphicsLevel) *
2733 SMU7_MAX_LEVELS_GRAPHICS;
2734 SMU7_Discrete_GraphicsLevel *levels = pi->smc_state_table.GraphicsLevel;
2735 u32 i, ret;
2736
2737 memset(levels, 0, level_array_size);
2738
2739 for (i = 0; i < dpm_table->sclk_table.count; i++) {
2740 ret = ci_populate_single_graphic_level(rdev,
2741 dpm_table->sclk_table.dpm_levels[i].value,
2742 (u16)pi->activity_target[i],
2743 &pi->smc_state_table.GraphicsLevel[i]);
2744 if (ret)
2745 return ret;
2746 if (i == (dpm_table->sclk_table.count - 1))
2747 pi->smc_state_table.GraphicsLevel[i].DisplayWatermark =
2748 PPSMC_DISPLAY_WATERMARK_HIGH;
2749 }
2750
2751 pi->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count;
2752 pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
2753 ci_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
2754
2755 ret = ci_copy_bytes_to_smc(rdev, level_array_address,
2756 (u8 *)levels, level_array_size,
2757 pi->sram_end);
2758 if (ret)
2759 return ret;
2760
2761 return 0;
2762 }
2763
2764 static int ci_populate_ulv_state(struct radeon_device *rdev,
2765 SMU7_Discrete_Ulv *ulv_level)
2766 {
2767 return ci_populate_ulv_level(rdev, ulv_level);
2768 }
2769
2770 static int ci_populate_all_memory_levels(struct radeon_device *rdev)
2771 {
2772 struct ci_power_info *pi = ci_get_pi(rdev);
2773 struct ci_dpm_table *dpm_table = &pi->dpm_table;
2774 u32 level_array_address = pi->dpm_table_start +
2775 offsetof(SMU7_Discrete_DpmTable, MemoryLevel);
2776 u32 level_array_size = sizeof(SMU7_Discrete_MemoryLevel) *
2777 SMU7_MAX_LEVELS_MEMORY;
2778 SMU7_Discrete_MemoryLevel *levels = pi->smc_state_table.MemoryLevel;
2779 u32 i, ret;
2780
2781 memset(levels, 0, level_array_size);
2782
2783 for (i = 0; i < dpm_table->mclk_table.count; i++) {
2784 if (dpm_table->mclk_table.dpm_levels[i].value == 0)
2785 return -EINVAL;
2786 ret = ci_populate_single_memory_level(rdev,
2787 dpm_table->mclk_table.dpm_levels[i].value,
2788 &pi->smc_state_table.MemoryLevel[i]);
2789 if (ret)
2790 return ret;
2791 }
2792
2793 pi->smc_state_table.MemoryLevel[0].ActivityLevel = cpu_to_be16(0x1F);
2794
2795 pi->smc_state_table.MemoryDpmLevelCount = (u8)dpm_table->mclk_table.count;
2796 pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
2797 ci_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
2798
2799 pi->smc_state_table.MemoryLevel[dpm_table->mclk_table.count - 1].DisplayWatermark =
2800 PPSMC_DISPLAY_WATERMARK_HIGH;
2801
2802 ret = ci_copy_bytes_to_smc(rdev, level_array_address,
2803 (u8 *)levels, level_array_size,
2804 pi->sram_end);
2805 if (ret)
2806 return ret;
2807
2808 return 0;
2809 }
2810
2811 static void ci_reset_single_dpm_table(struct radeon_device *rdev,
2812 struct ci_single_dpm_table *dpm_table,
2813 u32 count)
2814 {
2815 u32 i;
2816
2817 dpm_table->count = count;
2818 for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++)
2819 dpm_table->dpm_levels[i].enabled = false;
2820 }
2821
2822 static void ci_setup_pcie_table_entry(struct ci_single_dpm_table *dpm_table,
2823 u32 index, u32 pcie_gen, u32 pcie_lanes)
2824 {
2825 dpm_table->dpm_levels[index].value = pcie_gen;
2826 dpm_table->dpm_levels[index].param1 = pcie_lanes;
2827 dpm_table->dpm_levels[index].enabled = true;
2828 }
2829
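/*
 * The default PCIe table always has six entries, alternating the
 * powersaving and performance ranges:
 *   0: ps.gen.min / ps.lane.min    1: perf.gen.min / perf.lane.min
 *   2: ps.gen.min / ps.lane.max    3: perf.gen.min / perf.lane.max
 *   4: ps.gen.max / ps.lane.max    5: perf.gen.max / perf.lane.max
 * If only one set of levels is valid it is mirrored into the other.
 */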
2830 static int ci_setup_default_pcie_tables(struct radeon_device *rdev)
2831 {
2832 struct ci_power_info *pi = ci_get_pi(rdev);
2833
2834 if (!pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels)
2835 return -EINVAL;
2836
2837 if (pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels) {
2838 pi->pcie_gen_powersaving = pi->pcie_gen_performance;
2839 pi->pcie_lane_powersaving = pi->pcie_lane_performance;
2840 } else if (!pi->use_pcie_performance_levels && pi->use_pcie_powersaving_levels) {
2841 pi->pcie_gen_performance = pi->pcie_gen_powersaving;
2842 pi->pcie_lane_performance = pi->pcie_lane_powersaving;
2843 }
2844
2845 ci_reset_single_dpm_table(rdev,
2846 &pi->dpm_table.pcie_speed_table,
2847 SMU7_MAX_LEVELS_LINK);
2848
2849 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
2850 pi->pcie_gen_powersaving.min,
2851 pi->pcie_lane_powersaving.min);
2852 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 1,
2853 pi->pcie_gen_performance.min,
2854 pi->pcie_lane_performance.min);
2855 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 2,
2856 pi->pcie_gen_powersaving.min,
2857 pi->pcie_lane_powersaving.max);
2858 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 3,
2859 pi->pcie_gen_performance.min,
2860 pi->pcie_lane_performance.max);
2861 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 4,
2862 pi->pcie_gen_powersaving.max,
2863 pi->pcie_lane_powersaving.max);
2864 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 5,
2865 pi->pcie_gen_performance.max,
2866 pi->pcie_lane_performance.max);
2867
2868 pi->dpm_table.pcie_speed_table.count = 6;
2869
2870 return 0;
2871 }
2872
2873 static int ci_setup_default_dpm_tables(struct radeon_device *rdev)
2874 {
2875 struct ci_power_info *pi = ci_get_pi(rdev);
2876 struct radeon_clock_voltage_dependency_table *allowed_sclk_vddc_table =
2877 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
2878 struct radeon_clock_voltage_dependency_table *allowed_mclk_table =
2879 &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
2880 struct radeon_cac_leakage_table *std_voltage_table =
2881 &rdev->pm.dpm.dyn_state.cac_leakage_table;
2882 u32 i;
2883
2884 if (allowed_sclk_vddc_table == NULL)
2885 return -EINVAL;
2886 if (allowed_sclk_vddc_table->count < 1)
2887 return -EINVAL;
2888 if (allowed_mclk_table == NULL)
2889 return -EINVAL;
2890 if (allowed_mclk_table->count < 1)
2891 return -EINVAL;
2892
2893 memset(&pi->dpm_table, 0, sizeof(struct ci_dpm_table));
2894
2895 ci_reset_single_dpm_table(rdev,
2896 &pi->dpm_table.sclk_table,
2897 SMU7_MAX_LEVELS_GRAPHICS);
2898 ci_reset_single_dpm_table(rdev,
2899 &pi->dpm_table.mclk_table,
2900 SMU7_MAX_LEVELS_MEMORY);
2901 ci_reset_single_dpm_table(rdev,
2902 &pi->dpm_table.vddc_table,
2903 SMU7_MAX_LEVELS_VDDC);
2904 ci_reset_single_dpm_table(rdev,
2905 &pi->dpm_table.vddci_table,
2906 SMU7_MAX_LEVELS_VDDCI);
2907 ci_reset_single_dpm_table(rdev,
2908 &pi->dpm_table.mvdd_table,
2909 SMU7_MAX_LEVELS_MVDD);
2910
2911 pi->dpm_table.sclk_table.count = 0;
2912 for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
2913 if ((i == 0) ||
2914 (pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count-1].value !=
2915 allowed_sclk_vddc_table->entries[i].clk)) {
2916 pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].value =
2917 allowed_sclk_vddc_table->entries[i].clk;
2918 pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].enabled = true;
2919 pi->dpm_table.sclk_table.count++;
2920 }
2921 }
2922
2923 pi->dpm_table.mclk_table.count = 0;
2924 for (i = 0; i < allowed_mclk_table->count; i++) {
2925 if ((i == 0) ||
2926 (pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count-1].value !=
2927 allowed_mclk_table->entries[i].clk)) {
2928 pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].value =
2929 allowed_mclk_table->entries[i].clk;
2930 pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].enabled = true;
2931 pi->dpm_table.mclk_table.count++;
2932 }
2933 }
2934
2935 for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
2936 pi->dpm_table.vddc_table.dpm_levels[i].value =
2937 allowed_sclk_vddc_table->entries[i].v;
2938 pi->dpm_table.vddc_table.dpm_levels[i].param1 =
2939 std_voltage_table->entries[i].leakage;
2940 pi->dpm_table.vddc_table.dpm_levels[i].enabled = true;
2941 }
2942 pi->dpm_table.vddc_table.count = allowed_sclk_vddc_table->count;
2943
2944 allowed_mclk_table = &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
2945 if (allowed_mclk_table) {
2946 for (i = 0; i < allowed_mclk_table->count; i++) {
2947 pi->dpm_table.vddci_table.dpm_levels[i].value =
2948 allowed_mclk_table->entries[i].v;
2949 pi->dpm_table.vddci_table.dpm_levels[i].enabled = true;
2950 }
2951 pi->dpm_table.vddci_table.count = allowed_mclk_table->count;
2952 }
2953
2954 allowed_mclk_table = &rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk;
2955 if (allowed_mclk_table) {
2956 for (i = 0; i < allowed_mclk_table->count; i++) {
2957 pi->dpm_table.mvdd_table.dpm_levels[i].value =
2958 allowed_mclk_table->entries[i].v;
2959 pi->dpm_table.mvdd_table.dpm_levels[i].enabled = true;
2960 }
2961 pi->dpm_table.mvdd_table.count = allowed_mclk_table->count;
2962 }
2963
2964 ci_setup_default_pcie_tables(rdev);
2965
2966 return 0;
2967 }
2968
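/*
 * Scan the whole table for @value; if it occurs more than once the
 * last match wins. Returns -EINVAL when the boot value is absent.
 */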
2969 static int ci_find_boot_level(struct ci_single_dpm_table *table,
2970 u32 value, u32 *boot_level)
2971 {
2972 u32 i;
2973 int ret = -EINVAL;
2974
2975 for (i = 0; i < table->count; i++) {
2976 if (value == table->dpm_levels[i].value) {
2977 *boot_level = i;
2978 ret = 0;
2979 }
2980 }
2981
2982 return ret;
2983 }
2984
2985 static int ci_init_smc_table(struct radeon_device *rdev)
2986 {
2987 struct ci_power_info *pi = ci_get_pi(rdev);
2988 struct ci_ulv_parm *ulv = &pi->ulv;
2989 struct radeon_ps *radeon_boot_state = rdev->pm.dpm.boot_ps;
2990 SMU7_Discrete_DpmTable *table = &pi->smc_state_table;
2991 int ret;
2992
2993 ret = ci_setup_default_dpm_tables(rdev);
2994 if (ret)
2995 return ret;
2996
2997 if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE)
2998 ci_populate_smc_voltage_tables(rdev, table);
2999
3000 ci_init_fps_limits(rdev);
3001
3002 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC)
3003 table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
3004
3005 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
3006 table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
3007
3008 if (pi->mem_gddr5)
3009 table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
3010
3011 if (ulv->supported) {
3012 ret = ci_populate_ulv_state(rdev, &pi->smc_state_table.Ulv);
3013 if (ret)
3014 return ret;
3015 WREG32_SMC(CG_ULV_PARAMETER, ulv->cg_ulv_parameter);
3016 }
3017
3018 ret = ci_populate_all_graphic_levels(rdev);
3019 if (ret)
3020 return ret;
3021
3022 ret = ci_populate_all_memory_levels(rdev);
3023 if (ret)
3024 return ret;
3025
3026 ci_populate_smc_link_level(rdev, table);
3027
3028 ret = ci_populate_smc_acpi_level(rdev, table);
3029 if (ret)
3030 return ret;
3031
3032 ret = ci_populate_smc_vce_level(rdev, table);
3033 if (ret)
3034 return ret;
3035
3036 ret = ci_populate_smc_acp_level(rdev, table);
3037 if (ret)
3038 return ret;
3039
3040 ret = ci_populate_smc_samu_level(rdev, table);
3041 if (ret)
3042 return ret;
3043
3044 ret = ci_do_program_memory_timing_parameters(rdev);
3045 if (ret)
3046 return ret;
3047
3048 ret = ci_populate_smc_uvd_level(rdev, table);
3049 if (ret)
3050 return ret;
3051
3052 table->UvdBootLevel = 0;
3053 table->VceBootLevel = 0;
3054 table->AcpBootLevel = 0;
3055 table->SamuBootLevel = 0;
3056 table->GraphicsBootLevel = 0;
3057 table->MemoryBootLevel = 0;
3058
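/*
 * GraphicsBootLevel/MemoryBootLevel are single bytes in the SMC table;
 * the u32 casts below appear to rely on a little-endian CPU and on the
 * clobbered neighbouring bytes being rewritten immediately afterwards.
 */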
3059 ret = ci_find_boot_level(&pi->dpm_table.sclk_table,
3060 pi->vbios_boot_state.sclk_bootup_value,
3061 (u32 *)&pi->smc_state_table.GraphicsBootLevel);
3062
3063 ret = ci_find_boot_level(&pi->dpm_table.mclk_table,
3064 pi->vbios_boot_state.mclk_bootup_value,
3065 (u32 *)&pi->smc_state_table.MemoryBootLevel);
3066
3067 table->BootVddc = pi->vbios_boot_state.vddc_bootup_value;
3068 table->BootVddci = pi->vbios_boot_state.vddci_bootup_value;
3069 table->BootMVdd = pi->vbios_boot_state.mvdd_bootup_value;
3070
3071 ci_populate_smc_initial_state(rdev, radeon_boot_state);
3072
3073 ret = ci_populate_bapm_parameters_in_dpm_table(rdev);
3074 if (ret)
3075 return ret;
3076
3077 table->UVDInterval = 1;
3078 table->VCEInterval = 1;
3079 table->ACPInterval = 1;
3080 table->SAMUInterval = 1;
3081 table->GraphicsVoltageChangeEnable = 1;
3082 table->GraphicsThermThrottleEnable = 1;
3083 table->GraphicsInterval = 1;
3084 table->VoltageInterval = 1;
3085 table->ThermalInterval = 1;
3086 table->TemperatureLimitHigh = (u16)((pi->thermal_temp_setting.temperature_high *
3087 CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
3088 table->TemperatureLimitLow = (u16)((pi->thermal_temp_setting.temperature_low *
3089 CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
3090 table->MemoryVoltageChangeEnable = 1;
3091 table->MemoryInterval = 1;
3092 table->VoltageResponseTime = 0;
3093 table->VddcVddciDelta = 4000;
3094 table->PhaseResponseTime = 0;
3095 table->MemoryThermThrottleEnable = 1;
3096 table->PCIeBootLinkLevel = 0;
3097 table->PCIeGenInterval = 1;
3098 if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2)
3099 table->SVI2Enable = 1;
3100 else
3101 table->SVI2Enable = 0;
3102
3103 table->ThermGpio = 17;
3104 table->SclkStepSize = 0x4000;
3105
3106 table->SystemFlags = cpu_to_be32(table->SystemFlags);
3107 table->SmioMaskVddcVid = cpu_to_be32(table->SmioMaskVddcVid);
3108 table->SmioMaskVddcPhase = cpu_to_be32(table->SmioMaskVddcPhase);
3109 table->SmioMaskVddciVid = cpu_to_be32(table->SmioMaskVddciVid);
3110 table->SmioMaskMvddVid = cpu_to_be32(table->SmioMaskMvddVid);
3111 table->SclkStepSize = cpu_to_be32(table->SclkStepSize);
3112 table->TemperatureLimitHigh = cpu_to_be16(table->TemperatureLimitHigh);
3113 table->TemperatureLimitLow = cpu_to_be16(table->TemperatureLimitLow);
3114 table->VddcVddciDelta = cpu_to_be16(table->VddcVddciDelta);
3115 table->VoltageResponseTime = cpu_to_be16(table->VoltageResponseTime);
3116 table->PhaseResponseTime = cpu_to_be16(table->PhaseResponseTime);
3117 table->BootVddc = cpu_to_be16(table->BootVddc * VOLTAGE_SCALE);
3118 table->BootVddci = cpu_to_be16(table->BootVddci * VOLTAGE_SCALE);
3119 table->BootMVdd = cpu_to_be16(table->BootMVdd * VOLTAGE_SCALE);
3120
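/*
 * Upload everything from SystemFlags onward, minus the trailing PID
 * controller blocks; the "3 * sizeof(SMU7_PIDController)" presumably
 * corresponds to the graphics, memory and link PID controllers at the
 * end of SMU7_Discrete_DpmTable, which the driver leaves untouched.
 */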
3121 ret = ci_copy_bytes_to_smc(rdev,
3122 pi->dpm_table_start +
3123 offsetof(SMU7_Discrete_DpmTable, SystemFlags),
3124 (u8 *)&table->SystemFlags,
3125 sizeof(SMU7_Discrete_DpmTable) - 3 * sizeof(SMU7_PIDController),
3126 pi->sram_end);
3127 if (ret)
3128 return ret;
3129
3130 return 0;
3131 }
3132
3133 static void ci_trim_single_dpm_states(struct radeon_device *rdev,
3134 struct ci_single_dpm_table *dpm_table,
3135 u32 low_limit, u32 high_limit)
3136 {
3137 u32 i;
3138
3139 for (i = 0; i < dpm_table->count; i++) {
3140 if ((dpm_table->dpm_levels[i].value < low_limit) ||
3141 (dpm_table->dpm_levels[i].value > high_limit))
3142 dpm_table->dpm_levels[i].enabled = false;
3143 else
3144 dpm_table->dpm_levels[i].enabled = true;
3145 }
3146 }
3147
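/*
 * After range-trimming, a second pass disables any level that repeats
 * an earlier enabled (gen, lanes) pair, so the uploaded PCIe table
 * contains no duplicates.
 */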
3148 static void ci_trim_pcie_dpm_states(struct radeon_device *rdev,
3149 u32 speed_low, u32 lanes_low,
3150 u32 speed_high, u32 lanes_high)
3151 {
3152 struct ci_power_info *pi = ci_get_pi(rdev);
3153 struct ci_single_dpm_table *pcie_table = &pi->dpm_table.pcie_speed_table;
3154 u32 i, j;
3155
3156 for (i = 0; i < pcie_table->count; i++) {
3157 if ((pcie_table->dpm_levels[i].value < speed_low) ||
3158 (pcie_table->dpm_levels[i].param1 < lanes_low) ||
3159 (pcie_table->dpm_levels[i].value > speed_high) ||
3160 (pcie_table->dpm_levels[i].param1 > lanes_high))
3161 pcie_table->dpm_levels[i].enabled = false;
3162 else
3163 pcie_table->dpm_levels[i].enabled = true;
3164 }
3165
3166 for (i = 0; i < pcie_table->count; i++) {
3167 if (pcie_table->dpm_levels[i].enabled) {
3168 for (j = i + 1; j < pcie_table->count; j++) {
3169 if (pcie_table->dpm_levels[j].enabled) {
3170 if ((pcie_table->dpm_levels[i].value == pcie_table->dpm_levels[j].value) &&
3171 (pcie_table->dpm_levels[i].param1 == pcie_table->dpm_levels[j].param1))
3172 pcie_table->dpm_levels[j].enabled = false;
3173 }
3174 }
3175 }
3176 }
3177 }
3178
3179 static int ci_trim_dpm_states(struct radeon_device *rdev,
3180 struct radeon_ps *radeon_state)
3181 {
3182 struct ci_ps *state = ci_get_ps(radeon_state);
3183 struct ci_power_info *pi = ci_get_pi(rdev);
3184 u32 high_limit_count;
3185
3186 if (state->performance_level_count < 1)
3187 return -EINVAL;
3188
3189 if (state->performance_level_count == 1)
3190 high_limit_count = 0;
3191 else
3192 high_limit_count = 1;
3193
3194 ci_trim_single_dpm_states(rdev,
3195 &pi->dpm_table.sclk_table,
3196 state->performance_levels[0].sclk,
3197 state->performance_levels[high_limit_count].sclk);
3198
3199 ci_trim_single_dpm_states(rdev,
3200 &pi->dpm_table.mclk_table,
3201 state->performance_levels[0].mclk,
3202 state->performance_levels[high_limit_count].mclk);
3203
3204 ci_trim_pcie_dpm_states(rdev,
3205 state->performance_levels[0].pcie_gen,
3206 state->performance_levels[0].pcie_lane,
3207 state->performance_levels[high_limit_count].pcie_gen,
3208 state->performance_levels[high_limit_count].pcie_lane);
3209
3210 return 0;
3211 }
3212
3213 static int ci_apply_disp_minimum_voltage_request(struct radeon_device *rdev)
3214 {
3215 struct radeon_clock_voltage_dependency_table *disp_voltage_table =
3216 &rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk;
3217 struct radeon_clock_voltage_dependency_table *vddc_table =
3218 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
3219 u32 requested_voltage = 0;
3220 u32 i;
3221
3222 if (disp_voltage_table == NULL)
3223 return -EINVAL;
3224 if (!disp_voltage_table->count)
3225 return -EINVAL;
3226
3227 for (i = 0; i < disp_voltage_table->count; i++) {
3228 if (rdev->clock.current_dispclk == disp_voltage_table->entries[i].clk)
3229 requested_voltage = disp_voltage_table->entries[i].v;
3230 }
3231
3232 for (i = 0; i < vddc_table->count; i++) {
3233 if (requested_voltage <= vddc_table->entries[i].v) {
3234 requested_voltage = vddc_table->entries[i].v;
3235 return (ci_send_msg_to_smc_with_parameter(rdev,
3236 PPSMC_MSG_VddC_Request,
3237 requested_voltage * VOLTAGE_SCALE) == PPSMC_Result_OK) ?
3238 0 : -EINVAL;
3239 }
3240 }
3241
3242 return -EINVAL;
3243 }
3244
3245 static int ci_upload_dpm_level_enable_mask(struct radeon_device *rdev)
3246 {
3247 struct ci_power_info *pi = ci_get_pi(rdev);
3248 PPSMC_Result result;
3249
3250 if (!pi->sclk_dpm_key_disabled) {
3251 if (pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
3252 result = ci_send_msg_to_smc_with_parameter(rdev,
3253 PPSMC_MSG_SCLKDPM_SetEnabledMask,
3254 pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
3255 if (result != PPSMC_Result_OK)
3256 return -EINVAL;
3257 }
3258 }
3259
3260 if (!pi->mclk_dpm_key_disabled) {
3261 if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
3262 result = ci_send_msg_to_smc_with_parameter(rdev,
3263 PPSMC_MSG_MCLKDPM_SetEnabledMask,
3264 pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
3265 if (result != PPSMC_Result_OK)
3266 return -EINVAL;
3267 }
3268 }
3269
3270 if (!pi->pcie_dpm_key_disabled) {
3271 if (pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
3272 result = ci_send_msg_to_smc_with_parameter(rdev,
3273 PPSMC_MSG_PCIeDPM_SetEnabledMask,
3274 pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
3275 if (result != PPSMC_Result_OK)
3276 return -EINVAL;
3277 }
3278 }
3279
3280 ci_apply_disp_minimum_voltage_request(rdev);
3281
3282 return 0;
3283 }
3284
3285 static void ci_find_dpm_states_clocks_in_dpm_table(struct radeon_device *rdev,
3286 struct radeon_ps *radeon_state)
3287 {
3288 struct ci_power_info *pi = ci_get_pi(rdev);
3289 struct ci_ps *state = ci_get_ps(radeon_state);
3290 struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table;
3291 u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
3292 struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table;
3293 u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
3294 u32 i;
3295
3296 pi->need_update_smu7_dpm_table = 0;
3297
3298 for (i = 0; i < sclk_table->count; i++) {
3299 if (sclk == sclk_table->dpm_levels[i].value)
3300 break;
3301 }
3302
3303 if (i >= sclk_table->count) {
3304 pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
3305 } else {
3306 /* XXX check display min clock requirements */
3307 if (0 != CISLAND_MINIMUM_ENGINE_CLOCK)
3308 pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
3309 }
3310
3311 for (i = 0; i < mclk_table->count; i++) {
3312 if (mclk == mclk_table->dpm_levels[i].value)
3313 break;
3314 }
3315
3316 if (i >= mclk_table->count)
3317 pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
3318
3319 if (rdev->pm.dpm.current_active_crtc_count !=
3320 rdev->pm.dpm.new_active_crtc_count)
3321 pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
3322 }
3323
3324 static int ci_populate_and_upload_sclk_mclk_dpm_levels(struct radeon_device *rdev,
3325 struct radeon_ps *radeon_state)
3326 {
3327 struct ci_power_info *pi = ci_get_pi(rdev);
3328 struct ci_ps *state = ci_get_ps(radeon_state);
3329 u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
3330 u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
3331 struct ci_dpm_table *dpm_table = &pi->dpm_table;
3332 int ret;
3333
3334 if (!pi->need_update_smu7_dpm_table)
3335 return 0;
3336
3337 if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK)
3338 dpm_table->sclk_table.dpm_levels[dpm_table->sclk_table.count-1].value = sclk;
3339
3340 if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)
3341 dpm_table->mclk_table.dpm_levels[dpm_table->mclk_table.count-1].value = mclk;
3342
3343 if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK)) {
3344 ret = ci_populate_all_graphic_levels(rdev);
3345 if (ret)
3346 return ret;
3347 }
3348
3349 if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_MCLK | DPMTABLE_UPDATE_MCLK)) {
3350 ret = ci_populate_all_memory_levels(rdev);
3351 if (ret)
3352 return ret;
3353 }
3354
3355 return 0;
3356 }
3357
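/*
 * Enabling UVD DPM also masks off the lowest MCLK level (bit 0 of
 * mclk_dpm_enable_mask), presumably to keep the memory clock up while
 * decoding; the bit is restored when UVD DPM is disabled again.
 */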
3358 static int ci_enable_uvd_dpm(struct radeon_device *rdev, bool enable)
3359 {
3360 struct ci_power_info *pi = ci_get_pi(rdev);
3361 const struct radeon_clock_and_voltage_limits *max_limits;
3362 int i;
3363
3364 if (rdev->pm.dpm.ac_power)
3365 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
3366 else
3367 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
3368
3369 if (enable) {
3370 pi->dpm_level_enable_mask.uvd_dpm_enable_mask = 0;
3371
3372 for (i = rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
3373 if (rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
3374 pi->dpm_level_enable_mask.uvd_dpm_enable_mask |= 1 << i;
3375
3376 if (!pi->caps_uvd_dpm)
3377 break;
3378 }
3379 }
3380
3381 ci_send_msg_to_smc_with_parameter(rdev,
3382 PPSMC_MSG_UVDDPM_SetEnabledMask,
3383 pi->dpm_level_enable_mask.uvd_dpm_enable_mask);
3384
3385 if (pi->last_mclk_dpm_enable_mask & 0x1) {
3386 pi->uvd_enabled = true;
3387 pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
3388 ci_send_msg_to_smc_with_parameter(rdev,
3389 PPSMC_MSG_MCLKDPM_SetEnabledMask,
3390 pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
3391 }
3392 } else {
3393 if (pi->last_mclk_dpm_enable_mask & 0x1) {
3394 pi->uvd_enabled = false;
3395 pi->dpm_level_enable_mask.mclk_dpm_enable_mask |= 1;
3396 ci_send_msg_to_smc_with_parameter(rdev,
3397 PPSMC_MSG_MCLKDPM_SetEnabledMask,
3398 pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
3399 }
3400 }
3401
3402 return (ci_send_msg_to_smc(rdev, enable ?
3403 PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable) == PPSMC_Result_OK) ?
3404 0 : -EINVAL;
3405 }
3406
3407 #if 0
3408 static int ci_enable_vce_dpm(struct radeon_device *rdev, bool enable)
3409 {
3410 struct ci_power_info *pi = ci_get_pi(rdev);
3411 const struct radeon_clock_and_voltage_limits *max_limits;
3412 int i;
3413
3414 if (rdev->pm.dpm.ac_power)
3415 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
3416 else
3417 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
3418
3419 if (enable) {
3420 pi->dpm_level_enable_mask.vce_dpm_enable_mask = 0;
3421 for (i = rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
3422 if (rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
3423 pi->dpm_level_enable_mask.vce_dpm_enable_mask |= 1 << i;
3424
3425 if (!pi->caps_vce_dpm)
3426 break;
3427 }
3428 }
3429
3430 ci_send_msg_to_smc_with_parameter(rdev,
3431 PPSMC_MSG_VCEDPM_SetEnabledMask,
3432 pi->dpm_level_enable_mask.vce_dpm_enable_mask);
3433 }
3434
3435 return (ci_send_msg_to_smc(rdev, enable ?
3436 PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable) == PPSMC_Result_OK) ?
3437 0 : -EINVAL;
3438 }
3439
3440 static int ci_enable_samu_dpm(struct radeon_device *rdev, bool enable)
3441 {
3442 struct ci_power_info *pi = ci_get_pi(rdev);
3443 const struct radeon_clock_and_voltage_limits *max_limits;
3444 int i;
3445
3446 if (rdev->pm.dpm.ac_power)
3447 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
3448 else
3449 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
3450
3451 if (enable) {
3452 pi->dpm_level_enable_mask.samu_dpm_enable_mask = 0;
3453 for (i = rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
3454 if (rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
3455 pi->dpm_level_enable_mask.samu_dpm_enable_mask |= 1 << i;
3456
3457 if (!pi->caps_samu_dpm)
3458 break;
3459 }
3460 }
3461
3462 ci_send_msg_to_smc_with_parameter(rdev,
3463 PPSMC_MSG_SAMUDPM_SetEnabledMask,
3464 pi->dpm_level_enable_mask.samu_dpm_enable_mask);
3465 }
3466 return (ci_send_msg_to_smc(rdev, enable ?
3467 PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable) == PPSMC_Result_OK) ?
3468 0 : -EINVAL;
3469 }
3470
3471 static int ci_enable_acp_dpm(struct radeon_device *rdev, bool enable)
3472 {
3473 struct ci_power_info *pi = ci_get_pi(rdev);
3474 const struct radeon_clock_and_voltage_limits *max_limits;
3475 int i;
3476
3477 if (rdev->pm.dpm.ac_power)
3478 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
3479 else
3480 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
3481
3482 if (enable) {
3483 pi->dpm_level_enable_mask.acp_dpm_enable_mask = 0;
3484 for (i = rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
3485 if (rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
3486 pi->dpm_level_enable_mask.acp_dpm_enable_mask |= 1 << i;
3487
3488 if (!pi->caps_acp_dpm)
3489 break;
3490 }
3491 }
3492
3493 ci_send_msg_to_smc_with_parameter(rdev,
3494 PPSMC_MSG_ACPDPM_SetEnabledMask,
3495 pi->dpm_level_enable_mask.acp_dpm_enable_mask);
3496 }
3497
3498 return (ci_send_msg_to_smc(rdev, enable ?
3499 PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable) == PPSMC_Result_OK) ?
3500 0 : -EINVAL;
3501 }
3502 #endif
3503
3504 static int ci_update_uvd_dpm(struct radeon_device *rdev, bool gate)
3505 {
3506 struct ci_power_info *pi = ci_get_pi(rdev);
3507 u32 tmp;
3508
3509 if (!gate) {
3510 if (pi->caps_uvd_dpm ||
3511 (rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count <= 0))
3512 pi->smc_state_table.UvdBootLevel = 0;
3513 else
3514 pi->smc_state_table.UvdBootLevel =
3515 rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1;
3516
3517 tmp = RREG32_SMC(DPM_TABLE_475);
3518 tmp &= ~UvdBootLevel_MASK;
3519 tmp |= UvdBootLevel(pi->smc_state_table.UvdBootLevel);
3520 WREG32_SMC(DPM_TABLE_475, tmp);
3521 }
3522
3523 return ci_enable_uvd_dpm(rdev, !gate);
3524 }
3525
3526 #if 0
3527 static u8 ci_get_vce_boot_level(struct radeon_device *rdev)
3528 {
3529 u8 i;
3530 u32 min_evclk = 30000; /* ??? */
3531 struct radeon_vce_clock_voltage_dependency_table *table =
3532 &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
3533
3534 for (i = 0; i < table->count; i++) {
3535 if (table->entries[i].evclk >= min_evclk)
3536 return i;
3537 }
3538
3539 return table->count - 1;
3540 }
3541
3542 static int ci_update_vce_dpm(struct radeon_device *rdev,
3543 struct radeon_ps *radeon_new_state,
3544 struct radeon_ps *radeon_current_state)
3545 {
3546 struct ci_power_info *pi = ci_get_pi(rdev);
3547 bool new_vce_clock_non_zero = (radeon_new_state->evclk != 0);
3548 bool old_vce_clock_non_zero = (radeon_current_state->evclk != 0);
3549 int ret = 0;
3550 u32 tmp;
3551
3552 if (new_vce_clock_non_zero != old_vce_clock_non_zero) {
3553 if (new_vce_clock_non_zero) {
3554 pi->smc_state_table.VceBootLevel = ci_get_vce_boot_level(rdev);
3555
3556 tmp = RREG32_SMC(DPM_TABLE_475);
3557 tmp &= ~VceBootLevel_MASK;
3558 tmp |= VceBootLevel(pi->smc_state_table.VceBootLevel);
3559 WREG32_SMC(DPM_TABLE_475, tmp);
3560
3561 ret = ci_enable_vce_dpm(rdev, true);
3562 } else {
3563 ret = ci_enable_vce_dpm(rdev, false);
3564 }
3565 }
3566 return ret;
3567 }
3568
3569 static int ci_update_samu_dpm(struct radeon_device *rdev, bool gate)
3570 {
3571 return ci_enable_samu_dpm(rdev, gate);
3572 }
3573
3574 static int ci_update_acp_dpm(struct radeon_device *rdev, bool gate)
3575 {
3576 struct ci_power_info *pi = ci_get_pi(rdev);
3577 u32 tmp;
3578
3579 if (!gate) {
3580 pi->smc_state_table.AcpBootLevel = 0;
3581
3582 tmp = RREG32_SMC(DPM_TABLE_475);
3583 tmp &= ~AcpBootLevel_MASK;
3584 tmp |= AcpBootLevel(pi->smc_state_table.AcpBootLevel);
3585 WREG32_SMC(DPM_TABLE_475, tmp);
3586 }
3587
3588 return ci_enable_acp_dpm(rdev, !gate);
3589 }
3590 #endif
3591
3592 static int ci_generate_dpm_level_enable_mask(struct radeon_device *rdev,
3593 struct radeon_ps *radeon_state)
3594 {
3595 struct ci_power_info *pi = ci_get_pi(rdev);
3596 int ret;
3597
3598 ret = ci_trim_dpm_states(rdev, radeon_state);
3599 if (ret)
3600 return ret;
3601
3602 pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
3603 ci_get_dpm_level_enable_mask_value(&pi->dpm_table.sclk_table);
3604 pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
3605 ci_get_dpm_level_enable_mask_value(&pi->dpm_table.mclk_table);
3606 pi->last_mclk_dpm_enable_mask =
3607 pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
3608 if (pi->uvd_enabled) {
3609 if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask & 1)
3610 pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
3611 }
3612 pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
3613 ci_get_dpm_level_enable_mask_value(&pi->dpm_table.pcie_speed_table);
3614
3615 return 0;
3616 }
3617
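/* Returns the index of the lowest set bit; callers must pass a
 * non-zero mask or this loop will never terminate. */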
3618 static u32 ci_get_lowest_enabled_level(struct radeon_device *rdev,
3619 u32 level_mask)
3620 {
3621 u32 level = 0;
3622
3623 while ((level_mask & (1 << level)) == 0)
3624 level++;
3625
3626 return level;
3627 }
3628
3630 int ci_dpm_force_performance_level(struct radeon_device *rdev,
3631 enum radeon_dpm_forced_level level)
3632 {
3633 struct ci_power_info *pi = ci_get_pi(rdev);
3634 PPSMC_Result smc_result;
3635 u32 tmp, levels, i;
3636 int ret;
3637
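/* For the HIGH level, compute the index of the highest enabled DPM
 * level (position of the top set bit), force it, then poll the
 * current-profile index registers until the hardware has switched. */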
3638 if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
3639 if ((!pi->sclk_dpm_key_disabled) &&
3640 pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
3641 levels = 0;
3642 tmp = pi->dpm_level_enable_mask.sclk_dpm_enable_mask;
3643 while (tmp >>= 1)
3644 levels++;
3645 if (levels) {
3646 ret = ci_dpm_force_state_sclk(rdev, levels);
3647 if (ret)
3648 return ret;
3649 for (i = 0; i < rdev->usec_timeout; i++) {
3650 tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
3651 CURR_SCLK_INDEX_MASK) >> CURR_SCLK_INDEX_SHIFT;
3652 if (tmp == levels)
3653 break;
3654 udelay(1);
3655 }
3656 }
3657 }
3658 if ((!pi->mclk_dpm_key_disabled) &&
3659 pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
3660 levels = 0;
3661 tmp = pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
3662 while (tmp >>= 1)
3663 levels++;
3664 if (levels) {
3665 ret = ci_dpm_force_state_mclk(rdev, levels);
3666 if (ret)
3667 return ret;
3668 for (i = 0; i < rdev->usec_timeout; i++) {
3669 tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
3670 CURR_MCLK_INDEX_MASK) >> CURR_MCLK_INDEX_SHIFT;
3671 if (tmp == levels)
3672 break;
3673 udelay(1);
3674 }
3675 }
3676 }
3677 if ((!pi->pcie_dpm_key_disabled) &&
3678 pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
3679 levels = 0;
3680 tmp = pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
3681 while (tmp >>= 1)
3682 levels++;
3683 if (levels) {
3684 ret = ci_dpm_force_state_pcie(rdev, levels);
3685 if (ret)
3686 return ret;
3687 for (i = 0; i < rdev->usec_timeout; i++) {
3688 tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX_1) &
3689 CURR_PCIE_INDEX_MASK) >> CURR_PCIE_INDEX_SHIFT;
3690 if (tmp == levels)
3691 break;
3692 udelay(1);
3693 }
3694 }
3695 }
3696 } else if (level == RADEON_DPM_FORCED_LEVEL_LOW) {
3697 if ((!pi->sclk_dpm_key_disabled) &&
3698 pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
3699 levels = ci_get_lowest_enabled_level(rdev,
3700 pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
3701 ret = ci_dpm_force_state_sclk(rdev, levels);
3702 if (ret)
3703 return ret;
3704 for (i = 0; i < rdev->usec_timeout; i++) {
3705 tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
3706 CURR_SCLK_INDEX_MASK) >> CURR_SCLK_INDEX_SHIFT;
3707 if (tmp == levels)
3708 break;
3709 udelay(1);
3710 }
3711 }
3712 if ((!pi->mclk_dpm_key_disabled) &&
3713 pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
3714 levels = ci_get_lowest_enabled_level(rdev,
3715 pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
3716 ret = ci_dpm_force_state_mclk(rdev, levels);
3717 if (ret)
3718 return ret;
3719 for (i = 0; i < rdev->usec_timeout; i++) {
3720 tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
3721 CURR_MCLK_INDEX_MASK) >> CURR_MCLK_INDEX_SHIFT;
3722 if (tmp == levels)
3723 break;
3724 udelay(1);
3725 }
3726 }
3727 if ((!pi->pcie_dpm_key_disabled) &&
3728 pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
3729 levels = ci_get_lowest_enabled_level(rdev,
3730 pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
3731 ret = ci_dpm_force_state_pcie(rdev, levels);
3732 if (ret)
3733 return ret;
3734 for (i = 0; i < rdev->usec_timeout; i++) {
3735 tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX_1) &
3736 CURR_PCIE_INDEX_MASK) >> CURR_PCIE_INDEX_SHIFT;
3737 if (tmp == levels)
3738 break;
3739 udelay(1);
3740 }
3741 }
3742 } else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) {
3743 if (!pi->sclk_dpm_key_disabled) {
3744 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_NoForcedLevel);
3745 if (smc_result != PPSMC_Result_OK)
3746 return -EINVAL;
3747 }
3748 if (!pi->mclk_dpm_key_disabled) {
3749 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_NoForcedLevel);
3750 if (smc_result != PPSMC_Result_OK)
3751 return -EINVAL;
3752 }
3753 if (!pi->pcie_dpm_key_disabled) {
3754 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_UnForceLevel);
3755 if (smc_result != PPSMC_Result_OK)
3756 return -EINVAL;
3757 }
3758 }
3759
3760 rdev->pm.dpm.forced_level = level;
3761
3762 return 0;
3763 }
3764
3765 static int ci_set_mc_special_registers(struct radeon_device *rdev,
3766 struct ci_mc_reg_table *table)
3767 {
3768 struct ci_power_info *pi = ci_get_pi(rdev);
3769 u8 i, j, k;
3770 u32 temp_reg;
3771
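/* Append derived register entries (EMRS/MRS/MRS1 command shadows)
 * after the existing ones; j starts at table->last and grows as
 * entries are added. */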
3772 for (i = 0, j = table->last; i < table->last; i++) {
3773 if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
3774 return -EINVAL;
3775 switch (table->mc_reg_address[i].s1 << 2) {
3776 case MC_SEQ_MISC1:
3777 temp_reg = RREG32(MC_PMG_CMD_EMRS);
3778 table->mc_reg_address[j].s1 = MC_PMG_CMD_EMRS >> 2;
3779 table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
3780 for (k = 0; k < table->num_entries; k++) {
3781 table->mc_reg_table_entry[k].mc_data[j] =
3782 ((temp_reg & 0xffff0000)) | ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
3783 }
3784 j++;
3785 if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
3786 return -EINVAL;
3787
3788 temp_reg = RREG32(MC_PMG_CMD_MRS);
3789 table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS >> 2;
3790 table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS_LP >> 2;
3791 for (k = 0; k < table->num_entries; k++) {
3792 table->mc_reg_table_entry[k].mc_data[j] =
3793 (temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
3794 if (!pi->mem_gddr5)
3795 table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
3796 }
3797 j++;
3798 if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
3799 return -EINVAL;
3800
3801 if (!pi->mem_gddr5) {
3802 table->mc_reg_address[j].s1 = MC_PMG_AUTO_CMD >> 2;
3803 table->mc_reg_address[j].s0 = MC_PMG_AUTO_CMD >> 2;
3804 for (k = 0; k < table->num_entries; k++) {
3805 table->mc_reg_table_entry[k].mc_data[j] =
3806 (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
3807 }
3808 j++;
3809 if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
3810 return -EINVAL;
3811 }
3812 break;
3813 case MC_SEQ_RESERVE_M:
3814 temp_reg = RREG32(MC_PMG_CMD_MRS1);
3815 table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS1 >> 2;
3816 table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
3817 for (k = 0; k < table->num_entries; k++) {
3818 table->mc_reg_table_entry[k].mc_data[j] =
3819 (temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
3820 }
3821 j++;
3822 if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
3823 return -EINVAL;
3824 break;
3825 default:
3826 break;
3827 }
3828
3829 }
3830
3831 table->last = j;
3832
3833 return 0;
3834 }
3835
3836 static bool ci_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
3837 {
3838 bool result = true;
3839
3840 switch (in_reg) {
3841 case MC_SEQ_RAS_TIMING >> 2:
3842 *out_reg = MC_SEQ_RAS_TIMING_LP >> 2;
3843 break;
3844 case MC_SEQ_DLL_STBY >> 2:
3845 *out_reg = MC_SEQ_DLL_STBY_LP >> 2;
3846 break;
3847 case MC_SEQ_G5PDX_CMD0 >> 2:
3848 *out_reg = MC_SEQ_G5PDX_CMD0_LP >> 2;
3849 break;
3850 case MC_SEQ_G5PDX_CMD1 >> 2:
3851 *out_reg = MC_SEQ_G5PDX_CMD1_LP >> 2;
3852 break;
3853 case MC_SEQ_G5PDX_CTRL >> 2:
3854 *out_reg = MC_SEQ_G5PDX_CTRL_LP >> 2;
3855 break;
3856 case MC_SEQ_CAS_TIMING >> 2:
3857 *out_reg = MC_SEQ_CAS_TIMING_LP >> 2;
3858 break;
3859 case MC_SEQ_MISC_TIMING >> 2:
3860 *out_reg = MC_SEQ_MISC_TIMING_LP >> 2;
3861 break;
3862 case MC_SEQ_MISC_TIMING2 >> 2:
3863 *out_reg = MC_SEQ_MISC_TIMING2_LP >> 2;
3864 break;
3865 case MC_SEQ_PMG_DVS_CMD >> 2:
3866 *out_reg = MC_SEQ_PMG_DVS_CMD_LP >> 2;
3867 break;
3868 case MC_SEQ_PMG_DVS_CTL >> 2:
3869 *out_reg = MC_SEQ_PMG_DVS_CTL_LP >> 2;
3870 break;
3871 case MC_SEQ_RD_CTL_D0 >> 2:
3872 *out_reg = MC_SEQ_RD_CTL_D0_LP >> 2;
3873 break;
3874 case MC_SEQ_RD_CTL_D1 >> 2:
3875 *out_reg = MC_SEQ_RD_CTL_D1_LP >> 2;
3876 break;
3877 case MC_SEQ_WR_CTL_D0 >> 2:
3878 *out_reg = MC_SEQ_WR_CTL_D0_LP >> 2;
3879 break;
3880 case MC_SEQ_WR_CTL_D1 >> 2:
3881 *out_reg = MC_SEQ_WR_CTL_D1_LP >> 2;
3882 break;
3883 case MC_PMG_CMD_EMRS >> 2:
3884 *out_reg = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
3885 break;
3886 case MC_PMG_CMD_MRS >> 2:
3887 *out_reg = MC_SEQ_PMG_CMD_MRS_LP >> 2;
3888 break;
3889 case MC_PMG_CMD_MRS1 >> 2:
3890 *out_reg = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
3891 break;
3892 case MC_SEQ_PMG_TIMING >> 2:
3893 *out_reg = MC_SEQ_PMG_TIMING_LP >> 2;
3894 break;
3895 case MC_PMG_CMD_MRS2 >> 2:
3896 *out_reg = MC_SEQ_PMG_CMD_MRS2_LP >> 2;
3897 break;
3898 case MC_SEQ_WR_CTL_2 >> 2:
3899 *out_reg = MC_SEQ_WR_CTL_2_LP >> 2;
3900 break;
3901 default:
3902 result = false;
3903 break;
3904 }
3905
3906 return result;
3907 }
3908
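/* A register only needs per-level programming if its value actually
 * differs between adjacent entries; constant registers stay unflagged. */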
3909 static void ci_set_valid_flag(struct ci_mc_reg_table *table)
3910 {
3911 u8 i, j;
3912
3913 for (i = 0; i < table->last; i++) {
3914 for (j = 1; j < table->num_entries; j++) {
3915 if (table->mc_reg_table_entry[j-1].mc_data[i] !=
3916 table->mc_reg_table_entry[j].mc_data[i]) {
3917 table->valid_flag |= 1 << i;
3918 break;
3919 }
3920 }
3921 }
3922 }
3923
3924 static void ci_set_s0_mc_reg_index(struct ci_mc_reg_table *table)
3925 {
3926 u32 i;
3927 u16 address;
3928
3929 for (i = 0; i < table->last; i++) {
3930 table->mc_reg_address[i].s0 =
3931 ci_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ?
3932 address : table->mc_reg_address[i].s1;
3933 }
3934 }
3935
3936 static int ci_copy_vbios_mc_reg_table(const struct atom_mc_reg_table *table,
3937 struct ci_mc_reg_table *ci_table)
3938 {
3939 u8 i, j;
3940
3941 if (table->last > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
3942 return -EINVAL;
3943 if (table->num_entries > MAX_AC_TIMING_ENTRIES)
3944 return -EINVAL;
3945
3946 for (i = 0; i < table->last; i++)
3947 ci_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
3948
3949 ci_table->last = table->last;
3950
3951 for (i = 0; i < table->num_entries; i++) {
3952 ci_table->mc_reg_table_entry[i].mclk_max =
3953 table->mc_reg_table_entry[i].mclk_max;
3954 for (j = 0; j < table->last; j++)
3955 ci_table->mc_reg_table_entry[i].mc_data[j] =
3956 table->mc_reg_table_entry[i].mc_data[j];
3957 }
3958 ci_table->num_entries = table->num_entries;
3959
3960 return 0;
3961 }
3962
3963 static int ci_initialize_mc_reg_table(struct radeon_device *rdev)
3964 {
3965 struct ci_power_info *pi = ci_get_pi(rdev);
3966 struct atom_mc_reg_table *table;
3967 struct ci_mc_reg_table *ci_table = &pi->mc_reg_table;
3968 u8 module_index = rv770_get_memory_module_index(rdev);
3969 int ret;
3970
3971 table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL);
3972 if (!table)
3973 return -ENOMEM;
3974
3975 WREG32(MC_SEQ_RAS_TIMING_LP, RREG32(MC_SEQ_RAS_TIMING));
3976 WREG32(MC_SEQ_CAS_TIMING_LP, RREG32(MC_SEQ_CAS_TIMING));
3977 WREG32(MC_SEQ_DLL_STBY_LP, RREG32(MC_SEQ_DLL_STBY));
3978 WREG32(MC_SEQ_G5PDX_CMD0_LP, RREG32(MC_SEQ_G5PDX_CMD0));
3979 WREG32(MC_SEQ_G5PDX_CMD1_LP, RREG32(MC_SEQ_G5PDX_CMD1));
3980 WREG32(MC_SEQ_G5PDX_CTRL_LP, RREG32(MC_SEQ_G5PDX_CTRL));
3981 WREG32(MC_SEQ_PMG_DVS_CMD_LP, RREG32(MC_SEQ_PMG_DVS_CMD));
3982 WREG32(MC_SEQ_PMG_DVS_CTL_LP, RREG32(MC_SEQ_PMG_DVS_CTL));
3983 WREG32(MC_SEQ_MISC_TIMING_LP, RREG32(MC_SEQ_MISC_TIMING));
3984 WREG32(MC_SEQ_MISC_TIMING2_LP, RREG32(MC_SEQ_MISC_TIMING2));
3985 WREG32(MC_SEQ_PMG_CMD_EMRS_LP, RREG32(MC_PMG_CMD_EMRS));
3986 WREG32(MC_SEQ_PMG_CMD_MRS_LP, RREG32(MC_PMG_CMD_MRS));
3987 WREG32(MC_SEQ_PMG_CMD_MRS1_LP, RREG32(MC_PMG_CMD_MRS1));
3988 WREG32(MC_SEQ_WR_CTL_D0_LP, RREG32(MC_SEQ_WR_CTL_D0));
3989 WREG32(MC_SEQ_WR_CTL_D1_LP, RREG32(MC_SEQ_WR_CTL_D1));
3990 WREG32(MC_SEQ_RD_CTL_D0_LP, RREG32(MC_SEQ_RD_CTL_D0));
3991 WREG32(MC_SEQ_RD_CTL_D1_LP, RREG32(MC_SEQ_RD_CTL_D1));
3992 WREG32(MC_SEQ_PMG_TIMING_LP, RREG32(MC_SEQ_PMG_TIMING));
3993 WREG32(MC_SEQ_PMG_CMD_MRS2_LP, RREG32(MC_PMG_CMD_MRS2));
3994 WREG32(MC_SEQ_WR_CTL_2_LP, RREG32(MC_SEQ_WR_CTL_2));
3995
3996 ret = radeon_atom_init_mc_reg_table(rdev, module_index, table);
3997 if (ret)
3998 goto init_mc_done;
3999
4000 ret = ci_copy_vbios_mc_reg_table(table, ci_table);
4001 if (ret)
4002 goto init_mc_done;
4003
4004 ci_set_s0_mc_reg_index(ci_table);
4005
4006 ret = ci_set_mc_special_registers(rdev, ci_table);
4007 if (ret)
4008 goto init_mc_done;
4009
4010 ci_set_valid_flag(ci_table);
4011
4012 init_mc_done:
4013 kfree(table);
4014
4015 return ret;
4016 }
4017
4018 static int ci_populate_mc_reg_addresses(struct radeon_device *rdev,
4019 SMU7_Discrete_MCRegisters *mc_reg_table)
4020 {
4021 struct ci_power_info *pi = ci_get_pi(rdev);
4022 u32 i, j;
4023
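/* Compact the addresses of flagged registers into consecutive SMC
 * slots: j walks the driver table, i the SMC-side array. */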
4024 for (i = 0, j = 0; j < pi->mc_reg_table.last; j++) {
4025 if (pi->mc_reg_table.valid_flag & (1 << j)) {
4026 if (i >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4027 return -EINVAL;
4028 mc_reg_table->address[i].s0 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s0);
4029 mc_reg_table->address[i].s1 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s1);
4030 i++;
4031 }
4032 }
4033
4034 mc_reg_table->last = (u8)i;
4035
4036 return 0;
4037 }
4038
4039 static void ci_convert_mc_registers(const struct ci_mc_reg_entry *entry,
4040 SMU7_Discrete_MCRegisterSet *data,
4041 u32 num_entries, u32 valid_flag)
4042 {
4043 u32 i, j;
4044
4045 for (i = 0, j = 0; j < num_entries; j++) {
4046 if (valid_flag & (1 << j)) {
4047 data->value[i] = cpu_to_be32(entry->mc_data[j]);
4048 i++;
4049 }
4050 }
4051 }
4052
4053 static void ci_convert_mc_reg_table_entry_to_smc(struct radeon_device *rdev,
4054 const u32 memory_clock,
4055 SMU7_Discrete_MCRegisterSet *mc_reg_table_data)
4056 {
4057 struct ci_power_info *pi = ci_get_pi(rdev);
4058 u32 i;
4059
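/* Pick the first entry that covers memory_clock, clamping to the last
 * entry if the requested clock exceeds every mclk_max. */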
4060 for (i = 0; i < pi->mc_reg_table.num_entries; i++) {
4061 if (memory_clock <= pi->mc_reg_table.mc_reg_table_entry[i].mclk_max)
4062 break;
4063 }
4064
4065 if ((i == pi->mc_reg_table.num_entries) && (i > 0))
4066 --i;
4067
4068 ci_convert_mc_registers(&pi->mc_reg_table.mc_reg_table_entry[i],
4069 mc_reg_table_data, pi->mc_reg_table.last,
4070 pi->mc_reg_table.valid_flag);
4071 }
4072
4073 static void ci_convert_mc_reg_table_to_smc(struct radeon_device *rdev,
4074 SMU7_Discrete_MCRegisters *mc_reg_table)
4075 {
4076 struct ci_power_info *pi = ci_get_pi(rdev);
4077 u32 i;
4078
4079 for (i = 0; i < pi->dpm_table.mclk_table.count; i++)
4080 ci_convert_mc_reg_table_entry_to_smc(rdev,
4081 pi->dpm_table.mclk_table.dpm_levels[i].value,
4082 &mc_reg_table->data[i]);
4083 }
4084
4085 static int ci_populate_initial_mc_reg_table(struct radeon_device *rdev)
4086 {
4087 struct ci_power_info *pi = ci_get_pi(rdev);
4088 int ret;
4089
4090 memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));
4091
4092 ret = ci_populate_mc_reg_addresses(rdev, &pi->smc_mc_reg_table);
4093 if (ret)
4094 return ret;
4095 ci_convert_mc_reg_table_to_smc(rdev, &pi->smc_mc_reg_table);
4096
4097 return ci_copy_bytes_to_smc(rdev,
4098 pi->mc_reg_table_start,
4099 (u8 *)&pi->smc_mc_reg_table,
4100 sizeof(SMU7_Discrete_MCRegisters),
4101 pi->sram_end);
4102 }
4103
4104 static int ci_update_and_upload_mc_reg_table(struct radeon_device *rdev)
4105 {
4106 struct ci_power_info *pi = ci_get_pi(rdev);
4107
4108 if (!(pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
4109 return 0;
4110
4111 memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));
4112
4113 ci_convert_mc_reg_table_to_smc(rdev, &pi->smc_mc_reg_table);
4114
4115 return ci_copy_bytes_to_smc(rdev,
4116 pi->mc_reg_table_start +
4117 offsetof(SMU7_Discrete_MCRegisters, data[0]),
4118 (u8 *)&pi->smc_mc_reg_table.data[0],
4119 sizeof(SMU7_Discrete_MCRegisterSet) *
4120 pi->dpm_table.mclk_table.count,
4121 pi->sram_end);
4122 }
4123
4124 static void ci_enable_voltage_control(struct radeon_device *rdev)
4125 {
4126 u32 tmp = RREG32_SMC(GENERAL_PWRMGT);
4127
4128 tmp |= VOLT_PWRMGT_EN;
4129 WREG32_SMC(GENERAL_PWRMGT, tmp);
4130 }
4131
4132 static enum radeon_pcie_gen ci_get_maximum_link_speed(struct radeon_device *rdev,
4133 struct radeon_ps *radeon_state)
4134 {
4135 struct ci_ps *state = ci_get_ps(radeon_state);
4136 int i;
4137 u16 pcie_speed, max_speed = 0;
4138
4139 for (i = 0; i < state->performance_level_count; i++) {
4140 pcie_speed = state->performance_levels[i].pcie_gen;
4141 if (max_speed < pcie_speed)
4142 max_speed = pcie_speed;
4143 }
4144
4145 return max_speed;
4146 }
4147
4148 static u16 ci_get_current_pcie_speed(struct radeon_device *rdev)
4149 {
4150 u32 speed_cntl = 0;
4151
4152 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL) & LC_CURRENT_DATA_RATE_MASK;
4153 speed_cntl >>= LC_CURRENT_DATA_RATE_SHIFT;
4154
4155 return (u16)speed_cntl;
4156 }
4157
4158 static int ci_get_current_pcie_lane_number(struct radeon_device *rdev)
4159 {
4160 u32 link_width = 0;
4161
4162 link_width = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL) & LC_LINK_WIDTH_RD_MASK;
4163 link_width >>= LC_LINK_WIDTH_RD_SHIFT;
4164
4165 switch (link_width) {
4166 case RADEON_PCIE_LC_LINK_WIDTH_X1:
4167 return 1;
4168 case RADEON_PCIE_LC_LINK_WIDTH_X2:
4169 return 2;
4170 case RADEON_PCIE_LC_LINK_WIDTH_X4:
4171 return 4;
4172 case RADEON_PCIE_LC_LINK_WIDTH_X8:
4173 return 8;
4174 case RADEON_PCIE_LC_LINK_WIDTH_X12:
4175 /* not actually supported */
4176 return 12;
4177 case RADEON_PCIE_LC_LINK_WIDTH_X0:
4178 case RADEON_PCIE_LC_LINK_WIDTH_X16:
4179 default:
4180 return 16;
4181 }
4182 }
4183
4184 static void ci_request_link_speed_change_before_state_change(struct radeon_device *rdev,
4185 struct radeon_ps *radeon_new_state,
4186 struct radeon_ps *radeon_current_state)
4187 {
4188 struct ci_power_info *pi = ci_get_pi(rdev);
4189 enum radeon_pcie_gen target_link_speed =
4190 ci_get_maximum_link_speed(rdev, radeon_new_state);
4191 enum radeon_pcie_gen current_link_speed;
4192
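/* If the new state wants a faster link, ask the platform for it before
 * switching; if it wants a slower one, just note that a notification is
 * needed after the state change. */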
4193 if (pi->force_pcie_gen == RADEON_PCIE_GEN_INVALID)
4194 current_link_speed = ci_get_maximum_link_speed(rdev, radeon_current_state);
4195 else
4196 current_link_speed = pi->force_pcie_gen;
4197
4198 pi->force_pcie_gen = RADEON_PCIE_GEN_INVALID;
4199 pi->pspp_notify_required = false;
4200 if (target_link_speed > current_link_speed) {
4201 switch (target_link_speed) {
4202 case RADEON_PCIE_GEN3:
4203 if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN3, false) == 0)
4204 break;
4205 pi->force_pcie_gen = RADEON_PCIE_GEN2;
4206 if (current_link_speed == RADEON_PCIE_GEN2)
4207 break;
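/* fall through: gen3 request failed, try gen2 */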
4208 case RADEON_PCIE_GEN2:
4209 if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN2, false) == 0)
4210 break;
4211 default:
4212 pi->force_pcie_gen = ci_get_current_pcie_speed(rdev);
4213 break;
4214 }
4215 } else {
4216 if (target_link_speed < current_link_speed)
4217 pi->pspp_notify_required = true;
4218 }
4219 }
4220
4221 static void ci_notify_link_speed_change_after_state_change(struct radeon_device *rdev,
4222 struct radeon_ps *radeon_new_state,
4223 struct radeon_ps *radeon_current_state)
4224 {
4225 struct ci_power_info *pi = ci_get_pi(rdev);
4226 enum radeon_pcie_gen target_link_speed =
4227 ci_get_maximum_link_speed(rdev, radeon_new_state);
4228 u8 request;
4229
4230 if (pi->pspp_notify_required) {
4231 if (target_link_speed == RADEON_PCIE_GEN3)
4232 request = PCIE_PERF_REQ_PECI_GEN3;
4233 else if (target_link_speed == RADEON_PCIE_GEN2)
4234 request = PCIE_PERF_REQ_PECI_GEN2;
4235 else
4236 request = PCIE_PERF_REQ_PECI_GEN1;
4237
4238 if ((request == PCIE_PERF_REQ_PECI_GEN1) &&
4239 (ci_get_current_pcie_speed(rdev) > 0))
4240 return;
4241
4242 radeon_acpi_pcie_performance_request(rdev, request, false);
4243 }
4244 }
4245
4246 static int ci_set_private_data_variables_based_on_pptable(struct radeon_device *rdev)
4247 {
4248 struct ci_power_info *pi = ci_get_pi(rdev);
4249 struct radeon_clock_voltage_dependency_table *allowed_sclk_vddc_table =
4250 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
4251 struct radeon_clock_voltage_dependency_table *allowed_mclk_vddc_table =
4252 &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
4253 struct radeon_clock_voltage_dependency_table *allowed_mclk_vddci_table =
4254 &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
4255
4256 if (allowed_sclk_vddc_table == NULL)
4257 return -EINVAL;
4258 if (allowed_sclk_vddc_table->count < 1)
4259 return -EINVAL;
4260 if (allowed_mclk_vddc_table == NULL)
4261 return -EINVAL;
4262 if (allowed_mclk_vddc_table->count < 1)
4263 return -EINVAL;
4264 if (allowed_mclk_vddci_table == NULL)
4265 return -EINVAL;
4266 if (allowed_mclk_vddci_table->count < 1)
4267 return -EINVAL;
4268
4269 pi->min_vddc_in_pp_table = allowed_sclk_vddc_table->entries[0].v;
4270 pi->max_vddc_in_pp_table =
4271 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
4272
4273 pi->min_vddci_in_pp_table = allowed_mclk_vddci_table->entries[0].v;
4274 pi->max_vddci_in_pp_table =
4275 allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
4276
4277 rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk =
4278 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
4279 rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk =
4280 allowed_mclk_vddc_table->entries[allowed_mclk_vddc_table->count - 1].clk;
4281 rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc =
4282 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
4283 rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci =
4284 allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
4285
4286 return 0;
4287 }
4288
4289 static void ci_patch_with_vddc_leakage(struct radeon_device *rdev, u16 *vddc)
4290 {
4291 struct ci_power_info *pi = ci_get_pi(rdev);
4292 struct ci_leakage_voltage *leakage_table = &pi->vddc_leakage;
4293 u32 leakage_index;
4294
4295 for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
4296 if (leakage_table->leakage_id[leakage_index] == *vddc) {
4297 *vddc = leakage_table->actual_voltage[leakage_index];
4298 break;
4299 }
4300 }
4301 }
4302
4303 static void ci_patch_with_vddci_leakage(struct radeon_device *rdev, u16 *vddci)
4304 {
4305 struct ci_power_info *pi = ci_get_pi(rdev);
4306 struct ci_leakage_voltage *leakage_table = &pi->vddci_leakage;
4307 u32 leakage_index;
4308
4309 for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
4310 if (leakage_table->leakage_id[leakage_index] == *vddci) {
4311 *vddci = leakage_table->actual_voltage[leakage_index];
4312 break;
4313 }
4314 }
4315 }
4316
4317 static void ci_patch_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev,
4318 struct radeon_clock_voltage_dependency_table *table)
4319 {
4320 u32 i;
4321
4322 if (table) {
4323 for (i = 0; i < table->count; i++)
4324 ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
4325 }
4326 }
4327
4328 static void ci_patch_clock_voltage_dependency_table_with_vddci_leakage(struct radeon_device *rdev,
4329 struct radeon_clock_voltage_dependency_table *table)
4330 {
4331 u32 i;
4332
4333 if (table) {
4334 for (i = 0; i < table->count; i++)
4335 ci_patch_with_vddci_leakage(rdev, &table->entries[i].v);
4336 }
4337 }
4338
4339 static void ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev,
4340 struct radeon_vce_clock_voltage_dependency_table *table)
4341 {
4342 u32 i;
4343
4344 if (table) {
4345 for (i = 0; i < table->count; i++)
4346 ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
4347 }
4348 }
4349
4350 static void ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev,
4351 struct radeon_uvd_clock_voltage_dependency_table *table)
4352 {
4353 u32 i;
4354
4355 if (table) {
4356 for (i = 0; i < table->count; i++)
4357 ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
4358 }
4359 }
4360
4361 static void ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(struct radeon_device *rdev,
4362 struct radeon_phase_shedding_limits_table *table)
4363 {
4364 u32 i;
4365
4366 if (table) {
4367 for (i = 0; i < table->count; i++)
4368 ci_patch_with_vddc_leakage(rdev, &table->entries[i].voltage);
4369 }
4370 }
4371
4372 static void ci_patch_clock_voltage_limits_with_vddc_leakage(struct radeon_device *rdev,
4373 struct radeon_clock_and_voltage_limits *table)
4374 {
4375 if (table) {
4376 ci_patch_with_vddc_leakage(rdev, (u16 *)&table->vddc);
4377 ci_patch_with_vddci_leakage(rdev, (u16 *)&table->vddci);
4378 }
4379 }
4380
4381 static void ci_patch_cac_leakage_table_with_vddc_leakage(struct radeon_device *rdev,
4382 struct radeon_cac_leakage_table *table)
4383 {
4384 u32 i;
4385
4386 if (table) {
4387 for (i = 0; i < table->count; i++)
4388 ci_patch_with_vddc_leakage(rdev, &table->entries[i].vddc);
4389 }
4390 }
4391
4392 static void ci_patch_dependency_tables_with_leakage(struct radeon_device *rdev)
4393 {
4395 ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
4396 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk);
4397 ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
4398 &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk);
4399 ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
4400 &rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk);
4401 ci_patch_clock_voltage_dependency_table_with_vddci_leakage(rdev,
4402 &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk);
4403 ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(rdev,
4404 &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table);
4405 ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(rdev,
4406 &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table);
4407 ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
4408 &rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table);
4409 ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
4410 &rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table);
4411 ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(rdev,
4412 &rdev->pm.dpm.dyn_state.phase_shedding_limits_table);
4413 ci_patch_clock_voltage_limits_with_vddc_leakage(rdev,
4414 &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
4415 ci_patch_clock_voltage_limits_with_vddc_leakage(rdev,
4416 &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc);
4417 ci_patch_cac_leakage_table_with_vddc_leakage(rdev,
4418 &rdev->pm.dpm.dyn_state.cac_leakage_table);
4420 }
4421
4422 static void ci_get_memory_type(struct radeon_device *rdev)
4423 {
4424 struct ci_power_info *pi = ci_get_pi(rdev);
4425 u32 tmp;
4426
4427 tmp = RREG32(MC_SEQ_MISC0);
4428
4429 if (((tmp & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT) ==
4430 MC_SEQ_MISC0_GDDR5_VALUE)
4431 pi->mem_gddr5 = true;
4432 else
4433 pi->mem_gddr5 = false;
4435 }
4436
4437 void ci_update_current_ps(struct radeon_device *rdev,
4438 struct radeon_ps *rps)
4439 {
4440 struct ci_ps *new_ps = ci_get_ps(rps);
4441 struct ci_power_info *pi = ci_get_pi(rdev);
4442
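/* Copy both structures and re-point ps_priv at the private copy so the
 * cached state stays valid after the source radeon_ps goes away. */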
4443 pi->current_rps = *rps;
4444 pi->current_ps = *new_ps;
4445 pi->current_rps.ps_priv = &pi->current_ps;
4446 }
4447
4448 void ci_update_requested_ps(struct radeon_device *rdev,
4449 struct radeon_ps *rps)
4450 {
4451 struct ci_ps *new_ps = ci_get_ps(rps);
4452 struct ci_power_info *pi = ci_get_pi(rdev);
4453
4454 pi->requested_rps = *rps;
4455 pi->requested_ps = *new_ps;
4456 pi->requested_rps.ps_priv = &pi->requested_ps;
4457 }
4458
4459 int ci_dpm_pre_set_power_state(struct radeon_device *rdev)
4460 {
4461 struct ci_power_info *pi = ci_get_pi(rdev);
4462 struct radeon_ps requested_ps = *rdev->pm.dpm.requested_ps;
4463 struct radeon_ps *new_ps = &requested_ps;
4464
4465 ci_update_requested_ps(rdev, new_ps);
4466
4467 ci_apply_state_adjust_rules(rdev, &pi->requested_rps);
4468
4469 return 0;
4470 }
4471
4472 void ci_dpm_post_set_power_state(struct radeon_device *rdev)
4473 {
4474 struct ci_power_info *pi = ci_get_pi(rdev);
4475 struct radeon_ps *new_ps = &pi->requested_rps;
4476
4477 ci_update_current_ps(rdev, new_ps);
4478 }
4479
4481 void ci_dpm_setup_asic(struct radeon_device *rdev)
4482 {
4483 ci_read_clock_registers(rdev);
4484 ci_get_memory_type(rdev);
4485 ci_enable_acpi_power_management(rdev);
4486 ci_init_sclk_t(rdev);
4487 }
4488
4489 int ci_dpm_enable(struct radeon_device *rdev)
4490 {
4491 struct ci_power_info *pi = ci_get_pi(rdev);
4492 struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
4493 int ret;
4494
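/* Bring-up order matters here: voltage control and MC tables first,
 * then firmware upload and SMC table setup, and only then start the
 * SMC and switch on the individual DPM features. */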
4495 if (ci_is_smc_running(rdev))
4496 return -EINVAL;
4497 if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
4498 ci_enable_voltage_control(rdev);
4499 ret = ci_construct_voltage_tables(rdev);
4500 if (ret) {
4501 DRM_ERROR("ci_construct_voltage_tables failed\n");
4502 return ret;
4503 }
4504 }
4505 if (pi->caps_dynamic_ac_timing) {
4506 ret = ci_initialize_mc_reg_table(rdev);
4507 if (ret)
4508 pi->caps_dynamic_ac_timing = false;
4509 }
4510 if (pi->dynamic_ss)
4511 ci_enable_spread_spectrum(rdev, true);
4512 if (pi->thermal_protection)
4513 ci_enable_thermal_protection(rdev, true);
4514 ci_program_sstp(rdev);
4515 ci_enable_display_gap(rdev);
4516 ci_program_vc(rdev);
4517 ret = ci_upload_firmware(rdev);
4518 if (ret) {
4519 DRM_ERROR("ci_upload_firmware failed\n");
4520 return ret;
4521 }
4522 ret = ci_process_firmware_header(rdev);
4523 if (ret) {
4524 DRM_ERROR("ci_process_firmware_header failed\n");
4525 return ret;
4526 }
4527 ret = ci_initial_switch_from_arb_f0_to_f1(rdev);
4528 if (ret) {
4529 DRM_ERROR("ci_initial_switch_from_arb_f0_to_f1 failed\n");
4530 return ret;
4531 }
4532 ret = ci_init_smc_table(rdev);
4533 if (ret) {
4534 DRM_ERROR("ci_init_smc_table failed\n");
4535 return ret;
4536 }
4537 ret = ci_init_arb_table_index(rdev);
4538 if (ret) {
4539 DRM_ERROR("ci_init_arb_table_index failed\n");
4540 return ret;
4541 }
4542 if (pi->caps_dynamic_ac_timing) {
4543 ret = ci_populate_initial_mc_reg_table(rdev);
4544 if (ret) {
4545 DRM_ERROR("ci_populate_initial_mc_reg_table failed\n");
4546 return ret;
4547 }
4548 }
4549 ret = ci_populate_pm_base(rdev);
4550 if (ret) {
4551 DRM_ERROR("ci_populate_pm_base failed\n");
4552 return ret;
4553 }
4554 ci_dpm_start_smc(rdev);
4555 ci_enable_vr_hot_gpio_interrupt(rdev);
4556 ret = ci_notify_smc_display_change(rdev, false);
4557 if (ret) {
4558 DRM_ERROR("ci_notify_smc_display_change failed\n");
4559 return ret;
4560 }
4561 ci_enable_sclk_control(rdev, true);
4562 ret = ci_enable_ulv(rdev, true);
4563 if (ret) {
4564 DRM_ERROR("ci_enable_ulv failed\n");
4565 return ret;
4566 }
4567 ret = ci_enable_ds_master_switch(rdev, true);
4568 if (ret) {
4569 DRM_ERROR("ci_enable_ds_master_switch failed\n");
4570 return ret;
4571 }
4572 ret = ci_start_dpm(rdev);
4573 if (ret) {
4574 DRM_ERROR("ci_start_dpm failed\n");
4575 return ret;
4576 }
4577 ret = ci_enable_didt(rdev, true);
4578 if (ret) {
4579 DRM_ERROR("ci_enable_didt failed\n");
4580 return ret;
4581 }
4582 ret = ci_enable_smc_cac(rdev, true);
4583 if (ret) {
4584 DRM_ERROR("ci_enable_smc_cac failed\n");
4585 return ret;
4586 }
4587 ret = ci_enable_power_containment(rdev, true);
4588 if (ret) {
4589 DRM_ERROR("ci_enable_power_containment failed\n");
4590 return ret;
4591 }
4592 if (rdev->irq.installed &&
4593 r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
4594 #if 0
4595 PPSMC_Result result;
4596 #endif
4597 ret = ci_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
4598 if (ret) {
4599 DRM_ERROR("ci_set_thermal_temperature_range failed\n");
4600 return ret;
4601 }
4602 rdev->irq.dpm_thermal = true;
4603 radeon_irq_set(rdev);
4604 #if 0
4605 result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableThermalInterrupt);
4606
4607 if (result != PPSMC_Result_OK)
4608 DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
4609 #endif
4610 }
4611
4612 ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
4613
4614 ci_update_current_ps(rdev, boot_ps);
4615
4616 return 0;
4617 }
4618
4619 void ci_dpm_disable(struct radeon_device *rdev)
4620 {
4621 struct ci_power_info *pi = ci_get_pi(rdev);
4622 struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
4623
4624 if (!ci_is_smc_running(rdev))
4625 return;
4626
4627 if (pi->thermal_protection)
4628 ci_enable_thermal_protection(rdev, false);
4629 ci_enable_power_containment(rdev, false);
4630 ci_enable_smc_cac(rdev, false);
4631 ci_enable_didt(rdev, false);
4632 ci_enable_spread_spectrum(rdev, false);
4633 ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
4634 ci_stop_dpm(rdev);
4635 ci_enable_ds_master_switch(rdev, false);
4636 ci_enable_ulv(rdev, false);
4637 ci_clear_vc(rdev);
4638 ci_reset_to_default(rdev);
4639 ci_dpm_stop_smc(rdev);
4640 ci_force_switch_to_arb_f0(rdev);
4641
4642 ci_update_current_ps(rdev, boot_ps);
4643 }
4644
4645 int ci_dpm_set_power_state(struct radeon_device *rdev)
4646 {
4647 struct ci_power_info *pi = ci_get_pi(rdev);
4648 struct radeon_ps *new_ps = &pi->requested_rps;
4649 struct radeon_ps *old_ps = &pi->current_rps;
4650 int ret;
4651
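/* The sclk/mclk table updates are bracketed by freeze/unfreeze so the
 * SMC does not consume levels while they are being rewritten. */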
4652 ci_find_dpm_states_clocks_in_dpm_table(rdev, new_ps);
4653 if (pi->pcie_performance_request)
4654 ci_request_link_speed_change_before_state_change(rdev, new_ps, old_ps);
4655 ret = ci_freeze_sclk_mclk_dpm(rdev);
4656 if (ret) {
4657 DRM_ERROR("ci_freeze_sclk_mclk_dpm failed\n");
4658 return ret;
4659 }
4660 ret = ci_populate_and_upload_sclk_mclk_dpm_levels(rdev, new_ps);
4661 if (ret) {
4662 DRM_ERROR("ci_populate_and_upload_sclk_mclk_dpm_levels failed\n");
4663 return ret;
4664 }
4665 ret = ci_generate_dpm_level_enable_mask(rdev, new_ps);
4666 if (ret) {
4667 DRM_ERROR("ci_generate_dpm_level_enable_mask failed\n");
4668 return ret;
4669 }
4670 #if 0
4671 ret = ci_update_vce_dpm(rdev, new_ps, old_ps);
4672 if (ret) {
4673 DRM_ERROR("ci_update_vce_dpm failed\n");
4674 return ret;
4675 }
4676 #endif
4677 ret = ci_update_uvd_dpm(rdev, false);
4678 if (ret) {
4679 DRM_ERROR("ci_update_uvd_dpm failed\n");
4680 return ret;
4681 }
4682 ret = ci_update_sclk_t(rdev);
4683 if (ret) {
4684 DRM_ERROR("ci_update_sclk_t failed\n");
4685 return ret;
4686 }
4687 if (pi->caps_dynamic_ac_timing) {
4688 ret = ci_update_and_upload_mc_reg_table(rdev);
4689 if (ret) {
4690 DRM_ERROR("ci_update_and_upload_mc_reg_table failed\n");
4691 return ret;
4692 }
4693 }
4694 ret = ci_program_memory_timing_parameters(rdev);
4695 if (ret) {
4696 DRM_ERROR("ci_program_memory_timing_parameters failed\n");
4697 return ret;
4698 }
4699 ret = ci_unfreeze_sclk_mclk_dpm(rdev);
4700 if (ret) {
4701 DRM_ERROR("ci_unfreeze_sclk_mclk_dpm failed\n");
4702 return ret;
4703 }
4704 ret = ci_upload_dpm_level_enable_mask(rdev);
4705 if (ret) {
4706 DRM_ERROR("ci_upload_dpm_level_enable_mask failed\n");
4707 return ret;
4708 }
4709 if (pi->pcie_performance_request)
4710 ci_notify_link_speed_change_after_state_change(rdev, new_ps, old_ps);
4711
4712 ret = ci_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO);
4713 if (ret) {
4714 DRM_ERROR("ci_dpm_force_performance_level failed\n");
4715 return ret;
4716 }
4717
4718 return 0;
4719 }
4720
4721 int ci_dpm_power_control_set_level(struct radeon_device *rdev)
4722 {
4723 return ci_power_control_set_level(rdev);
4724 }
4725
4726 void ci_dpm_reset_asic(struct radeon_device *rdev)
4727 {
4728 ci_set_boot_state(rdev);
4729 }
4730
4731 void ci_dpm_display_configuration_changed(struct radeon_device *rdev)
4732 {
4733 ci_program_display_gap(rdev);
4734 }
4735
4736 union power_info {
4737 struct _ATOM_POWERPLAY_INFO info;
4738 struct _ATOM_POWERPLAY_INFO_V2 info_2;
4739 struct _ATOM_POWERPLAY_INFO_V3 info_3;
4740 struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
4741 struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
4742 struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
4743 };
4744
4745 union pplib_clock_info {
4746 struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
4747 struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
4748 struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
4749 struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
4750 struct _ATOM_PPLIB_SI_CLOCK_INFO si;
4751 struct _ATOM_PPLIB_CI_CLOCK_INFO ci;
4752 };
4753
4754 union pplib_power_state {
4755 struct _ATOM_PPLIB_STATE v1;
4756 struct _ATOM_PPLIB_STATE_V2 v2;
4757 };
4758
4759 static void ci_parse_pplib_non_clock_info(struct radeon_device *rdev,
4760 struct radeon_ps *rps,
4761 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
4762 u8 table_rev)
4763 {
4764 rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
4765 rps->class = le16_to_cpu(non_clock_info->usClassification);
4766 rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
4767
4768 if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
4769 rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
4770 rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
4771 } else {
4772 rps->vclk = 0;
4773 rps->dclk = 0;
4774 }
4775
4776 if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
4777 rdev->pm.dpm.boot_ps = rps;
4778 if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
4779 rdev->pm.dpm.uvd_ps = rps;
4780 }
4781
4782 static void ci_parse_pplib_clock_info(struct radeon_device *rdev,
4783 struct radeon_ps *rps, int index,
4784 union pplib_clock_info *clock_info)
4785 {
4786 struct ci_power_info *pi = ci_get_pi(rdev);
4787 struct ci_ps *ps = ci_get_ps(rps);
4788 struct ci_pl *pl = &ps->performance_levels[index];
4789
4790 ps->performance_level_count = index + 1;
4791
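/* Clocks are stored as a 16-bit low word plus an 8-bit high byte. */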
4792 pl->sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
4793 pl->sclk |= clock_info->ci.ucEngineClockHigh << 16;
4794 pl->mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
4795 pl->mclk |= clock_info->ci.ucMemoryClockHigh << 16;
4796
4797 pl->pcie_gen = r600_get_pcie_gen_support(rdev,
4798 pi->sys_pcie_mask,
4799 pi->vbios_boot_state.pcie_gen_bootup_value,
4800 clock_info->ci.ucPCIEGen);
4801 pl->pcie_lane = r600_get_pcie_lane_support(rdev,
4802 pi->vbios_boot_state.pcie_lane_bootup_value,
4803 le16_to_cpu(clock_info->ci.usPCIELane));
4804
4805 if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) {
4806 pi->acpi_pcie_gen = pl->pcie_gen;
4807 }
4808
4809 if (rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) {
4810 pi->ulv.supported = true;
4811 pi->ulv.pl = *pl;
4812 pi->ulv.cg_ulv_parameter = CISLANDS_CGULVPARAMETER_DFLT;
4813 }
4814
4815 /* patch up boot state */
4816 if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
4817 pl->mclk = pi->vbios_boot_state.mclk_bootup_value;
4818 pl->sclk = pi->vbios_boot_state.sclk_bootup_value;
4819 pl->pcie_gen = pi->vbios_boot_state.pcie_gen_bootup_value;
4820 pl->pcie_lane = pi->vbios_boot_state.pcie_lane_bootup_value;
4821 }
4822
4823 switch (rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
4824 case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
4825 pi->use_pcie_powersaving_levels = true;
4826 if (pi->pcie_gen_powersaving.max < pl->pcie_gen)
4827 pi->pcie_gen_powersaving.max = pl->pcie_gen;
4828 if (pi->pcie_gen_powersaving.min > pl->pcie_gen)
4829 pi->pcie_gen_powersaving.min = pl->pcie_gen;
4830 if (pi->pcie_lane_powersaving.max < pl->pcie_lane)
4831 pi->pcie_lane_powersaving.max = pl->pcie_lane;
4832 if (pi->pcie_lane_powersaving.min > pl->pcie_lane)
4833 pi->pcie_lane_powersaving.min = pl->pcie_lane;
4834 break;
4835 case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
4836 pi->use_pcie_performance_levels = true;
4837 if (pi->pcie_gen_performance.max < pl->pcie_gen)
4838 pi->pcie_gen_performance.max = pl->pcie_gen;
4839 if (pi->pcie_gen_performance.min > pl->pcie_gen)
4840 pi->pcie_gen_performance.min = pl->pcie_gen;
4841 if (pi->pcie_lane_performance.max < pl->pcie_lane)
4842 pi->pcie_lane_performance.max = pl->pcie_lane;
4843 if (pi->pcie_lane_performance.min > pl->pcie_lane)
4844 pi->pcie_lane_performance.min = pl->pcie_lane;
4845 break;
4846 default:
4847 break;
4848 }
4849 }
4850
4851 static int ci_parse_power_table(struct radeon_device *rdev)
4852 {
4853 struct radeon_mode_info *mode_info = &rdev->mode_info;
4854 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
4855 union pplib_power_state *power_state;
4856 int i, j, k, non_clock_array_index, clock_array_index;
4857 union pplib_clock_info *clock_info;
4858 struct _StateArray *state_array;
4859 struct _ClockInfoArray *clock_info_array;
4860 struct _NonClockInfoArray *non_clock_info_array;
4861 union power_info *power_info;
4862 int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
4863 u16 data_offset;
4864 u8 frev, crev;
4865 u8 *power_state_offset;
4866 struct ci_ps *ps;
4867
4868 if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
4869 &frev, &crev, &data_offset))
4870 return -EINVAL;
4871 power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
4872
4873 state_array = (struct _StateArray *)
4874 (mode_info->atom_context->bios + data_offset +
4875 le16_to_cpu(power_info->pplib.usStateArrayOffset));
4876 clock_info_array = (struct _ClockInfoArray *)
4877 (mode_info->atom_context->bios + data_offset +
4878 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
4879 non_clock_info_array = (struct _NonClockInfoArray *)
4880 (mode_info->atom_context->bios + data_offset +
4881 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
4882
4883 rdev->pm.dpm.ps = kzalloc(sizeof(struct radeon_ps) *
4884 state_array->ucNumEntries, GFP_KERNEL);
4885 if (!rdev->pm.dpm.ps)
4886 return -ENOMEM;
4887 power_state_offset = (u8 *)state_array->states;
4888 rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
4889 rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
4890 rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
4891 for (i = 0; i < state_array->ucNumEntries; i++) {
4892 power_state = (union pplib_power_state *)power_state_offset;
4893 non_clock_array_index = power_state->v2.nonClockInfoIndex;
4894 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
4895 &non_clock_info_array->nonClockInfo[non_clock_array_index];
4896 if (!rdev->pm.power_state[i].clock_info)
4897 return -EINVAL;
4898 ps = kzalloc(sizeof(struct ci_ps), GFP_KERNEL);
4899 if (ps == NULL) {
4900 kfree(rdev->pm.dpm.ps);
4901 return -ENOMEM;
4902 }
4903 rdev->pm.dpm.ps[i].ps_priv = ps;
4904 ci_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
4905 non_clock_info,
4906 non_clock_info_array->ucEntrySize);
4907 k = 0;
4908 for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
4909 clock_array_index = power_state->v2.clockInfoIndex[j];
4910 if (clock_array_index >= clock_info_array->ucNumEntries)
4911 continue;
4912 if (k >= CISLANDS_MAX_HARDWARE_POWERLEVELS)
4913 break;
4914 clock_info = (union pplib_clock_info *)
4915 &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
4916 ci_parse_pplib_clock_info(rdev,
4917 &rdev->pm.dpm.ps[i], k,
4918 clock_info);
4919 k++;
4920 }
4921 power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
4922 }
4923 rdev->pm.dpm.num_ps = state_array->ucNumEntries;
4924 return 0;
4925 }
4926
4927 int ci_get_vbios_boot_values(struct radeon_device *rdev,
4928 struct ci_vbios_boot_state *boot_state)
4929 {
4930 struct radeon_mode_info *mode_info = &rdev->mode_info;
4931 int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
4932 ATOM_FIRMWARE_INFO_V2_2 *firmware_info;
4933 u8 frev, crev;
4934 u16 data_offset;
4935
4936 if (atom_parse_data_header(mode_info->atom_context, index, NULL,
4937 &frev, &crev, &data_offset)) {
4938 firmware_info =
4939 (ATOM_FIRMWARE_INFO_V2_2 *)(mode_info->atom_context->bios +
4940 data_offset);
4941 boot_state->mvdd_bootup_value = le16_to_cpu(firmware_info->usBootUpMVDDCVoltage);
4942 boot_state->vddc_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCVoltage);
4943 boot_state->vddci_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCIVoltage);
4944 boot_state->pcie_gen_bootup_value = ci_get_current_pcie_speed(rdev);
4945 boot_state->pcie_lane_bootup_value = ci_get_current_pcie_lane_number(rdev);
4946 boot_state->sclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultEngineClock);
4947 boot_state->mclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultMemoryClock);
4948
4949 return 0;
4950 }
4951 return -EINVAL;
4952 }
4953
4954 void ci_dpm_fini(struct radeon_device *rdev)
4955 {
4956 int i;
4957
4958 for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
4959 kfree(rdev->pm.dpm.ps[i].ps_priv);
4960 }
4961 kfree(rdev->pm.dpm.ps);
4962 kfree(rdev->pm.dpm.priv);
4963 kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries);
4964 r600_free_extended_power_table(rdev);
4965 }
4966
4967 int ci_dpm_init(struct radeon_device *rdev)
4968 {
4969 int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
4970 u16 data_offset, size;
4971 u8 frev, crev;
4972 struct ci_power_info *pi;
4973 int ret;
4974 u32 mask;
4975
4976 pi = kzalloc(sizeof(struct ci_power_info), GFP_KERNEL);
4977 if (pi == NULL)
4978 return -ENOMEM;
4979 rdev->pm.dpm.priv = pi;
4980
4981 ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
4982 if (ret)
4983 pi->sys_pcie_mask = 0;
4984 else
4985 pi->sys_pcie_mask = mask;
4986 pi->force_pcie_gen = RADEON_PCIE_GEN_INVALID;
4987
4988 pi->pcie_gen_performance.max = RADEON_PCIE_GEN1;
4989 pi->pcie_gen_performance.min = RADEON_PCIE_GEN3;
4990 pi->pcie_gen_powersaving.max = RADEON_PCIE_GEN1;
4991 pi->pcie_gen_powersaving.min = RADEON_PCIE_GEN3;
4992
4993 pi->pcie_lane_performance.max = 0;
4994 pi->pcie_lane_performance.min = 16;
4995 pi->pcie_lane_powersaving.max = 0;
4996 pi->pcie_lane_powersaving.min = 16;
4997
4998 ret = ci_get_vbios_boot_values(rdev, &pi->vbios_boot_state);
4999 if (ret) {
5000 ci_dpm_fini(rdev);
5001 return ret;
5002 }
5003 ret = ci_parse_power_table(rdev);
5004 if (ret) {
5005 ci_dpm_fini(rdev);
5006 return ret;
5007 }
5008 ret = r600_parse_extended_power_table(rdev);
5009 if (ret) {
5010 ci_dpm_fini(rdev);
5011 return ret;
5012 }
5013
5014 pi->dll_default_on = false;
5015 pi->sram_end = SMC_RAM_END;
5016
5017 pi->activity_target[0] = CISLAND_TARGETACTIVITY_DFLT;
5018 pi->activity_target[1] = CISLAND_TARGETACTIVITY_DFLT;
5019 pi->activity_target[2] = CISLAND_TARGETACTIVITY_DFLT;
5020 pi->activity_target[3] = CISLAND_TARGETACTIVITY_DFLT;
5021 pi->activity_target[4] = CISLAND_TARGETACTIVITY_DFLT;
5022 pi->activity_target[5] = CISLAND_TARGETACTIVITY_DFLT;
5023 pi->activity_target[6] = CISLAND_TARGETACTIVITY_DFLT;
5024 pi->activity_target[7] = CISLAND_TARGETACTIVITY_DFLT;
5025
5026 pi->mclk_activity_target = CISLAND_MCLK_TARGETACTIVITY_DFLT;
5027
5028 pi->sclk_dpm_key_disabled = 0;
5029 pi->mclk_dpm_key_disabled = 0;
5030 pi->pcie_dpm_key_disabled = 0;
5031
5032 pi->caps_sclk_ds = true;
5033
5034 pi->mclk_strobe_mode_threshold = 40000;
5035 pi->mclk_stutter_mode_threshold = 40000;
5036 pi->mclk_edc_enable_threshold = 40000;
5037 pi->mclk_edc_wr_enable_threshold = 40000;
5038
5039 ci_initialize_powertune_defaults(rdev);
5040
5041 pi->caps_fps = false;
5042
5043 pi->caps_sclk_throttle_low_notification = false;
5044
5045 ci_get_leakage_voltages(rdev);
5046 ci_patch_dependency_tables_with_leakage(rdev);
5047 ci_set_private_data_variables_based_on_pptable(rdev);
5048
5049 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
5050 kzalloc(4 * sizeof(struct radeon_clock_voltage_dependency_entry), GFP_KERNEL);
5051 if (!rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
5052 ci_dpm_fini(rdev);
5053 return -ENOMEM;
5054 }
5055 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4;
5056 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
5057 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;
5058 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000;
5059 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 720;
5060 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000;
5061 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 810;
5062 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000;
5063 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 900;
5064
5065 rdev->pm.dpm.dyn_state.mclk_sclk_ratio = 4;
5066 rdev->pm.dpm.dyn_state.sclk_mclk_delta = 15000;
5067 rdev->pm.dpm.dyn_state.vddc_vddci_delta = 200;
5068
5069 rdev->pm.dpm.dyn_state.valid_sclk_values.count = 0;
5070 rdev->pm.dpm.dyn_state.valid_sclk_values.values = NULL;
5071 rdev->pm.dpm.dyn_state.valid_mclk_values.count = 0;
5072 rdev->pm.dpm.dyn_state.valid_mclk_values.values = NULL;
5073
5074 pi->thermal_temp_setting.temperature_low = 99500;
5075 pi->thermal_temp_setting.temperature_high = 100000;
5076 pi->thermal_temp_setting.temperature_shutdown = 104000;
5077
5078 pi->uvd_enabled = false;
5079
5080 pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_NONE;
5081 pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_NONE;
5082 pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_NONE;
5083 if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT))
5084 pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
5085 else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
5086 pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
5087
5088 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL) {
5089 if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
5090 pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
5091 else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
5092 pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
5093 else
5094 rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL;
5095 }
5096
5097 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_MVDDCONTROL) {
5098 if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
5099 pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
5100 else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2))
5101 pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
5102 else
5103 rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_MVDDCONTROL;
5104 }
5105
5106 pi->vddc_phase_shed_control = true;
5107
5108 #if defined(CONFIG_ACPI)
5109 pi->pcie_performance_request =
5110 radeon_acpi_is_pcie_performance_request_supported(rdev);
5111 #else
5112 pi->pcie_performance_request = false;
5113 #endif
5114
5115 if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
5116 &frev, &crev, &data_offset)) {
5117 pi->caps_sclk_ss_support = true;
5118 pi->caps_mclk_ss_support = true;
5119 pi->dynamic_ss = true;
5120 } else {
5121 pi->caps_sclk_ss_support = false;
5122 pi->caps_mclk_ss_support = false;
5123 pi->dynamic_ss = true;
5124 }
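/* Note that dynamic_ss is enabled in both branches, i.e. even when the
 * VBIOS carries no spread spectrum table. */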
5125
5126 if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)
5127 pi->thermal_protection = true;
5128 else
5129 pi->thermal_protection = false;
5130
5131 pi->caps_dynamic_ac_timing = true;
5132
5133 return 0;
5134 }
5135
5136 void ci_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
5137 struct seq_file *m)
5138 {
5139 u32 sclk = ci_get_average_sclk_freq(rdev);
5140 u32 mclk = ci_get_average_mclk_freq(rdev);
5141
5142 seq_printf(m, "power level avg sclk: %u mclk: %u\n",
5143 sclk, mclk);
5144 }
5145
5146 void ci_dpm_print_power_state(struct radeon_device *rdev,
5147 struct radeon_ps *rps)
5148 {
5149 struct ci_ps *ps = ci_get_ps(rps);
5150 struct ci_pl *pl;
5151 int i;
5152
5153 r600_dpm_print_class_info(rps->class, rps->class2);
5154 r600_dpm_print_cap_info(rps->caps);
5155 printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
5156 for (i = 0; i < ps->performance_level_count; i++) {
5157 pl = &ps->performance_levels[i];
5158 printk("\t\tpower level %d sclk: %u mclk: %u pcie gen: %u pcie lanes: %u\n",
5159 i, pl->sclk, pl->mclk, pl->pcie_gen + 1, pl->pcie_lane);
5160 }
5161 r600_dpm_print_ps_status(rdev, rps);
5162 }
5163
5164 u32 ci_dpm_get_sclk(struct radeon_device *rdev, bool low)
5165 {
5166 struct ci_power_info *pi = ci_get_pi(rdev);
5167 struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);
5168
5169 if (low)
5170 return requested_state->performance_levels[0].sclk;
5171 else
5172 return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk;
5173 }
5174
5175 u32 ci_dpm_get_mclk(struct radeon_device *rdev, bool low)
5176 {
5177 struct ci_power_info *pi = ci_get_pi(rdev);
5178 struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);
5179
5180 if (low)
5181 return requested_state->performance_levels[0].mclk;
5182 else
5183 return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk;
5184 }