/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
26 #include "amdgpu_pm.h"
27 #include "amdgpu_dpm.h"
28 #include "amdgpu_atombios.h"
33 #include "../include/pptable.h"
34 #include <linux/math64.h>
35 #include <linux/seq_file.h>
36 #include <linux/firmware.h>
38 #define MC_CG_ARB_FREQ_F0 0x0a
39 #define MC_CG_ARB_FREQ_F1 0x0b
40 #define MC_CG_ARB_FREQ_F2 0x0c
41 #define MC_CG_ARB_FREQ_F3 0x0d
43 #define SMC_RAM_END 0x20000
45 #define SCLK_MIN_DEEPSLEEP_FREQ 1350
48 /* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */
49 #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12
50 #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14
51 #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16
52 #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18
53 #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20
54 #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22
56 #define BIOS_SCRATCH_4 0x5cd
58 MODULE_FIRMWARE("radeon/tahiti_smc.bin");
59 MODULE_FIRMWARE("radeon/pitcairn_smc.bin");
60 MODULE_FIRMWARE("radeon/pitcairn_k_smc.bin");
61 MODULE_FIRMWARE("radeon/verde_smc.bin");
62 MODULE_FIRMWARE("radeon/verde_k_smc.bin");
63 MODULE_FIRMWARE("radeon/oland_smc.bin");
64 MODULE_FIRMWARE("radeon/oland_k_smc.bin");
65 MODULE_FIRMWARE("radeon/hainan_smc.bin");
66 MODULE_FIRMWARE("radeon/hainan_k_smc.bin");
67 MODULE_FIRMWARE("radeon/banks_k_2_smc.bin");
70 struct _ATOM_POWERPLAY_INFO info
;
71 struct _ATOM_POWERPLAY_INFO_V2 info_2
;
72 struct _ATOM_POWERPLAY_INFO_V3 info_3
;
73 struct _ATOM_PPLIB_POWERPLAYTABLE pplib
;
74 struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2
;
75 struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3
;
76 struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4
;
77 struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5
;
81 struct _ATOM_PPLIB_FANTABLE fan
;
82 struct _ATOM_PPLIB_FANTABLE2 fan2
;
83 struct _ATOM_PPLIB_FANTABLE3 fan3
;
86 union pplib_clock_info
{
87 struct _ATOM_PPLIB_R600_CLOCK_INFO r600
;
88 struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780
;
89 struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen
;
90 struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo
;
91 struct _ATOM_PPLIB_SI_CLOCK_INFO si
;
94 static const u32 r600_utc
[R600_PM_NUMBER_OF_TC
] =
113 static const u32 r600_dtc
[R600_PM_NUMBER_OF_TC
] =
132 static const struct si_cac_config_reg cac_weights_tahiti
[] =
134 { 0x0, 0x0000ffff, 0, 0xc, SISLANDS_CACCONFIG_CGIND
},
135 { 0x0, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
136 { 0x1, 0x0000ffff, 0, 0x101, SISLANDS_CACCONFIG_CGIND
},
137 { 0x1, 0xffff0000, 16, 0xc, SISLANDS_CACCONFIG_CGIND
},
138 { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
139 { 0x3, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
140 { 0x3, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
141 { 0x4, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
142 { 0x4, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
143 { 0x5, 0x0000ffff, 0, 0x8fc, SISLANDS_CACCONFIG_CGIND
},
144 { 0x5, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
145 { 0x6, 0x0000ffff, 0, 0x95, SISLANDS_CACCONFIG_CGIND
},
146 { 0x6, 0xffff0000, 16, 0x34e, SISLANDS_CACCONFIG_CGIND
},
147 { 0x18f, 0x0000ffff, 0, 0x1a1, SISLANDS_CACCONFIG_CGIND
},
148 { 0x7, 0x0000ffff, 0, 0xda, SISLANDS_CACCONFIG_CGIND
},
149 { 0x7, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
150 { 0x8, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
151 { 0x8, 0xffff0000, 16, 0x46, SISLANDS_CACCONFIG_CGIND
},
152 { 0x9, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
153 { 0xa, 0x0000ffff, 0, 0x208, SISLANDS_CACCONFIG_CGIND
},
154 { 0xb, 0x0000ffff, 0, 0xe7, SISLANDS_CACCONFIG_CGIND
},
155 { 0xb, 0xffff0000, 16, 0x948, SISLANDS_CACCONFIG_CGIND
},
156 { 0xc, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
157 { 0xd, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
158 { 0xd, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
159 { 0xe, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
160 { 0xf, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
161 { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
162 { 0x10, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
163 { 0x10, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
164 { 0x11, 0x0000ffff, 0, 0x167, SISLANDS_CACCONFIG_CGIND
},
165 { 0x11, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
166 { 0x12, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
167 { 0x13, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
168 { 0x13, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND
},
169 { 0x14, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
170 { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
171 { 0x15, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND
},
172 { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
173 { 0x16, 0x0000ffff, 0, 0x31, SISLANDS_CACCONFIG_CGIND
},
174 { 0x16, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
175 { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
176 { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
177 { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
178 { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
179 { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
180 { 0x1a, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
181 { 0x1a, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
182 { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
183 { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
184 { 0x1c, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
185 { 0x1c, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
186 { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
187 { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
188 { 0x1e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
189 { 0x1e, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
190 { 0x1f, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
191 { 0x1f, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
192 { 0x20, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
193 { 0x6d, 0x0000ffff, 0, 0x18e, SISLANDS_CACCONFIG_CGIND
},
197 static const struct si_cac_config_reg lcac_tahiti
[] =
199 { 0x143, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND
},
200 { 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
201 { 0x146, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND
},
202 { 0x146, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
203 { 0x149, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND
},
204 { 0x149, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
205 { 0x14c, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND
},
206 { 0x14c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
207 { 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
208 { 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
209 { 0x9b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
210 { 0x9b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
211 { 0x9e, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
212 { 0x9e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
213 { 0x101, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
214 { 0x101, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
215 { 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
216 { 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
217 { 0x107, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
218 { 0x107, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
219 { 0x10a, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
220 { 0x10a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
221 { 0x10d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
222 { 0x10d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
223 { 0x8c, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND
},
224 { 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
225 { 0x8f, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND
},
226 { 0x8f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
227 { 0x92, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND
},
228 { 0x92, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
229 { 0x95, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND
},
230 { 0x95, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
231 { 0x14f, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND
},
232 { 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
233 { 0x152, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND
},
234 { 0x152, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
235 { 0x155, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND
},
236 { 0x155, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
237 { 0x158, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND
},
238 { 0x158, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
239 { 0x110, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND
},
240 { 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
241 { 0x113, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND
},
242 { 0x113, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
243 { 0x116, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND
},
244 { 0x116, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
245 { 0x119, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND
},
246 { 0x119, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
247 { 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
248 { 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
249 { 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
250 { 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
251 { 0x122, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
252 { 0x122, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
253 { 0x125, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
254 { 0x125, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
255 { 0x128, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
256 { 0x128, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
257 { 0x12b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
258 { 0x12b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
259 { 0x15b, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND
},
260 { 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
261 { 0x15e, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND
},
262 { 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
263 { 0x161, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND
},
264 { 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
265 { 0x164, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND
},
266 { 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
267 { 0x167, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND
},
268 { 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
269 { 0x16a, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND
},
270 { 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
271 { 0x16d, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND
},
272 { 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
273 { 0x170, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
274 { 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
275 { 0x173, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
276 { 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
277 { 0x176, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
278 { 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
279 { 0x179, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
280 { 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
281 { 0x17c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
282 { 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
283 { 0x17f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
284 { 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
289 static const struct si_cac_config_reg cac_override_tahiti
[] =
294 static const struct si_powertune_data powertune_data_tahiti
=
325 static const struct si_dte_data dte_data_tahiti
=
327 { 1159409, 0, 0, 0, 0 },
336 { 27, 31, 35, 39, 43, 47, 54, 61, 67, 74, 81, 88, 95, 0, 0, 0 },
337 { 240888759, 221057860, 235370597, 162287531, 158510299, 131423027, 116673180, 103067515, 87941937, 76209048, 68209175, 64090048, 58301890, 0, 0, 0 },
338 { 12024, 11189, 11451, 8411, 7939, 6666, 5681, 4905, 4241, 3720, 3354, 3122, 2890, 0, 0, 0 },
344 static const struct si_dte_data dte_data_tahiti_le
=
346 { 0x1E8480, 0x7A1200, 0x2160EC0, 0x3938700, 0 },
347 { 0x7D, 0x7D, 0x4E4, 0xB00, 0 },
355 { 0x78, 0x7C, 0x82, 0x88, 0x8E, 0x94, 0x9A, 0xA0, 0xA6, 0xAC, 0xB0, 0xB4, 0xB8, 0xBC, 0xC0, 0xC4 },
356 { 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700 },
357 { 0x2AF8, 0x2AF8, 0x29BB, 0x27F9, 0x2637, 0x2475, 0x22B3, 0x20F1, 0x1F2F, 0x1D6D, 0x1734, 0x1414, 0x10F4, 0xDD4, 0xAB4, 0x794 },
363 static const struct si_dte_data dte_data_tahiti_pro
=
365 { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
366 { 0x0, 0x0, 0x0, 0x0, 0x0 },
374 { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
375 { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
376 { 0x7D0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
381 static const struct si_dte_data dte_data_new_zealand
=
383 { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0 },
384 { 0x29B, 0x3E9, 0x537, 0x7D2, 0 },
392 { 0x82, 0xA0, 0xB4, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE },
393 { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
394 { 0xDAC, 0x1388, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685 },
399 static const struct si_dte_data dte_data_aruba_pro
=
401 { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
402 { 0x0, 0x0, 0x0, 0x0, 0x0 },
410 { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
411 { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
412 { 0x1000, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
417 static const struct si_dte_data dte_data_malta
=
419 { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
420 { 0x0, 0x0, 0x0, 0x0, 0x0 },
428 { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
429 { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
430 { 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
435 static const struct si_cac_config_reg cac_weights_pitcairn
[] =
437 { 0x0, 0x0000ffff, 0, 0x8a, SISLANDS_CACCONFIG_CGIND
},
438 { 0x0, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
439 { 0x1, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
440 { 0x1, 0xffff0000, 16, 0x24d, SISLANDS_CACCONFIG_CGIND
},
441 { 0x2, 0x0000ffff, 0, 0x19, SISLANDS_CACCONFIG_CGIND
},
442 { 0x3, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND
},
443 { 0x3, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
444 { 0x4, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND
},
445 { 0x4, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
446 { 0x5, 0x0000ffff, 0, 0xc11, SISLANDS_CACCONFIG_CGIND
},
447 { 0x5, 0xffff0000, 16, 0x7f3, SISLANDS_CACCONFIG_CGIND
},
448 { 0x6, 0x0000ffff, 0, 0x403, SISLANDS_CACCONFIG_CGIND
},
449 { 0x6, 0xffff0000, 16, 0x367, SISLANDS_CACCONFIG_CGIND
},
450 { 0x18f, 0x0000ffff, 0, 0x4c9, SISLANDS_CACCONFIG_CGIND
},
451 { 0x7, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
452 { 0x7, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
453 { 0x8, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
454 { 0x8, 0xffff0000, 16, 0x45d, SISLANDS_CACCONFIG_CGIND
},
455 { 0x9, 0x0000ffff, 0, 0x36d, SISLANDS_CACCONFIG_CGIND
},
456 { 0xa, 0x0000ffff, 0, 0x534, SISLANDS_CACCONFIG_CGIND
},
457 { 0xb, 0x0000ffff, 0, 0x5da, SISLANDS_CACCONFIG_CGIND
},
458 { 0xb, 0xffff0000, 16, 0x880, SISLANDS_CACCONFIG_CGIND
},
459 { 0xc, 0x0000ffff, 0, 0x201, SISLANDS_CACCONFIG_CGIND
},
460 { 0xd, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
461 { 0xd, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
462 { 0xe, 0x0000ffff, 0, 0x9f, SISLANDS_CACCONFIG_CGIND
},
463 { 0xf, 0x0000ffff, 0, 0x1f, SISLANDS_CACCONFIG_CGIND
},
464 { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
465 { 0x10, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
466 { 0x10, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
467 { 0x11, 0x0000ffff, 0, 0x5de, SISLANDS_CACCONFIG_CGIND
},
468 { 0x11, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
469 { 0x12, 0x0000ffff, 0, 0x7b, SISLANDS_CACCONFIG_CGIND
},
470 { 0x13, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
471 { 0x13, 0xffff0000, 16, 0x13, SISLANDS_CACCONFIG_CGIND
},
472 { 0x14, 0x0000ffff, 0, 0xf9, SISLANDS_CACCONFIG_CGIND
},
473 { 0x15, 0x0000ffff, 0, 0x66, SISLANDS_CACCONFIG_CGIND
},
474 { 0x15, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
475 { 0x4e, 0x0000ffff, 0, 0x13, SISLANDS_CACCONFIG_CGIND
},
476 { 0x16, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
477 { 0x16, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
478 { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
479 { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
480 { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
481 { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
482 { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
483 { 0x1a, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
484 { 0x1a, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
485 { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
486 { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
487 { 0x1c, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
488 { 0x1c, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
489 { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
490 { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
491 { 0x1e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
492 { 0x1e, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
493 { 0x1f, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
494 { 0x1f, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
495 { 0x20, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
496 { 0x6d, 0x0000ffff, 0, 0x186, SISLANDS_CACCONFIG_CGIND
},
500 static const struct si_cac_config_reg lcac_pitcairn
[] =
502 { 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
503 { 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
504 { 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
505 { 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
506 { 0x110, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND
},
507 { 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
508 { 0x14f, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND
},
509 { 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
510 { 0x8c, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND
},
511 { 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
512 { 0x143, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
513 { 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
514 { 0x9b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
515 { 0x9b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
516 { 0x107, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
517 { 0x107, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
518 { 0x113, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND
},
519 { 0x113, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
520 { 0x152, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND
},
521 { 0x152, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
522 { 0x8f, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND
},
523 { 0x8f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
524 { 0x146, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
525 { 0x146, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
526 { 0x9e, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
527 { 0x9e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
528 { 0x10a, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
529 { 0x10a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
530 { 0x116, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND
},
531 { 0x116, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
532 { 0x155, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND
},
533 { 0x155, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
534 { 0x92, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND
},
535 { 0x92, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
536 { 0x149, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
537 { 0x149, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
538 { 0x101, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
539 { 0x101, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
540 { 0x10d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
541 { 0x10d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
542 { 0x119, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND
},
543 { 0x119, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
544 { 0x158, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND
},
545 { 0x158, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
546 { 0x95, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND
},
547 { 0x95, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
548 { 0x14c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
549 { 0x14c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
550 { 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
551 { 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
552 { 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
553 { 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
554 { 0x122, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
555 { 0x122, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
556 { 0x125, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
557 { 0x125, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
558 { 0x128, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
559 { 0x128, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
560 { 0x12b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
561 { 0x12b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
562 { 0x164, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND
},
563 { 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
564 { 0x167, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND
},
565 { 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
566 { 0x16a, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND
},
567 { 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
568 { 0x15e, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND
},
569 { 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
570 { 0x161, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND
},
571 { 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
572 { 0x15b, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND
},
573 { 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
574 { 0x16d, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND
},
575 { 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
576 { 0x170, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
577 { 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
578 { 0x173, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
579 { 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
580 { 0x176, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
581 { 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
582 { 0x179, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
583 { 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
584 { 0x17c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
585 { 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
586 { 0x17f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
587 { 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
591 static const struct si_cac_config_reg cac_override_pitcairn
[] =
596 static const struct si_powertune_data powertune_data_pitcairn
=
627 static const struct si_dte_data dte_data_pitcairn
=
638 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
639 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
640 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
645 static const struct si_dte_data dte_data_curacao_xt
=
647 { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
648 { 0x0, 0x0, 0x0, 0x0, 0x0 },
656 { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
657 { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
658 { 0x1D17, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
663 static const struct si_dte_data dte_data_curacao_pro
=
665 { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
666 { 0x0, 0x0, 0x0, 0x0, 0x0 },
674 { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
675 { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
676 { 0x1D17, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
681 static const struct si_dte_data dte_data_neptune_xt
=
683 { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
684 { 0x0, 0x0, 0x0, 0x0, 0x0 },
692 { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
693 { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
694 { 0x3A2F, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
699 static const struct si_cac_config_reg cac_weights_chelsea_pro
[] =
701 { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND
},
702 { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND
},
703 { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND
},
704 { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND
},
705 { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
706 { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND
},
707 { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND
},
708 { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND
},
709 { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND
},
710 { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND
},
711 { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND
},
712 { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND
},
713 { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND
},
714 { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND
},
715 { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND
},
716 { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND
},
717 { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND
},
718 { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND
},
719 { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND
},
720 { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND
},
721 { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND
},
722 { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND
},
723 { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND
},
724 { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND
},
725 { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND
},
726 { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND
},
727 { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND
},
728 { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
729 { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
730 { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND
},
731 { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND
},
732 { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND
},
733 { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND
},
734 { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND
},
735 { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND
},
736 { 0x14, 0x0000ffff, 0, 0x2BD, SISLANDS_CACCONFIG_CGIND
},
737 { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
738 { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND
},
739 { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
740 { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND
},
741 { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND
},
742 { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
743 { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
744 { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
745 { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
746 { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
747 { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
748 { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
749 { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
750 { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
751 { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
752 { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
753 { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
754 { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
755 { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
756 { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
757 { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
758 { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
759 { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
760 { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND
},
764 static const struct si_cac_config_reg cac_weights_chelsea_xt
[] =
766 { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND
},
767 { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND
},
768 { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND
},
769 { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND
},
770 { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
771 { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND
},
772 { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND
},
773 { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND
},
774 { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND
},
775 { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND
},
776 { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND
},
777 { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND
},
778 { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND
},
779 { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND
},
780 { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND
},
781 { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND
},
782 { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND
},
783 { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND
},
784 { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND
},
785 { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND
},
786 { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND
},
787 { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND
},
788 { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND
},
789 { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND
},
790 { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND
},
791 { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND
},
792 { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND
},
793 { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
794 { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
795 { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND
},
796 { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND
},
797 { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND
},
798 { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND
},
799 { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND
},
800 { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND
},
801 { 0x14, 0x0000ffff, 0, 0x30A, SISLANDS_CACCONFIG_CGIND
},
802 { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
803 { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND
},
804 { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
805 { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND
},
806 { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND
},
807 { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
808 { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
809 { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
810 { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
811 { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
812 { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
813 { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
814 { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
815 { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
816 { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
817 { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
818 { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
819 { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
820 { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
821 { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
822 { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
823 { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
824 { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
825 { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND
},
829 static const struct si_cac_config_reg cac_weights_heathrow
[] =
831 { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND
},
832 { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND
},
833 { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND
},
834 { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND
},
835 { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
836 { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND
},
837 { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND
},
838 { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND
},
839 { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND
},
840 { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND
},
841 { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND
},
842 { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND
},
843 { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND
},
844 { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND
},
845 { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND
},
846 { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND
},
847 { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND
},
848 { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND
},
849 { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND
},
850 { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND
},
851 { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND
},
852 { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND
},
853 { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND
},
854 { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND
},
855 { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND
},
856 { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND
},
857 { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND
},
858 { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
859 { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
860 { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND
},
861 { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND
},
862 { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND
},
863 { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND
},
864 { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND
},
865 { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND
},
866 { 0x14, 0x0000ffff, 0, 0x362, SISLANDS_CACCONFIG_CGIND
},
867 { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
868 { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND
},
869 { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
870 { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND
},
871 { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND
},
872 { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
873 { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
874 { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
875 { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
876 { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
877 { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
878 { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
879 { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
880 { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
881 { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
882 { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
883 { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
884 { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
885 { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
886 { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
887 { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
888 { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
889 { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
890 { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND
},
894 static const struct si_cac_config_reg cac_weights_cape_verde_pro
[] =
896 { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND
},
897 { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND
},
898 { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND
},
899 { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND
},
900 { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
901 { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND
},
902 { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND
},
903 { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND
},
904 { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND
},
905 { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND
},
906 { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND
},
907 { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND
},
908 { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND
},
909 { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND
},
910 { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND
},
911 { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND
},
912 { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND
},
913 { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND
},
914 { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND
},
915 { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND
},
916 { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND
},
917 { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND
},
918 { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND
},
919 { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND
},
920 { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND
},
921 { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND
},
922 { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND
},
923 { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
924 { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
925 { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND
},
926 { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND
},
927 { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND
},
928 { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND
},
929 { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND
},
930 { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND
},
931 { 0x14, 0x0000ffff, 0, 0x315, SISLANDS_CACCONFIG_CGIND
},
932 { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
933 { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND
},
934 { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
935 { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND
},
936 { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND
},
937 { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
938 { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
939 { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
940 { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
941 { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
942 { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
943 { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
944 { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
945 { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
946 { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
947 { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
948 { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
949 { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
950 { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
951 { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
952 { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
953 { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
954 { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
955 { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND
},
959 static const struct si_cac_config_reg cac_weights_cape_verde
[] =
961 { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND
},
962 { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND
},
963 { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND
},
964 { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND
},
965 { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
966 { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND
},
967 { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND
},
968 { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND
},
969 { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND
},
970 { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND
},
971 { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND
},
972 { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND
},
973 { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND
},
974 { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND
},
975 { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND
},
976 { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND
},
977 { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND
},
978 { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND
},
979 { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND
},
980 { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND
},
981 { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND
},
982 { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND
},
983 { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND
},
984 { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND
},
985 { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND
},
986 { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND
},
987 { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND
},
988 { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
989 { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
990 { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND
},
991 { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND
},
992 { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND
},
993 { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND
},
994 { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND
},
995 { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND
},
996 { 0x14, 0x0000ffff, 0, 0x3BA, SISLANDS_CACCONFIG_CGIND
},
997 { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
998 { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND
},
999 { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1000 { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND
},
1001 { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND
},
1002 { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1003 { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1004 { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1005 { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1006 { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1007 { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1008 { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
1009 { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1010 { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
1011 { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1012 { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
1013 { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1014 { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
1015 { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1016 { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
1017 { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1018 { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
1019 { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1020 { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND
},
1024 static const struct si_cac_config_reg lcac_cape_verde
[] =
1026 { 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
1027 { 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1028 { 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
1029 { 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1030 { 0x110, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND
},
1031 { 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1032 { 0x14f, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND
},
1033 { 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1034 { 0x8c, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND
},
1035 { 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1036 { 0x143, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1037 { 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1038 { 0x9b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
1039 { 0x9b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1040 { 0x107, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
1041 { 0x107, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1042 { 0x113, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND
},
1043 { 0x113, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1044 { 0x152, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND
},
1045 { 0x152, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1046 { 0x8f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1047 { 0x8f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1048 { 0x146, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1049 { 0x146, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1050 { 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
1051 { 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1052 { 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
1053 { 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1054 { 0x164, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
1055 { 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1056 { 0x167, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
1057 { 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1058 { 0x16a, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
1059 { 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1060 { 0x15e, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
1061 { 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1062 { 0x161, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
1063 { 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1064 { 0x15b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
1065 { 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1066 { 0x16d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
1067 { 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1068 { 0x170, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1069 { 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1070 { 0x173, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
1071 { 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1072 { 0x176, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1073 { 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1074 { 0x179, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1075 { 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1076 { 0x17c, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1077 { 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1078 { 0x17f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1079 { 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1083 static const struct si_cac_config_reg cac_override_cape_verde
[] =
1088 static const struct si_powertune_data powertune_data_cape_verde
=
1090 ((1 << 16) | 0x6993),
1119 static const struct si_dte_data dte_data_cape_verde
=
1130 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
1131 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
1132 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
1137 static const struct si_dte_data dte_data_venus_xtx
=
1139 { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
1140 { 0x71C, 0xAAB, 0xE39, 0x11C7, 0x0 },
1148 { 0x96, 0xB4, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
1149 { 0x895440, 0x3D0900, 0x989680, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
1150 { 0xD6D8, 0x88B8, 0x1555, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
1155 static const struct si_dte_data dte_data_venus_xt
=
1157 { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
1158 { 0xBDA, 0x11C7, 0x17B4, 0x1DA1, 0x0 },
1166 { 0x96, 0xB4, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
1167 { 0x895440, 0x3D0900, 0x989680, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
1168 { 0xAFC8, 0x88B8, 0x238E, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
1173 static const struct si_dte_data dte_data_venus_pro
=
1175 { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
1176 { 0x11C7, 0x1AAB, 0x238E, 0x2C72, 0x0 },
1184 { 0x96, 0xB4, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
1185 { 0x895440, 0x3D0900, 0x989680, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
1186 { 0x88B8, 0x88B8, 0x3555, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
1191 static const struct si_cac_config_reg cac_weights_oland
[] =
1193 { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND
},
1194 { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND
},
1195 { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND
},
1196 { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND
},
1197 { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1198 { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND
},
1199 { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND
},
1200 { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND
},
1201 { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND
},
1202 { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND
},
1203 { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND
},
1204 { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND
},
1205 { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND
},
1206 { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND
},
1207 { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND
},
1208 { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND
},
1209 { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND
},
1210 { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND
},
1211 { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND
},
1212 { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND
},
1213 { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND
},
1214 { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND
},
1215 { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND
},
1216 { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND
},
1217 { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND
},
1218 { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND
},
1219 { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND
},
1220 { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1221 { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1222 { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND
},
1223 { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND
},
1224 { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND
},
1225 { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND
},
1226 { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND
},
1227 { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND
},
1228 { 0x14, 0x0000ffff, 0, 0x3BA, SISLANDS_CACCONFIG_CGIND
},
1229 { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1230 { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND
},
1231 { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1232 { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND
},
1233 { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND
},
1234 { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1235 { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1236 { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1237 { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1238 { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1239 { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1240 { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
1241 { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1242 { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
1243 { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1244 { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
1245 { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1246 { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
1247 { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1248 { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
1249 { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1250 { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
1251 { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1252 { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND
},
1256 static const struct si_cac_config_reg cac_weights_mars_pro
[] =
1258 { 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND
},
1259 { 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND
},
1260 { 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND
},
1261 { 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND
},
1262 { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1263 { 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND
},
1264 { 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND
},
1265 { 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND
},
1266 { 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND
},
1267 { 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND
},
1268 { 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND
},
1269 { 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND
},
1270 { 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND
},
1271 { 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND
},
1272 { 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND
},
1273 { 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND
},
1274 { 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND
},
1275 { 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND
},
1276 { 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND
},
1277 { 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND
},
1278 { 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND
},
1279 { 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND
},
1280 { 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND
},
1281 { 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND
},
1282 { 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND
},
1283 { 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND
},
1284 { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND
},
1285 { 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND
},
1286 { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1287 { 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND
},
1288 { 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND
},
1289 { 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND
},
1290 { 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND
},
1291 { 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND
},
1292 { 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND
},
1293 { 0x14, 0x0000ffff, 0, 0x2, SISLANDS_CACCONFIG_CGIND
},
1294 { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1295 { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND
},
1296 { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1297 { 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND
},
1298 { 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND
},
1299 { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1300 { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1301 { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1302 { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1303 { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1304 { 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND
},
1305 { 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND
},
1306 { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1307 { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1308 { 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND
},
1309 { 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND
},
1310 { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1311 { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1312 { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1313 { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
1314 { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1315 { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
1316 { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1317 { 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND
},
1321 static const struct si_cac_config_reg cac_weights_mars_xt
[] =
1323 { 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND
},
1324 { 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND
},
1325 { 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND
},
1326 { 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND
},
1327 { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1328 { 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND
},
1329 { 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND
},
1330 { 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND
},
1331 { 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND
},
1332 { 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND
},
1333 { 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND
},
1334 { 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND
},
1335 { 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND
},
1336 { 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND
},
1337 { 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND
},
1338 { 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND
},
1339 { 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND
},
1340 { 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND
},
1341 { 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND
},
1342 { 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND
},
1343 { 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND
},
1344 { 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND
},
1345 { 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND
},
1346 { 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND
},
1347 { 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND
},
1348 { 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND
},
1349 { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND
},
1350 { 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND
},
1351 { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1352 { 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND
},
1353 { 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND
},
1354 { 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND
},
1355 { 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND
},
1356 { 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND
},
1357 { 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND
},
1358 { 0x14, 0x0000ffff, 0, 0x60, SISLANDS_CACCONFIG_CGIND
},
1359 { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1360 { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND
},
1361 { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1362 { 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND
},
1363 { 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND
},
1364 { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1365 { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1366 { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1367 { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1368 { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1369 { 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND
},
1370 { 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND
},
1371 { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1372 { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1373 { 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND
},
1374 { 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND
},
1375 { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1376 { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1377 { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1378 { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
1379 { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1380 { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
1381 { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1382 { 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND
},
1386 static const struct si_cac_config_reg cac_weights_oland_pro
[] =
1388 { 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND
},
1389 { 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND
},
1390 { 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND
},
1391 { 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND
},
1392 { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1393 { 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND
},
1394 { 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND
},
1395 { 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND
},
1396 { 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND
},
1397 { 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND
},
1398 { 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND
},
1399 { 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND
},
1400 { 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND
},
1401 { 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND
},
1402 { 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND
},
1403 { 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND
},
1404 { 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND
},
1405 { 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND
},
1406 { 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND
},
1407 { 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND
},
1408 { 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND
},
1409 { 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND
},
1410 { 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND
},
1411 { 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND
},
1412 { 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND
},
1413 { 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND
},
1414 { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND
},
1415 { 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND
},
1416 { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1417 { 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND
},
1418 { 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND
},
1419 { 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND
},
1420 { 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND
},
1421 { 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND
},
1422 { 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND
},
1423 { 0x14, 0x0000ffff, 0, 0x90, SISLANDS_CACCONFIG_CGIND
},
1424 { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1425 { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND
},
1426 { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1427 { 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND
},
1428 { 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND
},
1429 { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1430 { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1431 { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1432 { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1433 { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1434 { 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND
},
1435 { 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND
},
1436 { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1437 { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1438 { 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND
},
1439 { 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND
},
1440 { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1441 { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1442 { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1443 { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
1444 { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1445 { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
1446 { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1447 { 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND
},
1451 static const struct si_cac_config_reg cac_weights_oland_xt
[] =
1453 { 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND
},
1454 { 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND
},
1455 { 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND
},
1456 { 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND
},
1457 { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1458 { 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND
},
1459 { 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND
},
1460 { 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND
},
1461 { 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND
},
1462 { 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND
},
1463 { 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND
},
1464 { 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND
},
1465 { 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND
},
1466 { 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND
},
1467 { 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND
},
1468 { 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND
},
1469 { 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND
},
1470 { 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND
},
1471 { 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND
},
1472 { 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND
},
1473 { 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND
},
1474 { 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND
},
1475 { 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND
},
1476 { 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND
},
1477 { 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND
},
1478 { 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND
},
1479 { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND
},
1480 { 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND
},
1481 { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1482 { 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND
},
1483 { 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND
},
1484 { 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND
},
1485 { 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND
},
1486 { 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND
},
1487 { 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND
},
1488 { 0x14, 0x0000ffff, 0, 0x120, SISLANDS_CACCONFIG_CGIND
},
1489 { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1490 { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND
},
1491 { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1492 { 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND
},
1493 { 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND
},
1494 { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1495 { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1496 { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1497 { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1498 { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1499 { 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND
},
1500 { 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND
},
1501 { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1502 { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1503 { 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND
},
1504 { 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND
},
1505 { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1506 { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1507 { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1508 { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
1509 { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1510 { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
1511 { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1512 { 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND
},
1516 static const struct si_cac_config_reg lcac_oland
[] =
1518 { 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
1519 { 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1520 { 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
1521 { 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1522 { 0x110, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND
},
1523 { 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1524 { 0x14f, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND
},
1525 { 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1526 { 0x8c, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND
},
1527 { 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1528 { 0x143, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND
},
1529 { 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1530 { 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
1531 { 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1532 { 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
1533 { 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1534 { 0x164, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1535 { 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1536 { 0x167, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1537 { 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1538 { 0x16a, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1539 { 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1540 { 0x15e, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1541 { 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1542 { 0x161, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1543 { 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1544 { 0x15b, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1545 { 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1546 { 0x16d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
1547 { 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1548 { 0x170, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1549 { 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1550 { 0x173, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1551 { 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1552 { 0x176, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1553 { 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1554 { 0x179, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1555 { 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1556 { 0x17c, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1557 { 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1558 { 0x17f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1559 { 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1563 static const struct si_cac_config_reg lcac_mars_pro
[] =
1565 { 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
1566 { 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1567 { 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
1568 { 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1569 { 0x110, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND
},
1570 { 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1571 { 0x14f, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND
},
1572 { 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1573 { 0x8c, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND
},
1574 { 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1575 { 0x143, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
1576 { 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1577 { 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
1578 { 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1579 { 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
1580 { 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1581 { 0x164, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1582 { 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1583 { 0x167, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1584 { 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1585 { 0x16a, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1586 { 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1587 { 0x15e, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1588 { 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1589 { 0x161, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1590 { 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1591 { 0x15b, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1592 { 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1593 { 0x16d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
1594 { 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1595 { 0x170, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1596 { 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1597 { 0x173, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1598 { 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1599 { 0x176, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1600 { 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1601 { 0x179, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1602 { 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1603 { 0x17c, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1604 { 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1605 { 0x17f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1606 { 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1610 static const struct si_cac_config_reg cac_override_oland
[] =
1615 static const struct si_powertune_data powertune_data_oland
=
1617 ((1 << 16) | 0x6993),
1646 static const struct si_powertune_data powertune_data_mars_pro
=
1648 ((1 << 16) | 0x6993),
1677 static const struct si_dte_data dte_data_oland
=
1688 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
1689 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
1690 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
1695 static const struct si_dte_data dte_data_mars_pro
=
1697 { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
1698 { 0x0, 0x0, 0x0, 0x0, 0x0 },
1706 { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
1707 { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
1708 { 0xF627, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
1713 static const struct si_dte_data dte_data_sun_xt
=
1715 { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
1716 { 0x0, 0x0, 0x0, 0x0, 0x0 },
1724 { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
1725 { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
1726 { 0xD555, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
1732 static const struct si_cac_config_reg cac_weights_hainan
[] =
1734 { 0x0, 0x0000ffff, 0, 0x2d9, SISLANDS_CACCONFIG_CGIND
},
1735 { 0x0, 0xffff0000, 16, 0x22b, SISLANDS_CACCONFIG_CGIND
},
1736 { 0x1, 0x0000ffff, 0, 0x21c, SISLANDS_CACCONFIG_CGIND
},
1737 { 0x1, 0xffff0000, 16, 0x1dc, SISLANDS_CACCONFIG_CGIND
},
1738 { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1739 { 0x3, 0x0000ffff, 0, 0x24e, SISLANDS_CACCONFIG_CGIND
},
1740 { 0x3, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1741 { 0x4, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1742 { 0x4, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1743 { 0x5, 0x0000ffff, 0, 0x35e, SISLANDS_CACCONFIG_CGIND
},
1744 { 0x5, 0xffff0000, 16, 0x1143, SISLANDS_CACCONFIG_CGIND
},
1745 { 0x6, 0x0000ffff, 0, 0xe17, SISLANDS_CACCONFIG_CGIND
},
1746 { 0x6, 0xffff0000, 16, 0x441, SISLANDS_CACCONFIG_CGIND
},
1747 { 0x18f, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1748 { 0x7, 0x0000ffff, 0, 0x28b, SISLANDS_CACCONFIG_CGIND
},
1749 { 0x7, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1750 { 0x8, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1751 { 0x8, 0xffff0000, 16, 0xabe, SISLANDS_CACCONFIG_CGIND
},
1752 { 0x9, 0x0000ffff, 0, 0xf11, SISLANDS_CACCONFIG_CGIND
},
1753 { 0xa, 0x0000ffff, 0, 0x907, SISLANDS_CACCONFIG_CGIND
},
1754 { 0xb, 0x0000ffff, 0, 0xb45, SISLANDS_CACCONFIG_CGIND
},
1755 { 0xb, 0xffff0000, 16, 0xd1e, SISLANDS_CACCONFIG_CGIND
},
1756 { 0xc, 0x0000ffff, 0, 0xa2c, SISLANDS_CACCONFIG_CGIND
},
1757 { 0xd, 0x0000ffff, 0, 0x62, SISLANDS_CACCONFIG_CGIND
},
1758 { 0xd, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1759 { 0xe, 0x0000ffff, 0, 0x1f3, SISLANDS_CACCONFIG_CGIND
},
1760 { 0xf, 0x0000ffff, 0, 0x42, SISLANDS_CACCONFIG_CGIND
},
1761 { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1762 { 0x10, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1763 { 0x10, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1764 { 0x11, 0x0000ffff, 0, 0x709, SISLANDS_CACCONFIG_CGIND
},
1765 { 0x11, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1766 { 0x12, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1767 { 0x13, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1768 { 0x13, 0xffff0000, 16, 0x3a, SISLANDS_CACCONFIG_CGIND
},
1769 { 0x14, 0x0000ffff, 0, 0x357, SISLANDS_CACCONFIG_CGIND
},
1770 { 0x15, 0x0000ffff, 0, 0x9f, SISLANDS_CACCONFIG_CGIND
},
1771 { 0x15, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1772 { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1773 { 0x16, 0x0000ffff, 0, 0x314, SISLANDS_CACCONFIG_CGIND
},
1774 { 0x16, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1775 { 0x17, 0x0000ffff, 0, 0x6d, SISLANDS_CACCONFIG_CGIND
},
1776 { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1777 { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1778 { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1779 { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1780 { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1781 { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
1782 { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1783 { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
1784 { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1785 { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
1786 { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1787 { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
1788 { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1789 { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
1790 { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1791 { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
1792 { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1793 { 0x6d, 0x0000ffff, 0, 0x1b9, SISLANDS_CACCONFIG_CGIND
},
1797 static const struct si_powertune_data powertune_data_hainan
=
1799 ((1 << 16) | 0x6993),
1828 static struct rv7xx_power_info
*rv770_get_pi(struct amdgpu_device
*adev
);
1829 static struct evergreen_power_info
*evergreen_get_pi(struct amdgpu_device
*adev
);
1830 static struct ni_power_info
*ni_get_pi(struct amdgpu_device
*adev
);
1831 static struct si_ps
*si_get_ps(struct amdgpu_ps
*rps
);
1833 static int si_populate_voltage_value(struct amdgpu_device
*adev
,
1834 const struct atom_voltage_table
*table
,
1835 u16 value
, SISLANDS_SMC_VOLTAGE_VALUE
*voltage
);
1836 static int si_get_std_voltage_value(struct amdgpu_device
*adev
,
1837 SISLANDS_SMC_VOLTAGE_VALUE
*voltage
,
1839 static int si_write_smc_soft_register(struct amdgpu_device
*adev
,
1840 u16 reg_offset
, u32 value
);
1841 static int si_convert_power_level_to_smc(struct amdgpu_device
*adev
,
1842 struct rv7xx_pl
*pl
,
1843 SISLANDS_SMC_HW_PERFORMANCE_LEVEL
*level
);
1844 static int si_calculate_sclk_params(struct amdgpu_device
*adev
,
1846 SISLANDS_SMC_SCLK_VALUE
*sclk
);
1848 static void si_thermal_start_smc_fan_control(struct amdgpu_device
*adev
);
1849 static void si_fan_ctrl_set_default_mode(struct amdgpu_device
*adev
);
1850 static void si_dpm_set_dpm_funcs(struct amdgpu_device
*adev
);
1851 static void si_dpm_set_irq_funcs(struct amdgpu_device
*adev
);
1853 static struct si_power_info
*si_get_pi(struct amdgpu_device
*adev
)
1855 struct si_power_info
*pi
= adev
->pm
.dpm
.priv
;
1859 static void si_calculate_leakage_for_v_and_t_formula(const struct ni_leakage_coeffients
*coeff
,
1860 u16 v
, s32 t
, u32 ileakage
, u32
*leakage
)
1862 s64 kt
, kv
, leakage_w
, i_leakage
, vddc
;
1863 s64 temperature
, t_slope
, t_intercept
, av
, bv
, t_ref
;
1866 i_leakage
= div64_s64(drm_int2fixp(ileakage
), 100);
1867 vddc
= div64_s64(drm_int2fixp(v
), 1000);
1868 temperature
= div64_s64(drm_int2fixp(t
), 1000);
1870 t_slope
= div64_s64(drm_int2fixp(coeff
->t_slope
), 100000000);
1871 t_intercept
= div64_s64(drm_int2fixp(coeff
->t_intercept
), 100000000);
1872 av
= div64_s64(drm_int2fixp(coeff
->av
), 100000000);
1873 bv
= div64_s64(drm_int2fixp(coeff
->bv
), 100000000);
1874 t_ref
= drm_int2fixp(coeff
->t_ref
);
1876 tmp
= drm_fixp_mul(t_slope
, vddc
) + t_intercept
;
1877 kt
= drm_fixp_exp(drm_fixp_mul(tmp
, temperature
));
1878 kt
= drm_fixp_div(kt
, drm_fixp_exp(drm_fixp_mul(tmp
, t_ref
)));
1879 kv
= drm_fixp_mul(av
, drm_fixp_exp(drm_fixp_mul(bv
, vddc
)));
1881 leakage_w
= drm_fixp_mul(drm_fixp_mul(drm_fixp_mul(i_leakage
, kt
), kv
), vddc
);
1883 *leakage
= drm_fixp2int(leakage_w
* 1000);
1886 static void si_calculate_leakage_for_v_and_t(struct amdgpu_device
*adev
,
1887 const struct ni_leakage_coeffients
*coeff
,
1893 si_calculate_leakage_for_v_and_t_formula(coeff
, v
, t
, i_leakage
, leakage
);
1896 static void si_calculate_leakage_for_v_formula(const struct ni_leakage_coeffients
*coeff
,
1897 const u32 fixed_kt
, u16 v
,
1898 u32 ileakage
, u32
*leakage
)
1900 s64 kt
, kv
, leakage_w
, i_leakage
, vddc
;
1902 i_leakage
= div64_s64(drm_int2fixp(ileakage
), 100);
1903 vddc
= div64_s64(drm_int2fixp(v
), 1000);
1905 kt
= div64_s64(drm_int2fixp(fixed_kt
), 100000000);
1906 kv
= drm_fixp_mul(div64_s64(drm_int2fixp(coeff
->av
), 100000000),
1907 drm_fixp_exp(drm_fixp_mul(div64_s64(drm_int2fixp(coeff
->bv
), 100000000), vddc
)));
1909 leakage_w
= drm_fixp_mul(drm_fixp_mul(drm_fixp_mul(i_leakage
, kt
), kv
), vddc
);
1911 *leakage
= drm_fixp2int(leakage_w
* 1000);
1914 static void si_calculate_leakage_for_v(struct amdgpu_device
*adev
,
1915 const struct ni_leakage_coeffients
*coeff
,
1921 si_calculate_leakage_for_v_formula(coeff
, fixed_kt
, v
, i_leakage
, leakage
);
1925 static void si_update_dte_from_pl2(struct amdgpu_device
*adev
,
1926 struct si_dte_data
*dte_data
)
1928 u32 p_limit1
= adev
->pm
.dpm
.tdp_limit
;
1929 u32 p_limit2
= adev
->pm
.dpm
.near_tdp_limit
;
1930 u32 k
= dte_data
->k
;
1931 u32 t_max
= dte_data
->max_t
;
1932 u32 t_split
[5] = { 10, 15, 20, 25, 30 };
1933 u32 t_0
= dte_data
->t0
;
1936 if (p_limit2
!= 0 && p_limit2
<= p_limit1
) {
1937 dte_data
->tdep_count
= 3;
1939 for (i
= 0; i
< k
; i
++) {
1941 (t_split
[i
] * (t_max
- t_0
/(u32
)1000) * (1 << 14)) /
1942 (p_limit2
* (u32
)100);
1945 dte_data
->tdep_r
[1] = dte_data
->r
[4] * 2;
1947 for (i
= 2; i
< SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE
; i
++) {
1948 dte_data
->tdep_r
[i
] = dte_data
->r
[4];
1951 DRM_ERROR("Invalid PL2! DTE will not be updated.\n");
1955 static struct rv7xx_power_info
*rv770_get_pi(struct amdgpu_device
*adev
)
1957 struct rv7xx_power_info
*pi
= adev
->pm
.dpm
.priv
;
1962 static struct ni_power_info
*ni_get_pi(struct amdgpu_device
*adev
)
1964 struct ni_power_info
*pi
= adev
->pm
.dpm
.priv
;
1969 static struct si_ps
*si_get_ps(struct amdgpu_ps
*aps
)
1971 struct si_ps
*ps
= aps
->ps_priv
;
1976 static void si_initialize_powertune_defaults(struct amdgpu_device
*adev
)
1978 struct ni_power_info
*ni_pi
= ni_get_pi(adev
);
1979 struct si_power_info
*si_pi
= si_get_pi(adev
);
1980 bool update_dte_from_pl2
= false;
1982 if (adev
->asic_type
== CHIP_TAHITI
) {
1983 si_pi
->cac_weights
= cac_weights_tahiti
;
1984 si_pi
->lcac_config
= lcac_tahiti
;
1985 si_pi
->cac_override
= cac_override_tahiti
;
1986 si_pi
->powertune_data
= &powertune_data_tahiti
;
1987 si_pi
->dte_data
= dte_data_tahiti
;
1989 switch (adev
->pdev
->device
) {
1991 si_pi
->dte_data
.enable_dte_by_default
= true;
1994 si_pi
->dte_data
= dte_data_new_zealand
;
2000 si_pi
->dte_data
= dte_data_aruba_pro
;
2001 update_dte_from_pl2
= true;
2004 si_pi
->dte_data
= dte_data_malta
;
2005 update_dte_from_pl2
= true;
2008 si_pi
->dte_data
= dte_data_tahiti_pro
;
2009 update_dte_from_pl2
= true;
2012 if (si_pi
->dte_data
.enable_dte_by_default
== true)
2013 DRM_ERROR("DTE is not enabled!\n");
2016 } else if (adev
->asic_type
== CHIP_PITCAIRN
) {
2017 si_pi
->cac_weights
= cac_weights_pitcairn
;
2018 si_pi
->lcac_config
= lcac_pitcairn
;
2019 si_pi
->cac_override
= cac_override_pitcairn
;
2020 si_pi
->powertune_data
= &powertune_data_pitcairn
;
2022 switch (adev
->pdev
->device
) {
2025 si_pi
->dte_data
= dte_data_curacao_xt
;
2026 update_dte_from_pl2
= true;
2030 si_pi
->dte_data
= dte_data_curacao_pro
;
2031 update_dte_from_pl2
= true;
2035 si_pi
->dte_data
= dte_data_neptune_xt
;
2036 update_dte_from_pl2
= true;
2039 si_pi
->dte_data
= dte_data_pitcairn
;
2042 } else if (adev
->asic_type
== CHIP_VERDE
) {
2043 si_pi
->lcac_config
= lcac_cape_verde
;
2044 si_pi
->cac_override
= cac_override_cape_verde
;
2045 si_pi
->powertune_data
= &powertune_data_cape_verde
;
2047 switch (adev
->pdev
->device
) {
2052 si_pi
->cac_weights
= cac_weights_cape_verde_pro
;
2053 si_pi
->dte_data
= dte_data_cape_verde
;
2056 si_pi
->cac_weights
= cac_weights_cape_verde_pro
;
2057 si_pi
->dte_data
= dte_data_sun_xt
;
2061 si_pi
->cac_weights
= cac_weights_heathrow
;
2062 si_pi
->dte_data
= dte_data_cape_verde
;
2066 si_pi
->cac_weights
= cac_weights_chelsea_xt
;
2067 si_pi
->dte_data
= dte_data_cape_verde
;
2070 si_pi
->cac_weights
= cac_weights_chelsea_pro
;
2071 si_pi
->dte_data
= dte_data_cape_verde
;
2074 si_pi
->cac_weights
= cac_weights_heathrow
;
2075 si_pi
->dte_data
= dte_data_venus_xtx
;
2078 si_pi
->cac_weights
= cac_weights_heathrow
;
2079 si_pi
->dte_data
= dte_data_venus_xt
;
2085 si_pi
->cac_weights
= cac_weights_chelsea_pro
;
2086 si_pi
->dte_data
= dte_data_venus_pro
;
2089 si_pi
->cac_weights
= cac_weights_cape_verde
;
2090 si_pi
->dte_data
= dte_data_cape_verde
;
2093 } else if (adev
->asic_type
== CHIP_OLAND
) {
2094 si_pi
->lcac_config
= lcac_mars_pro
;
2095 si_pi
->cac_override
= cac_override_oland
;
2096 si_pi
->powertune_data
= &powertune_data_mars_pro
;
2097 si_pi
->dte_data
= dte_data_mars_pro
;
2099 switch (adev
->pdev
->device
) {
2104 si_pi
->cac_weights
= cac_weights_mars_pro
;
2105 update_dte_from_pl2
= true;
2111 si_pi
->cac_weights
= cac_weights_mars_xt
;
2112 update_dte_from_pl2
= true;
2117 si_pi
->cac_weights
= cac_weights_oland_pro
;
2118 update_dte_from_pl2
= true;
2121 si_pi
->cac_weights
= cac_weights_oland_xt
;
2122 update_dte_from_pl2
= true;
2125 si_pi
->cac_weights
= cac_weights_oland
;
2126 si_pi
->lcac_config
= lcac_oland
;
2127 si_pi
->cac_override
= cac_override_oland
;
2128 si_pi
->powertune_data
= &powertune_data_oland
;
2129 si_pi
->dte_data
= dte_data_oland
;
2132 } else if (adev
->asic_type
== CHIP_HAINAN
) {
2133 si_pi
->cac_weights
= cac_weights_hainan
;
2134 si_pi
->lcac_config
= lcac_oland
;
2135 si_pi
->cac_override
= cac_override_oland
;
2136 si_pi
->powertune_data
= &powertune_data_hainan
;
2137 si_pi
->dte_data
= dte_data_sun_xt
;
2138 update_dte_from_pl2
= true;
2140 DRM_ERROR("Unknown SI asic revision, failed to initialize PowerTune!\n");
2144 ni_pi
->enable_power_containment
= false;
2145 ni_pi
->enable_cac
= false;
2146 ni_pi
->enable_sq_ramping
= false;
2147 si_pi
->enable_dte
= false;
2149 if (si_pi
->powertune_data
->enable_powertune_by_default
) {
2150 ni_pi
->enable_power_containment
= true;
2151 ni_pi
->enable_cac
= true;
2152 if (si_pi
->dte_data
.enable_dte_by_default
) {
2153 si_pi
->enable_dte
= true;
2154 if (update_dte_from_pl2
)
2155 si_update_dte_from_pl2(adev
, &si_pi
->dte_data
);
2158 ni_pi
->enable_sq_ramping
= true;
2161 ni_pi
->driver_calculate_cac_leakage
= true;
2162 ni_pi
->cac_configuration_required
= true;
2164 if (ni_pi
->cac_configuration_required
) {
2165 ni_pi
->support_cac_long_term_average
= true;
2166 si_pi
->dyn_powertune_data
.l2_lta_window_size
=
2167 si_pi
->powertune_data
->l2_lta_window_size_default
;
2168 si_pi
->dyn_powertune_data
.lts_truncate
=
2169 si_pi
->powertune_data
->lts_truncate_default
;
2171 ni_pi
->support_cac_long_term_average
= false;
2172 si_pi
->dyn_powertune_data
.l2_lta_window_size
= 0;
2173 si_pi
->dyn_powertune_data
.lts_truncate
= 0;
2176 si_pi
->dyn_powertune_data
.disable_uvd_powertune
= false;
2179 static u32
si_get_smc_power_scaling_factor(struct amdgpu_device
*adev
)
2184 static u32
si_calculate_cac_wintime(struct amdgpu_device
*adev
)
2189 u32 cac_window_size
;
2191 xclk
= amdgpu_asic_get_xclk(adev
);
2196 cac_window
= RREG32(CG_CAC_CTRL
) & CAC_WINDOW_MASK
;
2197 cac_window_size
= ((cac_window
& 0xFFFF0000) >> 16) * (cac_window
& 0x0000FFFF);
2199 wintime
= (cac_window_size
* 100) / xclk
;
2204 static u32
si_scale_power_for_smc(u32 power_in_watts
, u32 scaling_factor
)
2206 return power_in_watts
;
2209 static int si_calculate_adjusted_tdp_limits(struct amdgpu_device
*adev
,
2210 bool adjust_polarity
,
2213 u32
*near_tdp_limit
)
2215 u32 adjustment_delta
, max_tdp_limit
;
2217 if (tdp_adjustment
> (u32
)adev
->pm
.dpm
.tdp_od_limit
)
2220 max_tdp_limit
= ((100 + 100) * adev
->pm
.dpm
.tdp_limit
) / 100;
2222 if (adjust_polarity
) {
2223 *tdp_limit
= ((100 + tdp_adjustment
) * adev
->pm
.dpm
.tdp_limit
) / 100;
2224 *near_tdp_limit
= adev
->pm
.dpm
.near_tdp_limit_adjusted
+ (*tdp_limit
- adev
->pm
.dpm
.tdp_limit
);
2226 *tdp_limit
= ((100 - tdp_adjustment
) * adev
->pm
.dpm
.tdp_limit
) / 100;
2227 adjustment_delta
= adev
->pm
.dpm
.tdp_limit
- *tdp_limit
;
2228 if (adjustment_delta
< adev
->pm
.dpm
.near_tdp_limit_adjusted
)
2229 *near_tdp_limit
= adev
->pm
.dpm
.near_tdp_limit_adjusted
- adjustment_delta
;
2231 *near_tdp_limit
= 0;
2234 if ((*tdp_limit
<= 0) || (*tdp_limit
> max_tdp_limit
))
2236 if ((*near_tdp_limit
<= 0) || (*near_tdp_limit
> *tdp_limit
))
2242 static int si_populate_smc_tdp_limits(struct amdgpu_device
*adev
,
2243 struct amdgpu_ps
*amdgpu_state
)
2245 struct ni_power_info
*ni_pi
= ni_get_pi(adev
);
2246 struct si_power_info
*si_pi
= si_get_pi(adev
);
2248 if (ni_pi
->enable_power_containment
) {
2249 SISLANDS_SMC_STATETABLE
*smc_table
= &si_pi
->smc_statetable
;
2250 PP_SIslands_PAPMParameters
*papm_parm
;
2251 struct amdgpu_ppm_table
*ppm
= adev
->pm
.dpm
.dyn_state
.ppm_table
;
2252 u32 scaling_factor
= si_get_smc_power_scaling_factor(adev
);
2257 if (scaling_factor
== 0)
2260 memset(smc_table
, 0, sizeof(SISLANDS_SMC_STATETABLE
));
2262 ret
= si_calculate_adjusted_tdp_limits(adev
,
2264 adev
->pm
.dpm
.tdp_adjustment
,
2270 smc_table
->dpm2Params
.TDPLimit
=
2271 cpu_to_be32(si_scale_power_for_smc(tdp_limit
, scaling_factor
) * 1000);
2272 smc_table
->dpm2Params
.NearTDPLimit
=
2273 cpu_to_be32(si_scale_power_for_smc(near_tdp_limit
, scaling_factor
) * 1000);
2274 smc_table
->dpm2Params
.SafePowerLimit
=
2275 cpu_to_be32(si_scale_power_for_smc((near_tdp_limit
* SISLANDS_DPM2_TDP_SAFE_LIMIT_PERCENT
) / 100, scaling_factor
) * 1000);
2277 ret
= amdgpu_si_copy_bytes_to_smc(adev
,
2278 (si_pi
->state_table_start
+ offsetof(SISLANDS_SMC_STATETABLE
, dpm2Params
) +
2279 offsetof(PP_SIslands_DPM2Parameters
, TDPLimit
)),
2280 (u8
*)(&(smc_table
->dpm2Params
.TDPLimit
)),
2286 if (si_pi
->enable_ppm
) {
2287 papm_parm
= &si_pi
->papm_parm
;
2288 memset(papm_parm
, 0, sizeof(PP_SIslands_PAPMParameters
));
2289 papm_parm
->NearTDPLimitTherm
= cpu_to_be32(ppm
->dgpu_tdp
);
2290 papm_parm
->dGPU_T_Limit
= cpu_to_be32(ppm
->tj_max
);
2291 papm_parm
->dGPU_T_Warning
= cpu_to_be32(95);
2292 papm_parm
->dGPU_T_Hysteresis
= cpu_to_be32(5);
2293 papm_parm
->PlatformPowerLimit
= 0xffffffff;
2294 papm_parm
->NearTDPLimitPAPM
= 0xffffffff;
2296 ret
= amdgpu_si_copy_bytes_to_smc(adev
, si_pi
->papm_cfg_table_start
,
2298 sizeof(PP_SIslands_PAPMParameters
),
2307 static int si_populate_smc_tdp_limits_2(struct amdgpu_device
*adev
,
2308 struct amdgpu_ps
*amdgpu_state
)
2310 struct ni_power_info
*ni_pi
= ni_get_pi(adev
);
2311 struct si_power_info
*si_pi
= si_get_pi(adev
);
2313 if (ni_pi
->enable_power_containment
) {
2314 SISLANDS_SMC_STATETABLE
*smc_table
= &si_pi
->smc_statetable
;
2315 u32 scaling_factor
= si_get_smc_power_scaling_factor(adev
);
2318 memset(smc_table
, 0, sizeof(SISLANDS_SMC_STATETABLE
));
2320 smc_table
->dpm2Params
.NearTDPLimit
=
2321 cpu_to_be32(si_scale_power_for_smc(adev
->pm
.dpm
.near_tdp_limit_adjusted
, scaling_factor
) * 1000);
2322 smc_table
->dpm2Params
.SafePowerLimit
=
2323 cpu_to_be32(si_scale_power_for_smc((adev
->pm
.dpm
.near_tdp_limit_adjusted
* SISLANDS_DPM2_TDP_SAFE_LIMIT_PERCENT
) / 100, scaling_factor
) * 1000);
2325 ret
= amdgpu_si_copy_bytes_to_smc(adev
,
2326 (si_pi
->state_table_start
+
2327 offsetof(SISLANDS_SMC_STATETABLE
, dpm2Params
) +
2328 offsetof(PP_SIslands_DPM2Parameters
, NearTDPLimit
)),
2329 (u8
*)(&(smc_table
->dpm2Params
.NearTDPLimit
)),
2339 static u16
si_calculate_power_efficiency_ratio(struct amdgpu_device
*adev
,
2340 const u16 prev_std_vddc
,
2341 const u16 curr_std_vddc
)
2343 u64 margin
= (u64
)SISLANDS_DPM2_PWREFFICIENCYRATIO_MARGIN
;
2344 u64 prev_vddc
= (u64
)prev_std_vddc
;
2345 u64 curr_vddc
= (u64
)curr_std_vddc
;
2346 u64 pwr_efficiency_ratio
, n
, d
;
2348 if ((prev_vddc
== 0) || (curr_vddc
== 0))
2351 n
= div64_u64((u64
)1024 * curr_vddc
* curr_vddc
* ((u64
)1000 + margin
), (u64
)1000);
2352 d
= prev_vddc
* prev_vddc
;
2353 pwr_efficiency_ratio
= div64_u64(n
, d
);
2355 if (pwr_efficiency_ratio
> (u64
)0xFFFF)
2358 return (u16
)pwr_efficiency_ratio
;
2361 static bool si_should_disable_uvd_powertune(struct amdgpu_device
*adev
,
2362 struct amdgpu_ps
*amdgpu_state
)
2364 struct si_power_info
*si_pi
= si_get_pi(adev
);
2366 if (si_pi
->dyn_powertune_data
.disable_uvd_powertune
&&
2367 amdgpu_state
->vclk
&& amdgpu_state
->dclk
)
2373 struct evergreen_power_info
*evergreen_get_pi(struct amdgpu_device
*adev
)
2375 struct evergreen_power_info
*pi
= adev
->pm
.dpm
.priv
;
2380 static int si_populate_power_containment_values(struct amdgpu_device
*adev
,
2381 struct amdgpu_ps
*amdgpu_state
,
2382 SISLANDS_SMC_SWSTATE
*smc_state
)
2384 struct evergreen_power_info
*eg_pi
= evergreen_get_pi(adev
);
2385 struct ni_power_info
*ni_pi
= ni_get_pi(adev
);
2386 struct si_ps
*state
= si_get_ps(amdgpu_state
);
2387 SISLANDS_SMC_VOLTAGE_VALUE vddc
;
2394 u16 pwr_efficiency_ratio
;
2396 bool disable_uvd_power_tune
;
2399 if (ni_pi
->enable_power_containment
== false)
2402 if (state
->performance_level_count
== 0)
2405 if (smc_state
->levelCount
!= state
->performance_level_count
)
2408 disable_uvd_power_tune
= si_should_disable_uvd_powertune(adev
, amdgpu_state
);
2410 smc_state
->levels
[0].dpm2
.MaxPS
= 0;
2411 smc_state
->levels
[0].dpm2
.NearTDPDec
= 0;
2412 smc_state
->levels
[0].dpm2
.AboveSafeInc
= 0;
2413 smc_state
->levels
[0].dpm2
.BelowSafeInc
= 0;
2414 smc_state
->levels
[0].dpm2
.PwrEfficiencyRatio
= 0;
2416 for (i
= 1; i
< state
->performance_level_count
; i
++) {
2417 prev_sclk
= state
->performance_levels
[i
-1].sclk
;
2418 max_sclk
= state
->performance_levels
[i
].sclk
;
2420 max_ps_percent
= SISLANDS_DPM2_MAXPS_PERCENT_M
;
2422 max_ps_percent
= SISLANDS_DPM2_MAXPS_PERCENT_H
;
2424 if (prev_sclk
> max_sclk
)
2427 if ((max_ps_percent
== 0) ||
2428 (prev_sclk
== max_sclk
) ||
2429 disable_uvd_power_tune
)
2430 min_sclk
= max_sclk
;
2432 min_sclk
= prev_sclk
;
2434 min_sclk
= (prev_sclk
* (u32
)max_ps_percent
) / 100;
2436 if (min_sclk
< state
->performance_levels
[0].sclk
)
2437 min_sclk
= state
->performance_levels
[0].sclk
;
2442 ret
= si_populate_voltage_value(adev
, &eg_pi
->vddc_voltage_table
,
2443 state
->performance_levels
[i
-1].vddc
, &vddc
);
2447 ret
= si_get_std_voltage_value(adev
, &vddc
, &prev_std_vddc
);
2451 ret
= si_populate_voltage_value(adev
, &eg_pi
->vddc_voltage_table
,
2452 state
->performance_levels
[i
].vddc
, &vddc
);
2456 ret
= si_get_std_voltage_value(adev
, &vddc
, &curr_std_vddc
);
2460 pwr_efficiency_ratio
= si_calculate_power_efficiency_ratio(adev
,
2461 prev_std_vddc
, curr_std_vddc
);
2463 smc_state
->levels
[i
].dpm2
.MaxPS
= (u8
)((SISLANDS_DPM2_MAX_PULSE_SKIP
* (max_sclk
- min_sclk
)) / max_sclk
);
2464 smc_state
->levels
[i
].dpm2
.NearTDPDec
= SISLANDS_DPM2_NEAR_TDP_DEC
;
2465 smc_state
->levels
[i
].dpm2
.AboveSafeInc
= SISLANDS_DPM2_ABOVE_SAFE_INC
;
2466 smc_state
->levels
[i
].dpm2
.BelowSafeInc
= SISLANDS_DPM2_BELOW_SAFE_INC
;
2467 smc_state
->levels
[i
].dpm2
.PwrEfficiencyRatio
= cpu_to_be16(pwr_efficiency_ratio
);
2473 static int si_populate_sq_ramping_values(struct amdgpu_device
*adev
,
2474 struct amdgpu_ps
*amdgpu_state
,
2475 SISLANDS_SMC_SWSTATE
*smc_state
)
2477 struct ni_power_info
*ni_pi
= ni_get_pi(adev
);
2478 struct si_ps
*state
= si_get_ps(amdgpu_state
);
2479 u32 sq_power_throttle
, sq_power_throttle2
;
2480 bool enable_sq_ramping
= ni_pi
->enable_sq_ramping
;
2483 if (state
->performance_level_count
== 0)
2486 if (smc_state
->levelCount
!= state
->performance_level_count
)
2489 if (adev
->pm
.dpm
.sq_ramping_threshold
== 0)
2492 if (SISLANDS_DPM2_SQ_RAMP_MAX_POWER
> (MAX_POWER_MASK
>> MAX_POWER_SHIFT
))
2493 enable_sq_ramping
= false;
2495 if (SISLANDS_DPM2_SQ_RAMP_MIN_POWER
> (MIN_POWER_MASK
>> MIN_POWER_SHIFT
))
2496 enable_sq_ramping
= false;
2498 if (SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA
> (MAX_POWER_DELTA_MASK
>> MAX_POWER_DELTA_SHIFT
))
2499 enable_sq_ramping
= false;
2501 if (SISLANDS_DPM2_SQ_RAMP_STI_SIZE
> (STI_SIZE_MASK
>> STI_SIZE_SHIFT
))
2502 enable_sq_ramping
= false;
2504 if (SISLANDS_DPM2_SQ_RAMP_LTI_RATIO
> (LTI_RATIO_MASK
>> LTI_RATIO_SHIFT
))
2505 enable_sq_ramping
= false;
2507 for (i
= 0; i
< state
->performance_level_count
; i
++) {
2508 sq_power_throttle
= 0;
2509 sq_power_throttle2
= 0;
2511 if ((state
->performance_levels
[i
].sclk
>= adev
->pm
.dpm
.sq_ramping_threshold
) &&
2512 enable_sq_ramping
) {
2513 sq_power_throttle
|= MAX_POWER(SISLANDS_DPM2_SQ_RAMP_MAX_POWER
);
2514 sq_power_throttle
|= MIN_POWER(SISLANDS_DPM2_SQ_RAMP_MIN_POWER
);
2515 sq_power_throttle2
|= MAX_POWER_DELTA(SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA
);
2516 sq_power_throttle2
|= STI_SIZE(SISLANDS_DPM2_SQ_RAMP_STI_SIZE
);
2517 sq_power_throttle2
|= LTI_RATIO(SISLANDS_DPM2_SQ_RAMP_LTI_RATIO
);
2519 sq_power_throttle
|= MAX_POWER_MASK
| MIN_POWER_MASK
;
2520 sq_power_throttle2
|= MAX_POWER_DELTA_MASK
| STI_SIZE_MASK
| LTI_RATIO_MASK
;
2523 smc_state
->levels
[i
].SQPowerThrottle
= cpu_to_be32(sq_power_throttle
);
2524 smc_state
->levels
[i
].SQPowerThrottle_2
= cpu_to_be32(sq_power_throttle2
);
2530 static int si_enable_power_containment(struct amdgpu_device
*adev
,
2531 struct amdgpu_ps
*amdgpu_new_state
,
2534 struct ni_power_info
*ni_pi
= ni_get_pi(adev
);
2535 PPSMC_Result smc_result
;
2538 if (ni_pi
->enable_power_containment
) {
2540 if (!si_should_disable_uvd_powertune(adev
, amdgpu_new_state
)) {
2541 smc_result
= amdgpu_si_send_msg_to_smc(adev
, PPSMC_TDPClampingActive
);
2542 if (smc_result
!= PPSMC_Result_OK
) {
2544 ni_pi
->pc_enabled
= false;
2546 ni_pi
->pc_enabled
= true;
2550 smc_result
= amdgpu_si_send_msg_to_smc(adev
, PPSMC_TDPClampingInactive
);
2551 if (smc_result
!= PPSMC_Result_OK
)
2553 ni_pi
->pc_enabled
= false;
2560 static int si_initialize_smc_dte_tables(struct amdgpu_device
*adev
)
2562 struct si_power_info
*si_pi
= si_get_pi(adev
);
2564 struct si_dte_data
*dte_data
= &si_pi
->dte_data
;
2565 Smc_SIslands_DTE_Configuration
*dte_tables
= NULL
;
2570 if (dte_data
== NULL
)
2571 si_pi
->enable_dte
= false;
2573 if (si_pi
->enable_dte
== false)
2576 if (dte_data
->k
<= 0)
2579 dte_tables
= kzalloc(sizeof(Smc_SIslands_DTE_Configuration
), GFP_KERNEL
);
2580 if (dte_tables
== NULL
) {
2581 si_pi
->enable_dte
= false;
2585 table_size
= dte_data
->k
;
2587 if (table_size
> SMC_SISLANDS_DTE_MAX_FILTER_STAGES
)
2588 table_size
= SMC_SISLANDS_DTE_MAX_FILTER_STAGES
;
2590 tdep_count
= dte_data
->tdep_count
;
2591 if (tdep_count
> SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE
)
2592 tdep_count
= SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE
;
2594 dte_tables
->K
= cpu_to_be32(table_size
);
2595 dte_tables
->T0
= cpu_to_be32(dte_data
->t0
);
2596 dte_tables
->MaxT
= cpu_to_be32(dte_data
->max_t
);
2597 dte_tables
->WindowSize
= dte_data
->window_size
;
2598 dte_tables
->temp_select
= dte_data
->temp_select
;
2599 dte_tables
->DTE_mode
= dte_data
->dte_mode
;
2600 dte_tables
->Tthreshold
= cpu_to_be32(dte_data
->t_threshold
);
2605 for (i
= 0; i
< table_size
; i
++) {
2606 dte_tables
->tau
[i
] = cpu_to_be32(dte_data
->tau
[i
]);
2607 dte_tables
->R
[i
] = cpu_to_be32(dte_data
->r
[i
]);
2610 dte_tables
->Tdep_count
= tdep_count
;
2612 for (i
= 0; i
< (u32
)tdep_count
; i
++) {
2613 dte_tables
->T_limits
[i
] = dte_data
->t_limits
[i
];
2614 dte_tables
->Tdep_tau
[i
] = cpu_to_be32(dte_data
->tdep_tau
[i
]);
2615 dte_tables
->Tdep_R
[i
] = cpu_to_be32(dte_data
->tdep_r
[i
]);
2618 ret
= amdgpu_si_copy_bytes_to_smc(adev
, si_pi
->dte_table_start
,
2620 sizeof(Smc_SIslands_DTE_Configuration
),
2627 static int si_get_cac_std_voltage_max_min(struct amdgpu_device
*adev
,
2630 struct si_power_info
*si_pi
= si_get_pi(adev
);
2631 struct amdgpu_cac_leakage_table
*table
=
2632 &adev
->pm
.dpm
.dyn_state
.cac_leakage_table
;
2642 for (i
= 0; i
< table
->count
; i
++) {
2643 if (table
->entries
[i
].vddc
> *max
)
2644 *max
= table
->entries
[i
].vddc
;
2645 if (table
->entries
[i
].vddc
< *min
)
2646 *min
= table
->entries
[i
].vddc
;
2649 if (si_pi
->powertune_data
->lkge_lut_v0_percent
> 100)
2652 v0_loadline
= (*min
) * (100 - si_pi
->powertune_data
->lkge_lut_v0_percent
) / 100;
2654 if (v0_loadline
> 0xFFFFUL
)
2657 *min
= (u16
)v0_loadline
;
2659 if ((*min
> *max
) || (*max
== 0) || (*min
== 0))
2665 static u16
si_get_cac_std_voltage_step(u16 max
, u16 min
)
2667 return ((max
- min
) + (SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES
- 1)) /
2668 SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES
;
2671 static int si_init_dte_leakage_table(struct amdgpu_device
*adev
,
2672 PP_SIslands_CacConfig
*cac_tables
,
2673 u16 vddc_max
, u16 vddc_min
, u16 vddc_step
,
2676 struct si_power_info
*si_pi
= si_get_pi(adev
);
2684 scaling_factor
= si_get_smc_power_scaling_factor(adev
);
2686 for (i
= 0; i
< SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES
; i
++) {
2687 t
= (1000 * (i
* t_step
+ t0
));
2689 for (j
= 0; j
< SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES
; j
++) {
2690 voltage
= vddc_max
- (vddc_step
* j
);
2692 si_calculate_leakage_for_v_and_t(adev
,
2693 &si_pi
->powertune_data
->leakage_coefficients
,
2696 si_pi
->dyn_powertune_data
.cac_leakage
,
2699 smc_leakage
= si_scale_power_for_smc(leakage
, scaling_factor
) / 4;
2701 if (smc_leakage
> 0xFFFF)
2702 smc_leakage
= 0xFFFF;
2704 cac_tables
->cac_lkge_lut
[i
][SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES
-1-j
] =
2705 cpu_to_be16((u16
)smc_leakage
);
2711 static int si_init_simplified_leakage_table(struct amdgpu_device
*adev
,
2712 PP_SIslands_CacConfig
*cac_tables
,
2713 u16 vddc_max
, u16 vddc_min
, u16 vddc_step
)
2715 struct si_power_info
*si_pi
= si_get_pi(adev
);
2722 scaling_factor
= si_get_smc_power_scaling_factor(adev
);
2724 for (j
= 0; j
< SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES
; j
++) {
2725 voltage
= vddc_max
- (vddc_step
* j
);
2727 si_calculate_leakage_for_v(adev
,
2728 &si_pi
->powertune_data
->leakage_coefficients
,
2729 si_pi
->powertune_data
->fixed_kt
,
2731 si_pi
->dyn_powertune_data
.cac_leakage
,
2734 smc_leakage
= si_scale_power_for_smc(leakage
, scaling_factor
) / 4;
2736 if (smc_leakage
> 0xFFFF)
2737 smc_leakage
= 0xFFFF;
2739 for (i
= 0; i
< SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES
; i
++)
2740 cac_tables
->cac_lkge_lut
[i
][SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES
-1-j
] =
2741 cpu_to_be16((u16
)smc_leakage
);
2746 static int si_initialize_smc_cac_tables(struct amdgpu_device
*adev
)
2748 struct ni_power_info
*ni_pi
= ni_get_pi(adev
);
2749 struct si_power_info
*si_pi
= si_get_pi(adev
);
2750 PP_SIslands_CacConfig
*cac_tables
= NULL
;
2751 u16 vddc_max
, vddc_min
, vddc_step
;
2753 u32 load_line_slope
, reg
;
2755 u32 ticks_per_us
= amdgpu_asic_get_xclk(adev
) / 100;
2757 if (ni_pi
->enable_cac
== false)
2760 cac_tables
= kzalloc(sizeof(PP_SIslands_CacConfig
), GFP_KERNEL
);
2764 reg
= RREG32(CG_CAC_CTRL
) & ~CAC_WINDOW_MASK
;
2765 reg
|= CAC_WINDOW(si_pi
->powertune_data
->cac_window
);
2766 WREG32(CG_CAC_CTRL
, reg
);
2768 si_pi
->dyn_powertune_data
.cac_leakage
= adev
->pm
.dpm
.cac_leakage
;
2769 si_pi
->dyn_powertune_data
.dc_pwr_value
=
2770 si_pi
->powertune_data
->dc_cac
[NISLANDS_DCCAC_LEVEL_0
];
2771 si_pi
->dyn_powertune_data
.wintime
= si_calculate_cac_wintime(adev
);
2772 si_pi
->dyn_powertune_data
.shift_n
= si_pi
->powertune_data
->shift_n_default
;
2774 si_pi
->dyn_powertune_data
.leakage_minimum_temperature
= 80 * 1000;
2776 ret
= si_get_cac_std_voltage_max_min(adev
, &vddc_max
, &vddc_min
);
2780 vddc_step
= si_get_cac_std_voltage_step(vddc_max
, vddc_min
);
2781 vddc_min
= vddc_max
- (vddc_step
* (SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES
- 1));
2785 if (si_pi
->enable_dte
|| ni_pi
->driver_calculate_cac_leakage
)
2786 ret
= si_init_dte_leakage_table(adev
, cac_tables
,
2787 vddc_max
, vddc_min
, vddc_step
,
2790 ret
= si_init_simplified_leakage_table(adev
, cac_tables
,
2791 vddc_max
, vddc_min
, vddc_step
);
2795 load_line_slope
= ((u32
)adev
->pm
.dpm
.load_line_slope
<< SMC_SISLANDS_SCALE_R
) / 100;
2797 cac_tables
->l2numWin_TDP
= cpu_to_be32(si_pi
->dyn_powertune_data
.l2_lta_window_size
);
2798 cac_tables
->lts_truncate_n
= si_pi
->dyn_powertune_data
.lts_truncate
;
2799 cac_tables
->SHIFT_N
= si_pi
->dyn_powertune_data
.shift_n
;
2800 cac_tables
->lkge_lut_V0
= cpu_to_be32((u32
)vddc_min
);
2801 cac_tables
->lkge_lut_Vstep
= cpu_to_be32((u32
)vddc_step
);
2802 cac_tables
->R_LL
= cpu_to_be32(load_line_slope
);
2803 cac_tables
->WinTime
= cpu_to_be32(si_pi
->dyn_powertune_data
.wintime
);
2804 cac_tables
->calculation_repeats
= cpu_to_be32(2);
2805 cac_tables
->dc_cac
= cpu_to_be32(0);
2806 cac_tables
->log2_PG_LKG_SCALE
= 12;
2807 cac_tables
->cac_temp
= si_pi
->powertune_data
->operating_temp
;
2808 cac_tables
->lkge_lut_T0
= cpu_to_be32((u32
)t0
);
2809 cac_tables
->lkge_lut_Tstep
= cpu_to_be32((u32
)t_step
);
2811 ret
= amdgpu_si_copy_bytes_to_smc(adev
, si_pi
->cac_table_start
,
2813 sizeof(PP_SIslands_CacConfig
),
2819 ret
= si_write_smc_soft_register(adev
, SI_SMC_SOFT_REGISTER_ticks_per_us
, ticks_per_us
);
2823 ni_pi
->enable_cac
= false;
2824 ni_pi
->enable_power_containment
= false;
2832 static int si_program_cac_config_registers(struct amdgpu_device
*adev
,
2833 const struct si_cac_config_reg
*cac_config_regs
)
2835 const struct si_cac_config_reg
*config_regs
= cac_config_regs
;
2836 u32 data
= 0, offset
;
2841 while (config_regs
->offset
!= 0xFFFFFFFF) {
2842 switch (config_regs
->type
) {
2843 case SISLANDS_CACCONFIG_CGIND
:
2844 offset
= SMC_CG_IND_START
+ config_regs
->offset
;
2845 if (offset
< SMC_CG_IND_END
)
2846 data
= RREG32_SMC(offset
);
2849 data
= RREG32(config_regs
->offset
);
2853 data
&= ~config_regs
->mask
;
2854 data
|= ((config_regs
->value
<< config_regs
->shift
) & config_regs
->mask
);
2856 switch (config_regs
->type
) {
2857 case SISLANDS_CACCONFIG_CGIND
:
2858 offset
= SMC_CG_IND_START
+ config_regs
->offset
;
2859 if (offset
< SMC_CG_IND_END
)
2860 WREG32_SMC(offset
, data
);
2863 WREG32(config_regs
->offset
, data
);
2871 static int si_initialize_hardware_cac_manager(struct amdgpu_device
*adev
)
2873 struct ni_power_info
*ni_pi
= ni_get_pi(adev
);
2874 struct si_power_info
*si_pi
= si_get_pi(adev
);
2877 if ((ni_pi
->enable_cac
== false) ||
2878 (ni_pi
->cac_configuration_required
== false))
2881 ret
= si_program_cac_config_registers(adev
, si_pi
->lcac_config
);
2884 ret
= si_program_cac_config_registers(adev
, si_pi
->cac_override
);
2887 ret
= si_program_cac_config_registers(adev
, si_pi
->cac_weights
);
2894 static int si_enable_smc_cac(struct amdgpu_device
*adev
,
2895 struct amdgpu_ps
*amdgpu_new_state
,
2898 struct ni_power_info
*ni_pi
= ni_get_pi(adev
);
2899 struct si_power_info
*si_pi
= si_get_pi(adev
);
2900 PPSMC_Result smc_result
;
2903 if (ni_pi
->enable_cac
) {
2905 if (!si_should_disable_uvd_powertune(adev
, amdgpu_new_state
)) {
2906 if (ni_pi
->support_cac_long_term_average
) {
2907 smc_result
= amdgpu_si_send_msg_to_smc(adev
, PPSMC_CACLongTermAvgEnable
);
2908 if (smc_result
!= PPSMC_Result_OK
)
2909 ni_pi
->support_cac_long_term_average
= false;
2912 smc_result
= amdgpu_si_send_msg_to_smc(adev
, PPSMC_MSG_EnableCac
);
2913 if (smc_result
!= PPSMC_Result_OK
) {
2915 ni_pi
->cac_enabled
= false;
2917 ni_pi
->cac_enabled
= true;
2920 if (si_pi
->enable_dte
) {
2921 smc_result
= amdgpu_si_send_msg_to_smc(adev
, PPSMC_MSG_EnableDTE
);
2922 if (smc_result
!= PPSMC_Result_OK
)
2926 } else if (ni_pi
->cac_enabled
) {
2927 if (si_pi
->enable_dte
)
2928 smc_result
= amdgpu_si_send_msg_to_smc(adev
, PPSMC_MSG_DisableDTE
);
2930 smc_result
= amdgpu_si_send_msg_to_smc(adev
, PPSMC_MSG_DisableCac
);
2932 ni_pi
->cac_enabled
= false;
2934 if (ni_pi
->support_cac_long_term_average
)
2935 smc_result
= amdgpu_si_send_msg_to_smc(adev
, PPSMC_CACLongTermAvgDisable
);
2941 static int si_init_smc_spll_table(struct amdgpu_device
*adev
)
2943 struct ni_power_info
*ni_pi
= ni_get_pi(adev
);
2944 struct si_power_info
*si_pi
= si_get_pi(adev
);
2945 SMC_SISLANDS_SPLL_DIV_TABLE
*spll_table
;
2946 SISLANDS_SMC_SCLK_VALUE sclk_params
;
2954 if (si_pi
->spll_table_start
== 0)
2957 spll_table
= kzalloc(sizeof(SMC_SISLANDS_SPLL_DIV_TABLE
), GFP_KERNEL
);
2958 if (spll_table
== NULL
)
2961 for (i
= 0; i
< 256; i
++) {
2962 ret
= si_calculate_sclk_params(adev
, sclk
, &sclk_params
);
2965 p_div
= (sclk_params
.vCG_SPLL_FUNC_CNTL
& SPLL_PDIV_A_MASK
) >> SPLL_PDIV_A_SHIFT
;
2966 fb_div
= (sclk_params
.vCG_SPLL_FUNC_CNTL_3
& SPLL_FB_DIV_MASK
) >> SPLL_FB_DIV_SHIFT
;
2967 clk_s
= (sclk_params
.vCG_SPLL_SPREAD_SPECTRUM
& CLK_S_MASK
) >> CLK_S_SHIFT
;
2968 clk_v
= (sclk_params
.vCG_SPLL_SPREAD_SPECTRUM_2
& CLK_V_MASK
) >> CLK_V_SHIFT
;
2970 fb_div
&= ~0x00001FFF;
2974 if (p_div
& ~(SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_MASK
>> SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_SHIFT
))
2976 if (fb_div
& ~(SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_MASK
>> SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT
))
2978 if (clk_s
& ~(SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_MASK
>> SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT
))
2980 if (clk_v
& ~(SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_MASK
>> SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT
))
2986 tmp
= ((fb_div
<< SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT
) & SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_MASK
) |
2987 ((p_div
<< SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_SHIFT
) & SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_MASK
);
2988 spll_table
->freq
[i
] = cpu_to_be32(tmp
);
2990 tmp
= ((clk_v
<< SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT
) & SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_MASK
) |
2991 ((clk_s
<< SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT
) & SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_MASK
);
2992 spll_table
->ss
[i
] = cpu_to_be32(tmp
);
2999 ret
= amdgpu_si_copy_bytes_to_smc(adev
, si_pi
->spll_table_start
,
3001 sizeof(SMC_SISLANDS_SPLL_DIV_TABLE
),
3005 ni_pi
->enable_power_containment
= false;
3012 static u16
si_get_lower_of_leakage_and_vce_voltage(struct amdgpu_device
*adev
,
3015 u16 highest_leakage
= 0;
3016 struct si_power_info
*si_pi
= si_get_pi(adev
);
3019 for (i
= 0; i
< si_pi
->leakage_voltage
.count
; i
++){
3020 if (highest_leakage
< si_pi
->leakage_voltage
.entries
[i
].voltage
)
3021 highest_leakage
= si_pi
->leakage_voltage
.entries
[i
].voltage
;
3024 if (si_pi
->leakage_voltage
.count
&& (highest_leakage
< vce_voltage
))
3025 return highest_leakage
;
3030 static int si_get_vce_clock_voltage(struct amdgpu_device
*adev
,
3031 u32 evclk
, u32 ecclk
, u16
*voltage
)
3035 struct amdgpu_vce_clock_voltage_dependency_table
*table
=
3036 &adev
->pm
.dpm
.dyn_state
.vce_clock_voltage_dependency_table
;
3038 if (((evclk
== 0) && (ecclk
== 0)) ||
3039 (table
&& (table
->count
== 0))) {
3044 for (i
= 0; i
< table
->count
; i
++) {
3045 if ((evclk
<= table
->entries
[i
].evclk
) &&
3046 (ecclk
<= table
->entries
[i
].ecclk
)) {
3047 *voltage
= table
->entries
[i
].v
;
3053 /* if no match return the highest voltage */
3055 *voltage
= table
->entries
[table
->count
- 1].v
;
3057 *voltage
= si_get_lower_of_leakage_and_vce_voltage(adev
, *voltage
);
3062 static bool si_dpm_vblank_too_short(struct amdgpu_device
*adev
)
3065 u32 vblank_time
= amdgpu_dpm_get_vblank_time(adev
);
3066 /* we never hit the non-gddr5 limit so disable it */
3067 u32 switch_limit
= adev
->mc
.vram_type
== AMDGPU_VRAM_TYPE_GDDR5
? 450 : 0;
3069 if (vblank_time
< switch_limit
)
3076 static int ni_copy_and_switch_arb_sets(struct amdgpu_device
*adev
,
3077 u32 arb_freq_src
, u32 arb_freq_dest
)
3079 u32 mc_arb_dram_timing
;
3080 u32 mc_arb_dram_timing2
;
3084 switch (arb_freq_src
) {
3085 case MC_CG_ARB_FREQ_F0
:
3086 mc_arb_dram_timing
= RREG32(MC_ARB_DRAM_TIMING
);
3087 mc_arb_dram_timing2
= RREG32(MC_ARB_DRAM_TIMING2
);
3088 burst_time
= (RREG32(MC_ARB_BURST_TIME
) & STATE0_MASK
) >> STATE0_SHIFT
;
3090 case MC_CG_ARB_FREQ_F1
:
3091 mc_arb_dram_timing
= RREG32(MC_ARB_DRAM_TIMING_1
);
3092 mc_arb_dram_timing2
= RREG32(MC_ARB_DRAM_TIMING2_1
);
3093 burst_time
= (RREG32(MC_ARB_BURST_TIME
) & STATE1_MASK
) >> STATE1_SHIFT
;
3095 case MC_CG_ARB_FREQ_F2
:
3096 mc_arb_dram_timing
= RREG32(MC_ARB_DRAM_TIMING_2
);
3097 mc_arb_dram_timing2
= RREG32(MC_ARB_DRAM_TIMING2_2
);
3098 burst_time
= (RREG32(MC_ARB_BURST_TIME
) & STATE2_MASK
) >> STATE2_SHIFT
;
3100 case MC_CG_ARB_FREQ_F3
:
3101 mc_arb_dram_timing
= RREG32(MC_ARB_DRAM_TIMING_3
);
3102 mc_arb_dram_timing2
= RREG32(MC_ARB_DRAM_TIMING2_3
);
3103 burst_time
= (RREG32(MC_ARB_BURST_TIME
) & STATE3_MASK
) >> STATE3_SHIFT
;
3109 switch (arb_freq_dest
) {
3110 case MC_CG_ARB_FREQ_F0
:
3111 WREG32(MC_ARB_DRAM_TIMING
, mc_arb_dram_timing
);
3112 WREG32(MC_ARB_DRAM_TIMING2
, mc_arb_dram_timing2
);
3113 WREG32_P(MC_ARB_BURST_TIME
, STATE0(burst_time
), ~STATE0_MASK
);
3115 case MC_CG_ARB_FREQ_F1
:
3116 WREG32(MC_ARB_DRAM_TIMING_1
, mc_arb_dram_timing
);
3117 WREG32(MC_ARB_DRAM_TIMING2_1
, mc_arb_dram_timing2
);
3118 WREG32_P(MC_ARB_BURST_TIME
, STATE1(burst_time
), ~STATE1_MASK
);
3120 case MC_CG_ARB_FREQ_F2
:
3121 WREG32(MC_ARB_DRAM_TIMING_2
, mc_arb_dram_timing
);
3122 WREG32(MC_ARB_DRAM_TIMING2_2
, mc_arb_dram_timing2
);
3123 WREG32_P(MC_ARB_BURST_TIME
, STATE2(burst_time
), ~STATE2_MASK
);
3125 case MC_CG_ARB_FREQ_F3
:
3126 WREG32(MC_ARB_DRAM_TIMING_3
, mc_arb_dram_timing
);
3127 WREG32(MC_ARB_DRAM_TIMING2_3
, mc_arb_dram_timing2
);
3128 WREG32_P(MC_ARB_BURST_TIME
, STATE3(burst_time
), ~STATE3_MASK
);
3134 mc_cg_config
= RREG32(MC_CG_CONFIG
) | 0x0000000F;
3135 WREG32(MC_CG_CONFIG
, mc_cg_config
);
3136 WREG32_P(MC_ARB_CG
, CG_ARB_REQ(arb_freq_dest
), ~CG_ARB_REQ_MASK
);
3141 static void ni_update_current_ps(struct amdgpu_device
*adev
,
3142 struct amdgpu_ps
*rps
)
3144 struct si_ps
*new_ps
= si_get_ps(rps
);
3145 struct evergreen_power_info
*eg_pi
= evergreen_get_pi(adev
);
3146 struct ni_power_info
*ni_pi
= ni_get_pi(adev
);
3148 eg_pi
->current_rps
= *rps
;
3149 ni_pi
->current_ps
= *new_ps
;
3150 eg_pi
->current_rps
.ps_priv
= &ni_pi
->current_ps
;
3151 adev
->pm
.dpm
.current_ps
= &eg_pi
->current_rps
;
3154 static void ni_update_requested_ps(struct amdgpu_device
*adev
,
3155 struct amdgpu_ps
*rps
)
3157 struct si_ps
*new_ps
= si_get_ps(rps
);
3158 struct evergreen_power_info
*eg_pi
= evergreen_get_pi(adev
);
3159 struct ni_power_info
*ni_pi
= ni_get_pi(adev
);
3161 eg_pi
->requested_rps
= *rps
;
3162 ni_pi
->requested_ps
= *new_ps
;
3163 eg_pi
->requested_rps
.ps_priv
= &ni_pi
->requested_ps
;
3164 adev
->pm
.dpm
.requested_ps
= &eg_pi
->requested_rps
;
3167 static void ni_set_uvd_clock_before_set_eng_clock(struct amdgpu_device
*adev
,
3168 struct amdgpu_ps
*new_ps
,
3169 struct amdgpu_ps
*old_ps
)
3171 struct si_ps
*new_state
= si_get_ps(new_ps
);
3172 struct si_ps
*current_state
= si_get_ps(old_ps
);
3174 if ((new_ps
->vclk
== old_ps
->vclk
) &&
3175 (new_ps
->dclk
== old_ps
->dclk
))
3178 if (new_state
->performance_levels
[new_state
->performance_level_count
- 1].sclk
>=
3179 current_state
->performance_levels
[current_state
->performance_level_count
- 1].sclk
)
3182 amdgpu_asic_set_uvd_clocks(adev
, new_ps
->vclk
, new_ps
->dclk
);
3185 static void ni_set_uvd_clock_after_set_eng_clock(struct amdgpu_device
*adev
,
3186 struct amdgpu_ps
*new_ps
,
3187 struct amdgpu_ps
*old_ps
)
3189 struct si_ps
*new_state
= si_get_ps(new_ps
);
3190 struct si_ps
*current_state
= si_get_ps(old_ps
);
3192 if ((new_ps
->vclk
== old_ps
->vclk
) &&
3193 (new_ps
->dclk
== old_ps
->dclk
))
3196 if (new_state
->performance_levels
[new_state
->performance_level_count
- 1].sclk
<
3197 current_state
->performance_levels
[current_state
->performance_level_count
- 1].sclk
)
3200 amdgpu_asic_set_uvd_clocks(adev
, new_ps
->vclk
, new_ps
->dclk
);
3203 static u16
btc_find_voltage(struct atom_voltage_table
*table
, u16 voltage
)
3207 for (i
= 0; i
< table
->count
; i
++)
3208 if (voltage
<= table
->entries
[i
].value
)
3209 return table
->entries
[i
].value
;
3211 return table
->entries
[table
->count
- 1].value
;
3214 static u32
btc_find_valid_clock(struct amdgpu_clock_array
*clocks
,
3215 u32 max_clock
, u32 requested_clock
)
3219 if ((clocks
== NULL
) || (clocks
->count
== 0))
3220 return (requested_clock
< max_clock
) ? requested_clock
: max_clock
;
3222 for (i
= 0; i
< clocks
->count
; i
++) {
3223 if (clocks
->values
[i
] >= requested_clock
)
3224 return (clocks
->values
[i
] < max_clock
) ? clocks
->values
[i
] : max_clock
;
3227 return (clocks
->values
[clocks
->count
- 1] < max_clock
) ?
3228 clocks
->values
[clocks
->count
- 1] : max_clock
;
3231 static u32
btc_get_valid_mclk(struct amdgpu_device
*adev
,
3232 u32 max_mclk
, u32 requested_mclk
)
3234 return btc_find_valid_clock(&adev
->pm
.dpm
.dyn_state
.valid_mclk_values
,
3235 max_mclk
, requested_mclk
);
3238 static u32
btc_get_valid_sclk(struct amdgpu_device
*adev
,
3239 u32 max_sclk
, u32 requested_sclk
)
3241 return btc_find_valid_clock(&adev
->pm
.dpm
.dyn_state
.valid_sclk_values
,
3242 max_sclk
, requested_sclk
);
3245 static void btc_get_max_clock_from_voltage_dependency_table(struct amdgpu_clock_voltage_dependency_table
*table
,
3250 if ((table
== NULL
) || (table
->count
== 0)) {
3255 for (i
= 0; i
< table
->count
; i
++) {
3256 if (clock
< table
->entries
[i
].clk
)
3257 clock
= table
->entries
[i
].clk
;
3262 static void btc_apply_voltage_dependency_rules(struct amdgpu_clock_voltage_dependency_table
*table
,
3263 u32 clock
, u16 max_voltage
, u16
*voltage
)
3267 if ((table
== NULL
) || (table
->count
== 0))
3270 for (i
= 0; i
< table
->count
; i
++) {
3271 if (clock
<= table
->entries
[i
].clk
) {
3272 if (*voltage
< table
->entries
[i
].v
)
3273 *voltage
= (u16
)((table
->entries
[i
].v
< max_voltage
) ?
3274 table
->entries
[i
].v
: max_voltage
);
3279 *voltage
= (*voltage
> max_voltage
) ? *voltage
: max_voltage
;
3282 static void btc_adjust_clock_combinations(struct amdgpu_device
*adev
,
3283 const struct amdgpu_clock_and_voltage_limits
*max_limits
,
3284 struct rv7xx_pl
*pl
)
3287 if ((pl
->mclk
== 0) || (pl
->sclk
== 0))
3290 if (pl
->mclk
== pl
->sclk
)
3293 if (pl
->mclk
> pl
->sclk
) {
3294 if (((pl
->mclk
+ (pl
->sclk
- 1)) / pl
->sclk
) > adev
->pm
.dpm
.dyn_state
.mclk_sclk_ratio
)
3295 pl
->sclk
= btc_get_valid_sclk(adev
,
3298 (adev
->pm
.dpm
.dyn_state
.mclk_sclk_ratio
- 1)) /
3299 adev
->pm
.dpm
.dyn_state
.mclk_sclk_ratio
);
3301 if ((pl
->sclk
- pl
->mclk
) > adev
->pm
.dpm
.dyn_state
.sclk_mclk_delta
)
3302 pl
->mclk
= btc_get_valid_mclk(adev
,
3305 adev
->pm
.dpm
.dyn_state
.sclk_mclk_delta
);
3309 static void btc_apply_voltage_delta_rules(struct amdgpu_device
*adev
,
3310 u16 max_vddc
, u16 max_vddci
,
3311 u16
*vddc
, u16
*vddci
)
3313 struct evergreen_power_info
*eg_pi
= evergreen_get_pi(adev
);
3316 if ((0 == *vddc
) || (0 == *vddci
))
3319 if (*vddc
> *vddci
) {
3320 if ((*vddc
- *vddci
) > adev
->pm
.dpm
.dyn_state
.vddc_vddci_delta
) {
3321 new_voltage
= btc_find_voltage(&eg_pi
->vddci_voltage_table
,
3322 (*vddc
- adev
->pm
.dpm
.dyn_state
.vddc_vddci_delta
));
3323 *vddci
= (new_voltage
< max_vddci
) ? new_voltage
: max_vddci
;
3326 if ((*vddci
- *vddc
) > adev
->pm
.dpm
.dyn_state
.vddc_vddci_delta
) {
3327 new_voltage
= btc_find_voltage(&eg_pi
->vddc_voltage_table
,
3328 (*vddci
- adev
->pm
.dpm
.dyn_state
.vddc_vddci_delta
));
3329 *vddc
= (new_voltage
< max_vddc
) ? new_voltage
: max_vddc
;
3334 static enum amdgpu_pcie_gen
r600_get_pcie_gen_support(struct amdgpu_device
*adev
,
3336 enum amdgpu_pcie_gen asic_gen
,
3337 enum amdgpu_pcie_gen default_gen
)
3340 case AMDGPU_PCIE_GEN1
:
3341 return AMDGPU_PCIE_GEN1
;
3342 case AMDGPU_PCIE_GEN2
:
3343 return AMDGPU_PCIE_GEN2
;
3344 case AMDGPU_PCIE_GEN3
:
3345 return AMDGPU_PCIE_GEN3
;
3347 if ((sys_mask
& DRM_PCIE_SPEED_80
) && (default_gen
== AMDGPU_PCIE_GEN3
))
3348 return AMDGPU_PCIE_GEN3
;
3349 else if ((sys_mask
& DRM_PCIE_SPEED_50
) && (default_gen
== AMDGPU_PCIE_GEN2
))
3350 return AMDGPU_PCIE_GEN2
;
3352 return AMDGPU_PCIE_GEN1
;
3354 return AMDGPU_PCIE_GEN1
;
3357 static void r600_calculate_u_and_p(u32 i
, u32 r_c
, u32 p_b
,
3364 i_c
= (i
* r_c
) / 100;
3373 *p
= i_c
/ (1 << (2 * (*u
)));
3376 static int r600_calculate_at(u32 t
, u32 h
, u32 fh
, u32 fl
, u32
*tl
, u32
*th
)
3381 if ((fl
== 0) || (fh
== 0) || (fl
> fh
))
3384 k
= (100 * fh
) / fl
;
3385 t1
= (t
* (k
- 100));
3386 a
= (1000 * (100 * h
+ t1
)) / (10000 + (t1
/ 100));
3388 ah
= ((a
* t
) + 5000) / 10000;
3397 static bool r600_is_uvd_state(u32
class, u32 class2
)
3399 if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE
)
3401 if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE
)
3403 if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE
)
3405 if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE
)
3407 if (class2
& ATOM_PPLIB_CLASSIFICATION2_MVC
)
3412 static u8
rv770_get_memory_module_index(struct amdgpu_device
*adev
)
3414 return (u8
) ((RREG32(BIOS_SCRATCH_4
) >> 16) & 0xff);
3417 static void rv770_get_max_vddc(struct amdgpu_device
*adev
)
3419 struct rv7xx_power_info
*pi
= rv770_get_pi(adev
);
3422 if (amdgpu_atombios_get_max_vddc(adev
, 0, 0, &vddc
))
3425 pi
->max_vddc
= vddc
;
3428 static void rv770_get_engine_memory_ss(struct amdgpu_device
*adev
)
3430 struct rv7xx_power_info
*pi
= rv770_get_pi(adev
);
3431 struct amdgpu_atom_ss ss
;
3433 pi
->sclk_ss
= amdgpu_atombios_get_asic_ss_info(adev
, &ss
,
3434 ASIC_INTERNAL_ENGINE_SS
, 0);
3435 pi
->mclk_ss
= amdgpu_atombios_get_asic_ss_info(adev
, &ss
,
3436 ASIC_INTERNAL_MEMORY_SS
, 0);
3438 if (pi
->sclk_ss
|| pi
->mclk_ss
)
3439 pi
->dynamic_ss
= true;
3441 pi
->dynamic_ss
= false;
3445 static void si_apply_state_adjust_rules(struct amdgpu_device
*adev
,
3446 struct amdgpu_ps
*rps
)
3448 struct si_ps
*ps
= si_get_ps(rps
);
3449 struct amdgpu_clock_and_voltage_limits
*max_limits
;
3450 bool disable_mclk_switching
= false;
3451 bool disable_sclk_switching
= false;
3453 u16 vddc
, vddci
, min_vce_voltage
= 0;
3454 u32 max_sclk_vddc
, max_mclk_vddci
, max_mclk_vddc
;
3455 u32 max_sclk
= 0, max_mclk
= 0;
3458 if (adev
->asic_type
== CHIP_HAINAN
) {
3459 if ((adev
->pdev
->revision
== 0x81) ||
3460 (adev
->pdev
->revision
== 0x83) ||
3461 (adev
->pdev
->revision
== 0xC3) ||
3462 (adev
->pdev
->device
== 0x6664) ||
3463 (adev
->pdev
->device
== 0x6665) ||
3464 (adev
->pdev
->device
== 0x6667)) {
3469 if (rps
->vce_active
) {
3470 rps
->evclk
= adev
->pm
.dpm
.vce_states
[adev
->pm
.dpm
.vce_level
].evclk
;
3471 rps
->ecclk
= adev
->pm
.dpm
.vce_states
[adev
->pm
.dpm
.vce_level
].ecclk
;
3472 si_get_vce_clock_voltage(adev
, rps
->evclk
, rps
->ecclk
,
3479 if ((adev
->pm
.dpm
.new_active_crtc_count
> 1) ||
3480 si_dpm_vblank_too_short(adev
))
3481 disable_mclk_switching
= true;
3483 if (rps
->vclk
|| rps
->dclk
) {
3484 disable_mclk_switching
= true;
3485 disable_sclk_switching
= true;
3488 if (adev
->pm
.dpm
.ac_power
)
3489 max_limits
= &adev
->pm
.dpm
.dyn_state
.max_clock_voltage_on_ac
;
3491 max_limits
= &adev
->pm
.dpm
.dyn_state
.max_clock_voltage_on_dc
;
3493 for (i
= ps
->performance_level_count
- 2; i
>= 0; i
--) {
3494 if (ps
->performance_levels
[i
].vddc
> ps
->performance_levels
[i
+1].vddc
)
3495 ps
->performance_levels
[i
].vddc
= ps
->performance_levels
[i
+1].vddc
;
3497 if (adev
->pm
.dpm
.ac_power
== false) {
3498 for (i
= 0; i
< ps
->performance_level_count
; i
++) {
3499 if (ps
->performance_levels
[i
].mclk
> max_limits
->mclk
)
3500 ps
->performance_levels
[i
].mclk
= max_limits
->mclk
;
3501 if (ps
->performance_levels
[i
].sclk
> max_limits
->sclk
)
3502 ps
->performance_levels
[i
].sclk
= max_limits
->sclk
;
3503 if (ps
->performance_levels
[i
].vddc
> max_limits
->vddc
)
3504 ps
->performance_levels
[i
].vddc
= max_limits
->vddc
;
3505 if (ps
->performance_levels
[i
].vddci
> max_limits
->vddci
)
3506 ps
->performance_levels
[i
].vddci
= max_limits
->vddci
;
3510 /* limit clocks to max supported clocks based on voltage dependency tables */
3511 btc_get_max_clock_from_voltage_dependency_table(&adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_sclk
,
3513 btc_get_max_clock_from_voltage_dependency_table(&adev
->pm
.dpm
.dyn_state
.vddci_dependency_on_mclk
,
3515 btc_get_max_clock_from_voltage_dependency_table(&adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_mclk
,
3518 for (i
= 0; i
< ps
->performance_level_count
; i
++) {
3519 if (max_sclk_vddc
) {
3520 if (ps
->performance_levels
[i
].sclk
> max_sclk_vddc
)
3521 ps
->performance_levels
[i
].sclk
= max_sclk_vddc
;
3523 if (max_mclk_vddci
) {
3524 if (ps
->performance_levels
[i
].mclk
> max_mclk_vddci
)
3525 ps
->performance_levels
[i
].mclk
= max_mclk_vddci
;
3527 if (max_mclk_vddc
) {
3528 if (ps
->performance_levels
[i
].mclk
> max_mclk_vddc
)
3529 ps
->performance_levels
[i
].mclk
= max_mclk_vddc
;
3532 if (ps
->performance_levels
[i
].mclk
> max_mclk
)
3533 ps
->performance_levels
[i
].mclk
= max_mclk
;
3536 if (ps
->performance_levels
[i
].sclk
> max_sclk
)
3537 ps
->performance_levels
[i
].sclk
= max_sclk
;
3541 /* XXX validate the min clocks required for display */
3543 if (disable_mclk_switching
) {
3544 mclk
= ps
->performance_levels
[ps
->performance_level_count
- 1].mclk
;
3545 vddci
= ps
->performance_levels
[ps
->performance_level_count
- 1].vddci
;
3547 mclk
= ps
->performance_levels
[0].mclk
;
3548 vddci
= ps
->performance_levels
[0].vddci
;
3551 if (disable_sclk_switching
) {
3552 sclk
= ps
->performance_levels
[ps
->performance_level_count
- 1].sclk
;
3553 vddc
= ps
->performance_levels
[ps
->performance_level_count
- 1].vddc
;
3555 sclk
= ps
->performance_levels
[0].sclk
;
3556 vddc
= ps
->performance_levels
[0].vddc
;
3559 if (rps
->vce_active
) {
3560 if (sclk
< adev
->pm
.dpm
.vce_states
[adev
->pm
.dpm
.vce_level
].sclk
)
3561 sclk
= adev
->pm
.dpm
.vce_states
[adev
->pm
.dpm
.vce_level
].sclk
;
3562 if (mclk
< adev
->pm
.dpm
.vce_states
[adev
->pm
.dpm
.vce_level
].mclk
)
3563 mclk
= adev
->pm
.dpm
.vce_states
[adev
->pm
.dpm
.vce_level
].mclk
;
3566 /* adjusted low state */
3567 ps
->performance_levels
[0].sclk
= sclk
;
3568 ps
->performance_levels
[0].mclk
= mclk
;
3569 ps
->performance_levels
[0].vddc
= vddc
;
3570 ps
->performance_levels
[0].vddci
= vddci
;
3572 if (disable_sclk_switching
) {
3573 sclk
= ps
->performance_levels
[0].sclk
;
3574 for (i
= 1; i
< ps
->performance_level_count
; i
++) {
3575 if (sclk
< ps
->performance_levels
[i
].sclk
)
3576 sclk
= ps
->performance_levels
[i
].sclk
;
3578 for (i
= 0; i
< ps
->performance_level_count
; i
++) {
3579 ps
->performance_levels
[i
].sclk
= sclk
;
3580 ps
->performance_levels
[i
].vddc
= vddc
;
3583 for (i
= 1; i
< ps
->performance_level_count
; i
++) {
3584 if (ps
->performance_levels
[i
].sclk
< ps
->performance_levels
[i
- 1].sclk
)
3585 ps
->performance_levels
[i
].sclk
= ps
->performance_levels
[i
- 1].sclk
;
3586 if (ps
->performance_levels
[i
].vddc
< ps
->performance_levels
[i
- 1].vddc
)
3587 ps
->performance_levels
[i
].vddc
= ps
->performance_levels
[i
- 1].vddc
;
3591 if (disable_mclk_switching
) {
3592 mclk
= ps
->performance_levels
[0].mclk
;
3593 for (i
= 1; i
< ps
->performance_level_count
; i
++) {
3594 if (mclk
< ps
->performance_levels
[i
].mclk
)
3595 mclk
= ps
->performance_levels
[i
].mclk
;
3597 for (i
= 0; i
< ps
->performance_level_count
; i
++) {
3598 ps
->performance_levels
[i
].mclk
= mclk
;
3599 ps
->performance_levels
[i
].vddci
= vddci
;
3602 for (i
= 1; i
< ps
->performance_level_count
; i
++) {
3603 if (ps
->performance_levels
[i
].mclk
< ps
->performance_levels
[i
- 1].mclk
)
3604 ps
->performance_levels
[i
].mclk
= ps
->performance_levels
[i
- 1].mclk
;
3605 if (ps
->performance_levels
[i
].vddci
< ps
->performance_levels
[i
- 1].vddci
)
3606 ps
->performance_levels
[i
].vddci
= ps
->performance_levels
[i
- 1].vddci
;
3610 for (i
= 0; i
< ps
->performance_level_count
; i
++)
3611 btc_adjust_clock_combinations(adev
, max_limits
,
3612 &ps
->performance_levels
[i
]);
3614 for (i
= 0; i
< ps
->performance_level_count
; i
++) {
3615 if (ps
->performance_levels
[i
].vddc
< min_vce_voltage
)
3616 ps
->performance_levels
[i
].vddc
= min_vce_voltage
;
3617 btc_apply_voltage_dependency_rules(&adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_sclk
,
3618 ps
->performance_levels
[i
].sclk
,
3619 max_limits
->vddc
, &ps
->performance_levels
[i
].vddc
);
3620 btc_apply_voltage_dependency_rules(&adev
->pm
.dpm
.dyn_state
.vddci_dependency_on_mclk
,
3621 ps
->performance_levels
[i
].mclk
,
3622 max_limits
->vddci
, &ps
->performance_levels
[i
].vddci
);
3623 btc_apply_voltage_dependency_rules(&adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_mclk
,
3624 ps
->performance_levels
[i
].mclk
,
3625 max_limits
->vddc
, &ps
->performance_levels
[i
].vddc
);
3626 btc_apply_voltage_dependency_rules(&adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_dispclk
,
3627 adev
->clock
.current_dispclk
,
3628 max_limits
->vddc
, &ps
->performance_levels
[i
].vddc
);
3631 for (i
= 0; i
< ps
->performance_level_count
; i
++) {
3632 btc_apply_voltage_delta_rules(adev
,
3633 max_limits
->vddc
, max_limits
->vddci
,
3634 &ps
->performance_levels
[i
].vddc
,
3635 &ps
->performance_levels
[i
].vddci
);
3638 ps
->dc_compatible
= true;
3639 for (i
= 0; i
< ps
->performance_level_count
; i
++) {
3640 if (ps
->performance_levels
[i
].vddc
> adev
->pm
.dpm
.dyn_state
.max_clock_voltage_on_dc
.vddc
)
3641 ps
->dc_compatible
= false;
3646 static int si_read_smc_soft_register(struct amdgpu_device
*adev
,
3647 u16 reg_offset
, u32
*value
)
3649 struct si_power_info
*si_pi
= si_get_pi(adev
);
3651 return amdgpu_si_read_smc_sram_dword(adev
,
3652 si_pi
->soft_regs_start
+ reg_offset
, value
,
3657 static int si_write_smc_soft_register(struct amdgpu_device
*adev
,
3658 u16 reg_offset
, u32 value
)
3660 struct si_power_info
*si_pi
= si_get_pi(adev
);
3662 return amdgpu_si_write_smc_sram_dword(adev
,
3663 si_pi
->soft_regs_start
+ reg_offset
,
3664 value
, si_pi
->sram_end
);
3667 static bool si_is_special_1gb_platform(struct amdgpu_device
*adev
)
3670 u32 tmp
, width
, row
, column
, bank
, density
;
3671 bool is_memory_gddr5
, is_special
;
3673 tmp
= RREG32(MC_SEQ_MISC0
);
3674 is_memory_gddr5
= (MC_SEQ_MISC0_GDDR5_VALUE
== ((tmp
& MC_SEQ_MISC0_GDDR5_MASK
) >> MC_SEQ_MISC0_GDDR5_SHIFT
));
3675 is_special
= (MC_SEQ_MISC0_REV_ID_VALUE
== ((tmp
& MC_SEQ_MISC0_REV_ID_MASK
) >> MC_SEQ_MISC0_REV_ID_SHIFT
))
3676 & (MC_SEQ_MISC0_VEN_ID_VALUE
== ((tmp
& MC_SEQ_MISC0_VEN_ID_MASK
) >> MC_SEQ_MISC0_VEN_ID_SHIFT
));
3678 WREG32(MC_SEQ_IO_DEBUG_INDEX
, 0xb);
3679 width
= ((RREG32(MC_SEQ_IO_DEBUG_DATA
) >> 1) & 1) ? 16 : 32;
3681 tmp
= RREG32(MC_ARB_RAMCFG
);
3682 row
= ((tmp
& NOOFROWS_MASK
) >> NOOFROWS_SHIFT
) + 10;
3683 column
= ((tmp
& NOOFCOLS_MASK
) >> NOOFCOLS_SHIFT
) + 8;
3684 bank
= ((tmp
& NOOFBANK_MASK
) >> NOOFBANK_SHIFT
) + 2;
3686 density
= (1 << (row
+ column
- 20 + bank
)) * width
;
3688 if ((adev
->pdev
->device
== 0x6819) &&
3689 is_memory_gddr5
&& is_special
&& (density
== 0x400))
3695 static void si_get_leakage_vddc(struct amdgpu_device
*adev
)
3697 struct si_power_info
*si_pi
= si_get_pi(adev
);
3698 u16 vddc
, count
= 0;
3701 for (i
= 0; i
< SISLANDS_MAX_LEAKAGE_COUNT
; i
++) {
3702 ret
= amdgpu_atombios_get_leakage_vddc_based_on_leakage_idx(adev
, &vddc
, SISLANDS_LEAKAGE_INDEX0
+ i
);
3704 if (!ret
&& (vddc
> 0) && (vddc
!= (SISLANDS_LEAKAGE_INDEX0
+ i
))) {
3705 si_pi
->leakage_voltage
.entries
[count
].voltage
= vddc
;
3706 si_pi
->leakage_voltage
.entries
[count
].leakage_index
=
3707 SISLANDS_LEAKAGE_INDEX0
+ i
;
3711 si_pi
->leakage_voltage
.count
= count
;
3714 static int si_get_leakage_voltage_from_leakage_index(struct amdgpu_device
*adev
,
3715 u32 index
, u16
*leakage_voltage
)
3717 struct si_power_info
*si_pi
= si_get_pi(adev
);
3720 if (leakage_voltage
== NULL
)
3723 if ((index
& 0xff00) != 0xff00)
3726 if ((index
& 0xff) > SISLANDS_MAX_LEAKAGE_COUNT
+ 1)
3729 if (index
< SISLANDS_LEAKAGE_INDEX0
)
3732 for (i
= 0; i
< si_pi
->leakage_voltage
.count
; i
++) {
3733 if (si_pi
->leakage_voltage
.entries
[i
].leakage_index
== index
) {
3734 *leakage_voltage
= si_pi
->leakage_voltage
.entries
[i
].voltage
;
3741 static void si_set_dpm_event_sources(struct amdgpu_device
*adev
, u32 sources
)
3743 struct rv7xx_power_info
*pi
= rv770_get_pi(adev
);
3744 bool want_thermal_protection
;
3745 enum amdgpu_dpm_event_src dpm_event_src
;
3750 want_thermal_protection
= false;
3752 case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL
):
3753 want_thermal_protection
= true;
3754 dpm_event_src
= AMDGPU_DPM_EVENT_SRC_DIGITAL
;
3756 case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL
):
3757 want_thermal_protection
= true;
3758 dpm_event_src
= AMDGPU_DPM_EVENT_SRC_EXTERNAL
;
3760 case ((1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL
) |
3761 (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL
)):
3762 want_thermal_protection
= true;
3763 dpm_event_src
= AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL
;
3767 if (want_thermal_protection
) {
3768 WREG32_P(CG_THERMAL_CTRL
, DPM_EVENT_SRC(dpm_event_src
), ~DPM_EVENT_SRC_MASK
);
3769 if (pi
->thermal_protection
)
3770 WREG32_P(GENERAL_PWRMGT
, 0, ~THERMAL_PROTECTION_DIS
);
3772 WREG32_P(GENERAL_PWRMGT
, THERMAL_PROTECTION_DIS
, ~THERMAL_PROTECTION_DIS
);
3776 static void si_enable_auto_throttle_source(struct amdgpu_device
*adev
,
3777 enum amdgpu_dpm_auto_throttle_src source
,
3780 struct rv7xx_power_info
*pi
= rv770_get_pi(adev
);
3783 if (!(pi
->active_auto_throttle_sources
& (1 << source
))) {
3784 pi
->active_auto_throttle_sources
|= 1 << source
;
3785 si_set_dpm_event_sources(adev
, pi
->active_auto_throttle_sources
);
3788 if (pi
->active_auto_throttle_sources
& (1 << source
)) {
3789 pi
->active_auto_throttle_sources
&= ~(1 << source
);
3790 si_set_dpm_event_sources(adev
, pi
->active_auto_throttle_sources
);
3795 static void si_start_dpm(struct amdgpu_device
*adev
)
3797 WREG32_P(GENERAL_PWRMGT
, GLOBAL_PWRMGT_EN
, ~GLOBAL_PWRMGT_EN
);
3800 static void si_stop_dpm(struct amdgpu_device
*adev
)
3802 WREG32_P(GENERAL_PWRMGT
, 0, ~GLOBAL_PWRMGT_EN
);
3805 static void si_enable_sclk_control(struct amdgpu_device
*adev
, bool enable
)
3808 WREG32_P(SCLK_PWRMGT_CNTL
, 0, ~SCLK_PWRMGT_OFF
);
3810 WREG32_P(SCLK_PWRMGT_CNTL
, SCLK_PWRMGT_OFF
, ~SCLK_PWRMGT_OFF
);
3815 static int si_notify_hardware_of_thermal_state(struct amdgpu_device
*adev
,
3820 if (thermal_level
== 0) {
3821 ret
= amdgpu_si_send_msg_to_smc(adev
, PPSMC_MSG_EnableThermalInterrupt
);
3822 if (ret
== PPSMC_Result_OK
)
3830 static void si_notify_hardware_vpu_recovery_event(struct amdgpu_device
*adev
)
3832 si_write_smc_soft_register(adev
, SI_SMC_SOFT_REGISTER_tdr_is_about_to_happen
, true);
3837 static int si_notify_hw_of_powersource(struct amdgpu_device
*adev
, bool ac_power
)
3840 return (amdgpu_si_send_msg_to_smc(adev
, PPSMC_MSG_RunningOnAC
) == PPSMC_Result_OK
) ?
3847 static PPSMC_Result
si_send_msg_to_smc_with_parameter(struct amdgpu_device
*adev
,
3848 PPSMC_Msg msg
, u32 parameter
)
3850 WREG32(SMC_SCRATCH0
, parameter
);
3851 return amdgpu_si_send_msg_to_smc(adev
, msg
);
3854 static int si_restrict_performance_levels_before_switch(struct amdgpu_device
*adev
)
3856 if (amdgpu_si_send_msg_to_smc(adev
, PPSMC_MSG_NoForcedLevel
) != PPSMC_Result_OK
)
3859 return (si_send_msg_to_smc_with_parameter(adev
, PPSMC_MSG_SetEnabledLevels
, 1) == PPSMC_Result_OK
) ?
3863 static int si_dpm_force_performance_level(struct amdgpu_device
*adev
,
3864 enum amd_dpm_forced_level level
)
3866 struct amdgpu_ps
*rps
= adev
->pm
.dpm
.current_ps
;
3867 struct si_ps
*ps
= si_get_ps(rps
);
3868 u32 levels
= ps
->performance_level_count
;
3870 if (level
== AMD_DPM_FORCED_LEVEL_HIGH
) {
3871 if (si_send_msg_to_smc_with_parameter(adev
, PPSMC_MSG_SetEnabledLevels
, levels
) != PPSMC_Result_OK
)
3874 if (si_send_msg_to_smc_with_parameter(adev
, PPSMC_MSG_SetForcedLevels
, 1) != PPSMC_Result_OK
)
3876 } else if (level
== AMD_DPM_FORCED_LEVEL_LOW
) {
3877 if (si_send_msg_to_smc_with_parameter(adev
, PPSMC_MSG_SetForcedLevels
, 0) != PPSMC_Result_OK
)
3880 if (si_send_msg_to_smc_with_parameter(adev
, PPSMC_MSG_SetEnabledLevels
, 1) != PPSMC_Result_OK
)
3882 } else if (level
== AMD_DPM_FORCED_LEVEL_AUTO
) {
3883 if (si_send_msg_to_smc_with_parameter(adev
, PPSMC_MSG_SetForcedLevels
, 0) != PPSMC_Result_OK
)
3886 if (si_send_msg_to_smc_with_parameter(adev
, PPSMC_MSG_SetEnabledLevels
, levels
) != PPSMC_Result_OK
)
3890 adev
->pm
.dpm
.forced_level
= level
;
3896 static int si_set_boot_state(struct amdgpu_device
*adev
)
3898 return (amdgpu_si_send_msg_to_smc(adev
, PPSMC_MSG_SwitchToInitialState
) == PPSMC_Result_OK
) ?
3903 static int si_set_sw_state(struct amdgpu_device
*adev
)
3905 return (amdgpu_si_send_msg_to_smc(adev
, PPSMC_MSG_SwitchToSwState
) == PPSMC_Result_OK
) ?
3909 static int si_halt_smc(struct amdgpu_device
*adev
)
3911 if (amdgpu_si_send_msg_to_smc(adev
, PPSMC_MSG_Halt
) != PPSMC_Result_OK
)
3914 return (amdgpu_si_wait_for_smc_inactive(adev
) == PPSMC_Result_OK
) ?
3918 static int si_resume_smc(struct amdgpu_device
*adev
)
3920 if (amdgpu_si_send_msg_to_smc(adev
, PPSMC_FlushDataCache
) != PPSMC_Result_OK
)
3923 return (amdgpu_si_send_msg_to_smc(adev
, PPSMC_MSG_Resume
) == PPSMC_Result_OK
) ?
3927 static void si_dpm_start_smc(struct amdgpu_device
*adev
)
3929 amdgpu_si_program_jump_on_start(adev
);
3930 amdgpu_si_start_smc(adev
);
3931 amdgpu_si_smc_clock(adev
, true);
3934 static void si_dpm_stop_smc(struct amdgpu_device
*adev
)
3936 amdgpu_si_reset_smc(adev
);
3937 amdgpu_si_smc_clock(adev
, false);
3940 static int si_process_firmware_header(struct amdgpu_device
*adev
)
3942 struct si_power_info
*si_pi
= si_get_pi(adev
);
3946 ret
= amdgpu_si_read_smc_sram_dword(adev
,
3947 SISLANDS_SMC_FIRMWARE_HEADER_LOCATION
+
3948 SISLANDS_SMC_FIRMWARE_HEADER_stateTable
,
3949 &tmp
, si_pi
->sram_end
);
3953 si_pi
->state_table_start
= tmp
;
3955 ret
= amdgpu_si_read_smc_sram_dword(adev
,
3956 SISLANDS_SMC_FIRMWARE_HEADER_LOCATION
+
3957 SISLANDS_SMC_FIRMWARE_HEADER_softRegisters
,
3958 &tmp
, si_pi
->sram_end
);
3962 si_pi
->soft_regs_start
= tmp
;
3964 ret
= amdgpu_si_read_smc_sram_dword(adev
,
3965 SISLANDS_SMC_FIRMWARE_HEADER_LOCATION
+
3966 SISLANDS_SMC_FIRMWARE_HEADER_mcRegisterTable
,
3967 &tmp
, si_pi
->sram_end
);
3971 si_pi
->mc_reg_table_start
= tmp
;
3973 ret
= amdgpu_si_read_smc_sram_dword(adev
,
3974 SISLANDS_SMC_FIRMWARE_HEADER_LOCATION
+
3975 SISLANDS_SMC_FIRMWARE_HEADER_fanTable
,
3976 &tmp
, si_pi
->sram_end
);
3980 si_pi
->fan_table_start
= tmp
;
3982 ret
= amdgpu_si_read_smc_sram_dword(adev
,
3983 SISLANDS_SMC_FIRMWARE_HEADER_LOCATION
+
3984 SISLANDS_SMC_FIRMWARE_HEADER_mcArbDramAutoRefreshTable
,
3985 &tmp
, si_pi
->sram_end
);
3989 si_pi
->arb_table_start
= tmp
;
3991 ret
= amdgpu_si_read_smc_sram_dword(adev
,
3992 SISLANDS_SMC_FIRMWARE_HEADER_LOCATION
+
3993 SISLANDS_SMC_FIRMWARE_HEADER_CacConfigTable
,
3994 &tmp
, si_pi
->sram_end
);
3998 si_pi
->cac_table_start
= tmp
;
4000 ret
= amdgpu_si_read_smc_sram_dword(adev
,
4001 SISLANDS_SMC_FIRMWARE_HEADER_LOCATION
+
4002 SISLANDS_SMC_FIRMWARE_HEADER_DteConfiguration
,
4003 &tmp
, si_pi
->sram_end
);
4007 si_pi
->dte_table_start
= tmp
;
4009 ret
= amdgpu_si_read_smc_sram_dword(adev
,
4010 SISLANDS_SMC_FIRMWARE_HEADER_LOCATION
+
4011 SISLANDS_SMC_FIRMWARE_HEADER_spllTable
,
4012 &tmp
, si_pi
->sram_end
);
4016 si_pi
->spll_table_start
= tmp
;
4018 ret
= amdgpu_si_read_smc_sram_dword(adev
,
4019 SISLANDS_SMC_FIRMWARE_HEADER_LOCATION
+
4020 SISLANDS_SMC_FIRMWARE_HEADER_PAPMParameters
,
4021 &tmp
, si_pi
->sram_end
);
4025 si_pi
->papm_cfg_table_start
= tmp
;
4030 static void si_read_clock_registers(struct amdgpu_device
*adev
)
4032 struct si_power_info
*si_pi
= si_get_pi(adev
);
4034 si_pi
->clock_registers
.cg_spll_func_cntl
= RREG32(CG_SPLL_FUNC_CNTL
);
4035 si_pi
->clock_registers
.cg_spll_func_cntl_2
= RREG32(CG_SPLL_FUNC_CNTL_2
);
4036 si_pi
->clock_registers
.cg_spll_func_cntl_3
= RREG32(CG_SPLL_FUNC_CNTL_3
);
4037 si_pi
->clock_registers
.cg_spll_func_cntl_4
= RREG32(CG_SPLL_FUNC_CNTL_4
);
4038 si_pi
->clock_registers
.cg_spll_spread_spectrum
= RREG32(CG_SPLL_SPREAD_SPECTRUM
);
4039 si_pi
->clock_registers
.cg_spll_spread_spectrum_2
= RREG32(CG_SPLL_SPREAD_SPECTRUM_2
);
4040 si_pi
->clock_registers
.dll_cntl
= RREG32(DLL_CNTL
);
4041 si_pi
->clock_registers
.mclk_pwrmgt_cntl
= RREG32(MCLK_PWRMGT_CNTL
);
4042 si_pi
->clock_registers
.mpll_ad_func_cntl
= RREG32(MPLL_AD_FUNC_CNTL
);
4043 si_pi
->clock_registers
.mpll_dq_func_cntl
= RREG32(MPLL_DQ_FUNC_CNTL
);
4044 si_pi
->clock_registers
.mpll_func_cntl
= RREG32(MPLL_FUNC_CNTL
);
4045 si_pi
->clock_registers
.mpll_func_cntl_1
= RREG32(MPLL_FUNC_CNTL_1
);
4046 si_pi
->clock_registers
.mpll_func_cntl_2
= RREG32(MPLL_FUNC_CNTL_2
);
4047 si_pi
->clock_registers
.mpll_ss1
= RREG32(MPLL_SS1
);
4048 si_pi
->clock_registers
.mpll_ss2
= RREG32(MPLL_SS2
);
4051 static void si_enable_thermal_protection(struct amdgpu_device
*adev
,
4055 WREG32_P(GENERAL_PWRMGT
, 0, ~THERMAL_PROTECTION_DIS
);
4057 WREG32_P(GENERAL_PWRMGT
, THERMAL_PROTECTION_DIS
, ~THERMAL_PROTECTION_DIS
);
4060 static void si_enable_acpi_power_management(struct amdgpu_device
*adev
)
4062 WREG32_P(GENERAL_PWRMGT
, STATIC_PM_EN
, ~STATIC_PM_EN
);
4066 static int si_enter_ulp_state(struct amdgpu_device
*adev
)
4068 WREG32(SMC_MESSAGE_0
, PPSMC_MSG_SwitchToMinimumPower
);
4075 static int si_exit_ulp_state(struct amdgpu_device
*adev
)
4079 WREG32(SMC_MESSAGE_0
, PPSMC_MSG_ResumeFromMinimumPower
);
4083 for (i
= 0; i
< adev
->usec_timeout
; i
++) {
4084 if (RREG32(SMC_RESP_0
) == 1)
4093 static int si_notify_smc_display_change(struct amdgpu_device
*adev
,
4096 PPSMC_Msg msg
= has_display
?
4097 PPSMC_MSG_HasDisplay
: PPSMC_MSG_NoDisplay
;
4099 return (amdgpu_si_send_msg_to_smc(adev
, msg
) == PPSMC_Result_OK
) ?
4103 static void si_program_response_times(struct amdgpu_device
*adev
)
4105 u32 voltage_response_time
, backbias_response_time
, acpi_delay_time
, vbi_time_out
;
4106 u32 vddc_dly
, acpi_dly
, vbi_dly
;
4107 u32 reference_clock
;
4109 si_write_smc_soft_register(adev
, SI_SMC_SOFT_REGISTER_mvdd_chg_time
, 1);
4111 voltage_response_time
= (u32
)adev
->pm
.dpm
.voltage_response_time
;
4112 backbias_response_time
= (u32
)adev
->pm
.dpm
.backbias_response_time
;
4114 if (voltage_response_time
== 0)
4115 voltage_response_time
= 1000;
4117 acpi_delay_time
= 15000;
4118 vbi_time_out
= 100000;
4120 reference_clock
= amdgpu_asic_get_xclk(adev
);
4122 vddc_dly
= (voltage_response_time
* reference_clock
) / 100;
4123 acpi_dly
= (acpi_delay_time
* reference_clock
) / 100;
4124 vbi_dly
= (vbi_time_out
* reference_clock
) / 100;
4126 si_write_smc_soft_register(adev
, SI_SMC_SOFT_REGISTER_delay_vreg
, vddc_dly
);
4127 si_write_smc_soft_register(adev
, SI_SMC_SOFT_REGISTER_delay_acpi
, acpi_dly
);
4128 si_write_smc_soft_register(adev
, SI_SMC_SOFT_REGISTER_mclk_chg_timeout
, vbi_dly
);
4129 si_write_smc_soft_register(adev
, SI_SMC_SOFT_REGISTER_mc_block_delay
, 0xAA);
4132 static void si_program_ds_registers(struct amdgpu_device
*adev
)
4134 struct evergreen_power_info
*eg_pi
= evergreen_get_pi(adev
);
4137 /* DEEP_SLEEP_CLK_SEL field should be 0x10 on tahiti A0 */
4138 if (adev
->asic_type
== CHIP_TAHITI
&& adev
->rev_id
== 0x0)
4143 if (eg_pi
->sclk_deep_sleep
) {
4144 WREG32_P(MISC_CLK_CNTL
, DEEP_SLEEP_CLK_SEL(tmp
), ~DEEP_SLEEP_CLK_SEL_MASK
);
4145 WREG32_P(CG_SPLL_AUTOSCALE_CNTL
, AUTOSCALE_ON_SS_CLEAR
,
4146 ~AUTOSCALE_ON_SS_CLEAR
);
4150 static void si_program_display_gap(struct amdgpu_device
*adev
)
4155 tmp
= RREG32(CG_DISPLAY_GAP_CNTL
) & ~(DISP1_GAP_MASK
| DISP2_GAP_MASK
);
4156 if (adev
->pm
.dpm
.new_active_crtc_count
> 0)
4157 tmp
|= DISP1_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM
);
4159 tmp
|= DISP1_GAP(R600_PM_DISPLAY_GAP_IGNORE
);
4161 if (adev
->pm
.dpm
.new_active_crtc_count
> 1)
4162 tmp
|= DISP2_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM
);
4164 tmp
|= DISP2_GAP(R600_PM_DISPLAY_GAP_IGNORE
);
4166 WREG32(CG_DISPLAY_GAP_CNTL
, tmp
);
4168 tmp
= RREG32(DCCG_DISP_SLOW_SELECT_REG
);
4169 pipe
= (tmp
& DCCG_DISP1_SLOW_SELECT_MASK
) >> DCCG_DISP1_SLOW_SELECT_SHIFT
;
4171 if ((adev
->pm
.dpm
.new_active_crtc_count
> 0) &&
4172 (!(adev
->pm
.dpm
.new_active_crtcs
& (1 << pipe
)))) {
4173 /* find the first active crtc */
4174 for (i
= 0; i
< adev
->mode_info
.num_crtc
; i
++) {
4175 if (adev
->pm
.dpm
.new_active_crtcs
& (1 << i
))
4178 if (i
== adev
->mode_info
.num_crtc
)
4183 tmp
&= ~DCCG_DISP1_SLOW_SELECT_MASK
;
4184 tmp
|= DCCG_DISP1_SLOW_SELECT(pipe
);
4185 WREG32(DCCG_DISP_SLOW_SELECT_REG
, tmp
);
4188 /* Setting this to false forces the performance state to low if the crtcs are disabled.
4189 * This can be a problem on PowerXpress systems or if you want to use the card
4190 * for offscreen rendering or compute if there are no crtcs enabled.
4192 si_notify_smc_display_change(adev
, adev
->pm
.dpm
.new_active_crtc_count
> 0);
4195 static void si_enable_spread_spectrum(struct amdgpu_device
*adev
, bool enable
)
4197 struct rv7xx_power_info
*pi
= rv770_get_pi(adev
);
4201 WREG32_P(GENERAL_PWRMGT
, DYN_SPREAD_SPECTRUM_EN
, ~DYN_SPREAD_SPECTRUM_EN
);
4203 WREG32_P(CG_SPLL_SPREAD_SPECTRUM
, 0, ~SSEN
);
4204 WREG32_P(GENERAL_PWRMGT
, 0, ~DYN_SPREAD_SPECTRUM_EN
);
4208 static void si_setup_bsp(struct amdgpu_device
*adev
)
4210 struct rv7xx_power_info
*pi
= rv770_get_pi(adev
);
4211 u32 xclk
= amdgpu_asic_get_xclk(adev
);
4213 r600_calculate_u_and_p(pi
->asi
,
4219 r600_calculate_u_and_p(pi
->pasi
,
4226 pi
->dsp
= BSP(pi
->bsp
) | BSU(pi
->bsu
);
4227 pi
->psp
= BSP(pi
->pbsp
) | BSU(pi
->pbsu
);
4229 WREG32(CG_BSP
, pi
->dsp
);
4232 static void si_program_git(struct amdgpu_device
*adev
)
4234 WREG32_P(CG_GIT
, CG_GICST(R600_GICST_DFLT
), ~CG_GICST_MASK
);
4237 static void si_program_tp(struct amdgpu_device
*adev
)
4240 enum r600_td td
= R600_TD_DFLT
;
4242 for (i
= 0; i
< R600_PM_NUMBER_OF_TC
; i
++)
4243 WREG32(CG_FFCT_0
+ i
, (UTC_0(r600_utc
[i
]) | DTC_0(r600_dtc
[i
])));
4245 if (td
== R600_TD_AUTO
)
4246 WREG32_P(SCLK_PWRMGT_CNTL
, 0, ~FIR_FORCE_TREND_SEL
);
4248 WREG32_P(SCLK_PWRMGT_CNTL
, FIR_FORCE_TREND_SEL
, ~FIR_FORCE_TREND_SEL
);
4250 if (td
== R600_TD_UP
)
4251 WREG32_P(SCLK_PWRMGT_CNTL
, 0, ~FIR_TREND_MODE
);
4253 if (td
== R600_TD_DOWN
)
4254 WREG32_P(SCLK_PWRMGT_CNTL
, FIR_TREND_MODE
, ~FIR_TREND_MODE
);
4257 static void si_program_tpp(struct amdgpu_device
*adev
)
4259 WREG32(CG_TPC
, R600_TPC_DFLT
);
4262 static void si_program_sstp(struct amdgpu_device
*adev
)
4264 WREG32(CG_SSP
, (SSTU(R600_SSTU_DFLT
) | SST(R600_SST_DFLT
)));
4267 static void si_enable_display_gap(struct amdgpu_device
*adev
)
4269 u32 tmp
= RREG32(CG_DISPLAY_GAP_CNTL
);
4271 tmp
&= ~(DISP1_GAP_MASK
| DISP2_GAP_MASK
);
4272 tmp
|= (DISP1_GAP(R600_PM_DISPLAY_GAP_IGNORE
) |
4273 DISP2_GAP(R600_PM_DISPLAY_GAP_IGNORE
));
4275 tmp
&= ~(DISP1_GAP_MCHG_MASK
| DISP2_GAP_MCHG_MASK
);
4276 tmp
|= (DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK
) |
4277 DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE
));
4278 WREG32(CG_DISPLAY_GAP_CNTL
, tmp
);
4281 static void si_program_vc(struct amdgpu_device
*adev
)
4283 struct rv7xx_power_info
*pi
= rv770_get_pi(adev
);
4285 WREG32(CG_FTV
, pi
->vrc
);
4288 static void si_clear_vc(struct amdgpu_device
*adev
)
4293 static u8
si_get_ddr3_mclk_frequency_ratio(u32 memory_clock
)
4297 if (memory_clock
< 10000)
4299 else if (memory_clock
>= 80000)
4300 mc_para_index
= 0x0f;
4302 mc_para_index
= (u8
)((memory_clock
- 10000) / 5000 + 1);
4303 return mc_para_index
;
4306 static u8
si_get_mclk_frequency_ratio(u32 memory_clock
, bool strobe_mode
)
4311 if (memory_clock
< 12500)
4312 mc_para_index
= 0x00;
4313 else if (memory_clock
> 47500)
4314 mc_para_index
= 0x0f;
4316 mc_para_index
= (u8
)((memory_clock
- 10000) / 2500);
4318 if (memory_clock
< 65000)
4319 mc_para_index
= 0x00;
4320 else if (memory_clock
> 135000)
4321 mc_para_index
= 0x0f;
4323 mc_para_index
= (u8
)((memory_clock
- 60000) / 5000);
4325 return mc_para_index
;
4328 static u8
si_get_strobe_mode_settings(struct amdgpu_device
*adev
, u32 mclk
)
4330 struct rv7xx_power_info
*pi
= rv770_get_pi(adev
);
4331 bool strobe_mode
= false;
4334 if (mclk
<= pi
->mclk_strobe_mode_threshold
)
4337 if (adev
->mc
.vram_type
== AMDGPU_VRAM_TYPE_GDDR5
)
4338 result
= si_get_mclk_frequency_ratio(mclk
, strobe_mode
);
4340 result
= si_get_ddr3_mclk_frequency_ratio(mclk
);
4343 result
|= SISLANDS_SMC_STROBE_ENABLE
;
4348 static int si_upload_firmware(struct amdgpu_device
*adev
)
4350 struct si_power_info
*si_pi
= si_get_pi(adev
);
4352 amdgpu_si_reset_smc(adev
);
4353 amdgpu_si_smc_clock(adev
, false);
4355 return amdgpu_si_load_smc_ucode(adev
, si_pi
->sram_end
);
4358 static bool si_validate_phase_shedding_tables(struct amdgpu_device
*adev
,
4359 const struct atom_voltage_table
*table
,
4360 const struct amdgpu_phase_shedding_limits_table
*limits
)
4362 u32 data
, num_bits
, num_levels
;
4364 if ((table
== NULL
) || (limits
== NULL
))
4367 data
= table
->mask_low
;
4369 num_bits
= hweight32(data
);
4374 num_levels
= (1 << num_bits
);
4376 if (table
->count
!= num_levels
)
4379 if (limits
->count
!= (num_levels
- 1))
4385 static void si_trim_voltage_table_to_fit_state_table(struct amdgpu_device
*adev
,
4386 u32 max_voltage_steps
,
4387 struct atom_voltage_table
*voltage_table
)
4389 unsigned int i
, diff
;
4391 if (voltage_table
->count
<= max_voltage_steps
)
4394 diff
= voltage_table
->count
- max_voltage_steps
;
4396 for (i
= 0; i
< max_voltage_steps
; i
++)
4397 voltage_table
->entries
[i
] = voltage_table
->entries
[i
+ diff
];
4399 voltage_table
->count
= max_voltage_steps
;
4402 static int si_get_svi2_voltage_table(struct amdgpu_device
*adev
,
4403 struct amdgpu_clock_voltage_dependency_table
*voltage_dependency_table
,
4404 struct atom_voltage_table
*voltage_table
)
4408 if (voltage_dependency_table
== NULL
)
4411 voltage_table
->mask_low
= 0;
4412 voltage_table
->phase_delay
= 0;
4414 voltage_table
->count
= voltage_dependency_table
->count
;
4415 for (i
= 0; i
< voltage_table
->count
; i
++) {
4416 voltage_table
->entries
[i
].value
= voltage_dependency_table
->entries
[i
].v
;
4417 voltage_table
->entries
[i
].smio_low
= 0;
4423 static int si_construct_voltage_tables(struct amdgpu_device
*adev
)
4425 struct rv7xx_power_info
*pi
= rv770_get_pi(adev
);
4426 struct evergreen_power_info
*eg_pi
= evergreen_get_pi(adev
);
4427 struct si_power_info
*si_pi
= si_get_pi(adev
);
4430 if (pi
->voltage_control
) {
4431 ret
= amdgpu_atombios_get_voltage_table(adev
, VOLTAGE_TYPE_VDDC
,
4432 VOLTAGE_OBJ_GPIO_LUT
, &eg_pi
->vddc_voltage_table
);
4436 if (eg_pi
->vddc_voltage_table
.count
> SISLANDS_MAX_NO_VREG_STEPS
)
4437 si_trim_voltage_table_to_fit_state_table(adev
,
4438 SISLANDS_MAX_NO_VREG_STEPS
,
4439 &eg_pi
->vddc_voltage_table
);
4440 } else if (si_pi
->voltage_control_svi2
) {
4441 ret
= si_get_svi2_voltage_table(adev
,
4442 &adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_mclk
,
4443 &eg_pi
->vddc_voltage_table
);
4450 if (eg_pi
->vddci_control
) {
4451 ret
= amdgpu_atombios_get_voltage_table(adev
, VOLTAGE_TYPE_VDDCI
,
4452 VOLTAGE_OBJ_GPIO_LUT
, &eg_pi
->vddci_voltage_table
);
4456 if (eg_pi
->vddci_voltage_table
.count
> SISLANDS_MAX_NO_VREG_STEPS
)
4457 si_trim_voltage_table_to_fit_state_table(adev
,
4458 SISLANDS_MAX_NO_VREG_STEPS
,
4459 &eg_pi
->vddci_voltage_table
);
4461 if (si_pi
->vddci_control_svi2
) {
4462 ret
= si_get_svi2_voltage_table(adev
,
4463 &adev
->pm
.dpm
.dyn_state
.vddci_dependency_on_mclk
,
4464 &eg_pi
->vddci_voltage_table
);
4469 if (pi
->mvdd_control
) {
4470 ret
= amdgpu_atombios_get_voltage_table(adev
, VOLTAGE_TYPE_MVDDC
,
4471 VOLTAGE_OBJ_GPIO_LUT
, &si_pi
->mvdd_voltage_table
);
4474 pi
->mvdd_control
= false;
4478 if (si_pi
->mvdd_voltage_table
.count
== 0) {
4479 pi
->mvdd_control
= false;
4483 if (si_pi
->mvdd_voltage_table
.count
> SISLANDS_MAX_NO_VREG_STEPS
)
4484 si_trim_voltage_table_to_fit_state_table(adev
,
4485 SISLANDS_MAX_NO_VREG_STEPS
,
4486 &si_pi
->mvdd_voltage_table
);
4489 if (si_pi
->vddc_phase_shed_control
) {
4490 ret
= amdgpu_atombios_get_voltage_table(adev
, VOLTAGE_TYPE_VDDC
,
4491 VOLTAGE_OBJ_PHASE_LUT
, &si_pi
->vddc_phase_shed_table
);
4493 si_pi
->vddc_phase_shed_control
= false;
4495 if ((si_pi
->vddc_phase_shed_table
.count
== 0) ||
4496 (si_pi
->vddc_phase_shed_table
.count
> SISLANDS_MAX_NO_VREG_STEPS
))
4497 si_pi
->vddc_phase_shed_control
= false;
4503 static void si_populate_smc_voltage_table(struct amdgpu_device
*adev
,
4504 const struct atom_voltage_table
*voltage_table
,
4505 SISLANDS_SMC_STATETABLE
*table
)
4509 for (i
= 0; i
< voltage_table
->count
; i
++)
4510 table
->lowSMIO
[i
] |= cpu_to_be32(voltage_table
->entries
[i
].smio_low
);
4513 static int si_populate_smc_voltage_tables(struct amdgpu_device
*adev
,
4514 SISLANDS_SMC_STATETABLE
*table
)
4516 struct rv7xx_power_info
*pi
= rv770_get_pi(adev
);
4517 struct evergreen_power_info
*eg_pi
= evergreen_get_pi(adev
);
4518 struct si_power_info
*si_pi
= si_get_pi(adev
);
4521 if (si_pi
->voltage_control_svi2
) {
4522 si_write_smc_soft_register(adev
, SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svc
,
4523 si_pi
->svc_gpio_id
);
4524 si_write_smc_soft_register(adev
, SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svd
,
4525 si_pi
->svd_gpio_id
);
4526 si_write_smc_soft_register(adev
, SI_SMC_SOFT_REGISTER_svi_rework_plat_type
,
4529 if (eg_pi
->vddc_voltage_table
.count
) {
4530 si_populate_smc_voltage_table(adev
, &eg_pi
->vddc_voltage_table
, table
);
4531 table
->voltageMaskTable
.lowMask
[SISLANDS_SMC_VOLTAGEMASK_VDDC
] =
4532 cpu_to_be32(eg_pi
->vddc_voltage_table
.mask_low
);
4534 for (i
= 0; i
< eg_pi
->vddc_voltage_table
.count
; i
++) {
4535 if (pi
->max_vddc_in_table
<= eg_pi
->vddc_voltage_table
.entries
[i
].value
) {
4536 table
->maxVDDCIndexInPPTable
= i
;
4542 if (eg_pi
->vddci_voltage_table
.count
) {
4543 si_populate_smc_voltage_table(adev
, &eg_pi
->vddci_voltage_table
, table
);
4545 table
->voltageMaskTable
.lowMask
[SISLANDS_SMC_VOLTAGEMASK_VDDCI
] =
4546 cpu_to_be32(eg_pi
->vddci_voltage_table
.mask_low
);
4550 if (si_pi
->mvdd_voltage_table
.count
) {
4551 si_populate_smc_voltage_table(adev
, &si_pi
->mvdd_voltage_table
, table
);
4553 table
->voltageMaskTable
.lowMask
[SISLANDS_SMC_VOLTAGEMASK_MVDD
] =
4554 cpu_to_be32(si_pi
->mvdd_voltage_table
.mask_low
);
4557 if (si_pi
->vddc_phase_shed_control
) {
4558 if (si_validate_phase_shedding_tables(adev
, &si_pi
->vddc_phase_shed_table
,
4559 &adev
->pm
.dpm
.dyn_state
.phase_shedding_limits_table
)) {
4560 si_populate_smc_voltage_table(adev
, &si_pi
->vddc_phase_shed_table
, table
);
4562 table
->phaseMaskTable
.lowMask
[SISLANDS_SMC_VOLTAGEMASK_VDDC_PHASE_SHEDDING
] =
4563 cpu_to_be32(si_pi
->vddc_phase_shed_table
.mask_low
);
4565 si_write_smc_soft_register(adev
, SI_SMC_SOFT_REGISTER_phase_shedding_delay
,
4566 (u32
)si_pi
->vddc_phase_shed_table
.phase_delay
);
4568 si_pi
->vddc_phase_shed_control
= false;
4576 static int si_populate_voltage_value(struct amdgpu_device
*adev
,
4577 const struct atom_voltage_table
*table
,
4578 u16 value
, SISLANDS_SMC_VOLTAGE_VALUE
*voltage
)
4582 for (i
= 0; i
< table
->count
; i
++) {
4583 if (value
<= table
->entries
[i
].value
) {
4584 voltage
->index
= (u8
)i
;
4585 voltage
->value
= cpu_to_be16(table
->entries
[i
].value
);
4590 if (i
>= table
->count
)
4596 static int si_populate_mvdd_value(struct amdgpu_device
*adev
, u32 mclk
,
4597 SISLANDS_SMC_VOLTAGE_VALUE
*voltage
)
4599 struct rv7xx_power_info
*pi
= rv770_get_pi(adev
);
4600 struct si_power_info
*si_pi
= si_get_pi(adev
);
4602 if (pi
->mvdd_control
) {
4603 if (mclk
<= pi
->mvdd_split_frequency
)
4606 voltage
->index
= (u8
)(si_pi
->mvdd_voltage_table
.count
) - 1;
4608 voltage
->value
= cpu_to_be16(si_pi
->mvdd_voltage_table
.entries
[voltage
->index
].value
);
4613 static int si_get_std_voltage_value(struct amdgpu_device
*adev
,
4614 SISLANDS_SMC_VOLTAGE_VALUE
*voltage
,
4618 bool voltage_found
= false;
4619 *std_voltage
= be16_to_cpu(voltage
->value
);
4621 if (adev
->pm
.dpm
.dyn_state
.cac_leakage_table
.entries
) {
4622 if (adev
->pm
.dpm
.platform_caps
& ATOM_PP_PLATFORM_CAP_NEW_CAC_VOLTAGE
) {
4623 if (adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_sclk
.entries
== NULL
)
4626 for (v_index
= 0; (u32
)v_index
< adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_sclk
.count
; v_index
++) {
4627 if (be16_to_cpu(voltage
->value
) ==
4628 (u16
)adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_sclk
.entries
[v_index
].v
) {
4629 voltage_found
= true;
4630 if ((u32
)v_index
< adev
->pm
.dpm
.dyn_state
.cac_leakage_table
.count
)
4632 adev
->pm
.dpm
.dyn_state
.cac_leakage_table
.entries
[v_index
].vddc
;
4635 adev
->pm
.dpm
.dyn_state
.cac_leakage_table
.entries
[adev
->pm
.dpm
.dyn_state
.cac_leakage_table
.count
-1].vddc
;
4640 if (!voltage_found
) {
4641 for (v_index
= 0; (u32
)v_index
< adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_sclk
.count
; v_index
++) {
4642 if (be16_to_cpu(voltage
->value
) <=
4643 (u16
)adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_sclk
.entries
[v_index
].v
) {
4644 voltage_found
= true;
4645 if ((u32
)v_index
< adev
->pm
.dpm
.dyn_state
.cac_leakage_table
.count
)
4647 adev
->pm
.dpm
.dyn_state
.cac_leakage_table
.entries
[v_index
].vddc
;
4650 adev
->pm
.dpm
.dyn_state
.cac_leakage_table
.entries
[adev
->pm
.dpm
.dyn_state
.cac_leakage_table
.count
-1].vddc
;
4656 if ((u32
)voltage
->index
< adev
->pm
.dpm
.dyn_state
.cac_leakage_table
.count
)
4657 *std_voltage
= adev
->pm
.dpm
.dyn_state
.cac_leakage_table
.entries
[voltage
->index
].vddc
;
4664 static int si_populate_std_voltage_value(struct amdgpu_device
*adev
,
4665 u16 value
, u8 index
,
4666 SISLANDS_SMC_VOLTAGE_VALUE
*voltage
)
4668 voltage
->index
= index
;
4669 voltage
->value
= cpu_to_be16(value
);
4674 static int si_populate_phase_shedding_value(struct amdgpu_device
*adev
,
4675 const struct amdgpu_phase_shedding_limits_table
*limits
,
4676 u16 voltage
, u32 sclk
, u32 mclk
,
4677 SISLANDS_SMC_VOLTAGE_VALUE
*smc_voltage
)
4681 for (i
= 0; i
< limits
->count
; i
++) {
4682 if ((voltage
<= limits
->entries
[i
].voltage
) &&
4683 (sclk
<= limits
->entries
[i
].sclk
) &&
4684 (mclk
<= limits
->entries
[i
].mclk
))
4688 smc_voltage
->phase_settings
= (u8
)i
;
4693 static int si_init_arb_table_index(struct amdgpu_device
*adev
)
4695 struct si_power_info
*si_pi
= si_get_pi(adev
);
4699 ret
= amdgpu_si_read_smc_sram_dword(adev
, si_pi
->arb_table_start
,
4700 &tmp
, si_pi
->sram_end
);
4705 tmp
|= MC_CG_ARB_FREQ_F1
<< 24;
4707 return amdgpu_si_write_smc_sram_dword(adev
, si_pi
->arb_table_start
,
4708 tmp
, si_pi
->sram_end
);
4711 static int si_initial_switch_from_arb_f0_to_f1(struct amdgpu_device
*adev
)
4713 return ni_copy_and_switch_arb_sets(adev
, MC_CG_ARB_FREQ_F0
, MC_CG_ARB_FREQ_F1
);
4716 static int si_reset_to_default(struct amdgpu_device
*adev
)
4718 return (amdgpu_si_send_msg_to_smc(adev
, PPSMC_MSG_ResetToDefaults
) == PPSMC_Result_OK
) ?
4722 static int si_force_switch_to_arb_f0(struct amdgpu_device
*adev
)
4724 struct si_power_info
*si_pi
= si_get_pi(adev
);
4728 ret
= amdgpu_si_read_smc_sram_dword(adev
, si_pi
->arb_table_start
,
4729 &tmp
, si_pi
->sram_end
);
4733 tmp
= (tmp
>> 24) & 0xff;
4735 if (tmp
== MC_CG_ARB_FREQ_F0
)
4738 return ni_copy_and_switch_arb_sets(adev
, tmp
, MC_CG_ARB_FREQ_F0
);
4741 static u32
si_calculate_memory_refresh_rate(struct amdgpu_device
*adev
,
4745 u32 dram_refresh_rate
;
4746 u32 mc_arb_rfsh_rate
;
4747 u32 tmp
= (RREG32(MC_ARB_RAMCFG
) & NOOFROWS_MASK
) >> NOOFROWS_SHIFT
;
4752 dram_rows
= 1 << (tmp
+ 10);
4754 dram_refresh_rate
= 1 << ((RREG32(MC_SEQ_MISC0
) & 0x3) + 3);
4755 mc_arb_rfsh_rate
= ((engine_clock
* 10) * dram_refresh_rate
/ dram_rows
- 32) / 64;
4757 return mc_arb_rfsh_rate
;
4760 static int si_populate_memory_timing_parameters(struct amdgpu_device
*adev
,
4761 struct rv7xx_pl
*pl
,
4762 SMC_SIslands_MCArbDramTimingRegisterSet
*arb_regs
)
4768 arb_regs
->mc_arb_rfsh_rate
=
4769 (u8
)si_calculate_memory_refresh_rate(adev
, pl
->sclk
);
4771 amdgpu_atombios_set_engine_dram_timings(adev
,
4775 dram_timing
= RREG32(MC_ARB_DRAM_TIMING
);
4776 dram_timing2
= RREG32(MC_ARB_DRAM_TIMING2
);
4777 burst_time
= RREG32(MC_ARB_BURST_TIME
) & STATE0_MASK
;
4779 arb_regs
->mc_arb_dram_timing
= cpu_to_be32(dram_timing
);
4780 arb_regs
->mc_arb_dram_timing2
= cpu_to_be32(dram_timing2
);
4781 arb_regs
->mc_arb_burst_time
= (u8
)burst_time
;
4786 static int si_do_program_memory_timing_parameters(struct amdgpu_device
*adev
,
4787 struct amdgpu_ps
*amdgpu_state
,
4788 unsigned int first_arb_set
)
4790 struct si_power_info
*si_pi
= si_get_pi(adev
);
4791 struct si_ps
*state
= si_get_ps(amdgpu_state
);
4792 SMC_SIslands_MCArbDramTimingRegisterSet arb_regs
= { 0 };
4795 for (i
= 0; i
< state
->performance_level_count
; i
++) {
4796 ret
= si_populate_memory_timing_parameters(adev
, &state
->performance_levels
[i
], &arb_regs
);
4799 ret
= amdgpu_si_copy_bytes_to_smc(adev
,
4800 si_pi
->arb_table_start
+
4801 offsetof(SMC_SIslands_MCArbDramTimingRegisters
, data
) +
4802 sizeof(SMC_SIslands_MCArbDramTimingRegisterSet
) * (first_arb_set
+ i
),
4804 sizeof(SMC_SIslands_MCArbDramTimingRegisterSet
),
4813 static int si_program_memory_timing_parameters(struct amdgpu_device
*adev
,
4814 struct amdgpu_ps
*amdgpu_new_state
)
4816 return si_do_program_memory_timing_parameters(adev
, amdgpu_new_state
,
4817 SISLANDS_DRIVER_STATE_ARB_INDEX
);
4820 static int si_populate_initial_mvdd_value(struct amdgpu_device
*adev
,
4821 struct SISLANDS_SMC_VOLTAGE_VALUE
*voltage
)
4823 struct rv7xx_power_info
*pi
= rv770_get_pi(adev
);
4824 struct si_power_info
*si_pi
= si_get_pi(adev
);
4826 if (pi
->mvdd_control
)
4827 return si_populate_voltage_value(adev
, &si_pi
->mvdd_voltage_table
,
4828 si_pi
->mvdd_bootup_value
, voltage
);
4833 static int si_populate_smc_initial_state(struct amdgpu_device
*adev
,
4834 struct amdgpu_ps
*amdgpu_initial_state
,
4835 SISLANDS_SMC_STATETABLE
*table
)
4837 struct si_ps
*initial_state
= si_get_ps(amdgpu_initial_state
);
4838 struct rv7xx_power_info
*pi
= rv770_get_pi(adev
);
4839 struct evergreen_power_info
*eg_pi
= evergreen_get_pi(adev
);
4840 struct si_power_info
*si_pi
= si_get_pi(adev
);
4844 table
->initialState
.levels
[0].mclk
.vDLL_CNTL
=
4845 cpu_to_be32(si_pi
->clock_registers
.dll_cntl
);
4846 table
->initialState
.levels
[0].mclk
.vMCLK_PWRMGT_CNTL
=
4847 cpu_to_be32(si_pi
->clock_registers
.mclk_pwrmgt_cntl
);
4848 table
->initialState
.levels
[0].mclk
.vMPLL_AD_FUNC_CNTL
=
4849 cpu_to_be32(si_pi
->clock_registers
.mpll_ad_func_cntl
);
4850 table
->initialState
.levels
[0].mclk
.vMPLL_DQ_FUNC_CNTL
=
4851 cpu_to_be32(si_pi
->clock_registers
.mpll_dq_func_cntl
);
4852 table
->initialState
.levels
[0].mclk
.vMPLL_FUNC_CNTL
=
4853 cpu_to_be32(si_pi
->clock_registers
.mpll_func_cntl
);
4854 table
->initialState
.levels
[0].mclk
.vMPLL_FUNC_CNTL_1
=
4855 cpu_to_be32(si_pi
->clock_registers
.mpll_func_cntl_1
);
4856 table
->initialState
.levels
[0].mclk
.vMPLL_FUNC_CNTL_2
=
4857 cpu_to_be32(si_pi
->clock_registers
.mpll_func_cntl_2
);
4858 table
->initialState
.levels
[0].mclk
.vMPLL_SS
=
4859 cpu_to_be32(si_pi
->clock_registers
.mpll_ss1
);
4860 table
->initialState
.levels
[0].mclk
.vMPLL_SS2
=
4861 cpu_to_be32(si_pi
->clock_registers
.mpll_ss2
);
4863 table
->initialState
.levels
[0].mclk
.mclk_value
=
4864 cpu_to_be32(initial_state
->performance_levels
[0].mclk
);
4866 table
->initialState
.levels
[0].sclk
.vCG_SPLL_FUNC_CNTL
=
4867 cpu_to_be32(si_pi
->clock_registers
.cg_spll_func_cntl
);
4868 table
->initialState
.levels
[0].sclk
.vCG_SPLL_FUNC_CNTL_2
=
4869 cpu_to_be32(si_pi
->clock_registers
.cg_spll_func_cntl_2
);
4870 table
->initialState
.levels
[0].sclk
.vCG_SPLL_FUNC_CNTL_3
=
4871 cpu_to_be32(si_pi
->clock_registers
.cg_spll_func_cntl_3
);
4872 table
->initialState
.levels
[0].sclk
.vCG_SPLL_FUNC_CNTL_4
=
4873 cpu_to_be32(si_pi
->clock_registers
.cg_spll_func_cntl_4
);
4874 table
->initialState
.levels
[0].sclk
.vCG_SPLL_SPREAD_SPECTRUM
=
4875 cpu_to_be32(si_pi
->clock_registers
.cg_spll_spread_spectrum
);
4876 table
->initialState
.levels
[0].sclk
.vCG_SPLL_SPREAD_SPECTRUM_2
=
4877 cpu_to_be32(si_pi
->clock_registers
.cg_spll_spread_spectrum_2
);
4879 table
->initialState
.levels
[0].sclk
.sclk_value
=
4880 cpu_to_be32(initial_state
->performance_levels
[0].sclk
);
4882 table
->initialState
.levels
[0].arbRefreshState
=
4883 SISLANDS_INITIAL_STATE_ARB_INDEX
;
4885 table
->initialState
.levels
[0].ACIndex
= 0;
4887 ret
= si_populate_voltage_value(adev
, &eg_pi
->vddc_voltage_table
,
4888 initial_state
->performance_levels
[0].vddc
,
4889 &table
->initialState
.levels
[0].vddc
);
4894 ret
= si_get_std_voltage_value(adev
,
4895 &table
->initialState
.levels
[0].vddc
,
4898 si_populate_std_voltage_value(adev
, std_vddc
,
4899 table
->initialState
.levels
[0].vddc
.index
,
4900 &table
->initialState
.levels
[0].std_vddc
);
4903 if (eg_pi
->vddci_control
)
4904 si_populate_voltage_value(adev
,
4905 &eg_pi
->vddci_voltage_table
,
4906 initial_state
->performance_levels
[0].vddci
,
4907 &table
->initialState
.levels
[0].vddci
);
4909 if (si_pi
->vddc_phase_shed_control
)
4910 si_populate_phase_shedding_value(adev
,
4911 &adev
->pm
.dpm
.dyn_state
.phase_shedding_limits_table
,
4912 initial_state
->performance_levels
[0].vddc
,
4913 initial_state
->performance_levels
[0].sclk
,
4914 initial_state
->performance_levels
[0].mclk
,
4915 &table
->initialState
.levels
[0].vddc
);
4917 si_populate_initial_mvdd_value(adev
, &table
->initialState
.levels
[0].mvdd
);
4919 reg
= CG_R(0xffff) | CG_L(0);
4920 table
->initialState
.levels
[0].aT
= cpu_to_be32(reg
);
4921 table
->initialState
.levels
[0].bSP
= cpu_to_be32(pi
->dsp
);
4922 table
->initialState
.levels
[0].gen2PCIE
= (u8
)si_pi
->boot_pcie_gen
;
4924 if (adev
->mc
.vram_type
== AMDGPU_VRAM_TYPE_GDDR5
) {
4925 table
->initialState
.levels
[0].strobeMode
=
4926 si_get_strobe_mode_settings(adev
,
4927 initial_state
->performance_levels
[0].mclk
);
4929 if (initial_state
->performance_levels
[0].mclk
> pi
->mclk_edc_enable_threshold
)
4930 table
->initialState
.levels
[0].mcFlags
= SISLANDS_SMC_MC_EDC_RD_FLAG
| SISLANDS_SMC_MC_EDC_WR_FLAG
;
4932 table
->initialState
.levels
[0].mcFlags
= 0;
4935 table
->initialState
.levelCount
= 1;
4937 table
->initialState
.flags
|= PPSMC_SWSTATE_FLAG_DC
;
4939 table
->initialState
.levels
[0].dpm2
.MaxPS
= 0;
4940 table
->initialState
.levels
[0].dpm2
.NearTDPDec
= 0;
4941 table
->initialState
.levels
[0].dpm2
.AboveSafeInc
= 0;
4942 table
->initialState
.levels
[0].dpm2
.BelowSafeInc
= 0;
4943 table
->initialState
.levels
[0].dpm2
.PwrEfficiencyRatio
= 0;
4945 reg
= MIN_POWER_MASK
| MAX_POWER_MASK
;
4946 table
->initialState
.levels
[0].SQPowerThrottle
= cpu_to_be32(reg
);
4948 reg
= MAX_POWER_DELTA_MASK
| STI_SIZE_MASK
| LTI_RATIO_MASK
;
4949 table
->initialState
.levels
[0].SQPowerThrottle_2
= cpu_to_be32(reg
);
4954 static int si_populate_smc_acpi_state(struct amdgpu_device
*adev
,
4955 SISLANDS_SMC_STATETABLE
*table
)
4957 struct rv7xx_power_info
*pi
= rv770_get_pi(adev
);
4958 struct evergreen_power_info
*eg_pi
= evergreen_get_pi(adev
);
4959 struct si_power_info
*si_pi
= si_get_pi(adev
);
4960 u32 spll_func_cntl
= si_pi
->clock_registers
.cg_spll_func_cntl
;
4961 u32 spll_func_cntl_2
= si_pi
->clock_registers
.cg_spll_func_cntl_2
;
4962 u32 spll_func_cntl_3
= si_pi
->clock_registers
.cg_spll_func_cntl_3
;
4963 u32 spll_func_cntl_4
= si_pi
->clock_registers
.cg_spll_func_cntl_4
;
4964 u32 dll_cntl
= si_pi
->clock_registers
.dll_cntl
;
4965 u32 mclk_pwrmgt_cntl
= si_pi
->clock_registers
.mclk_pwrmgt_cntl
;
4966 u32 mpll_ad_func_cntl
= si_pi
->clock_registers
.mpll_ad_func_cntl
;
4967 u32 mpll_dq_func_cntl
= si_pi
->clock_registers
.mpll_dq_func_cntl
;
4968 u32 mpll_func_cntl
= si_pi
->clock_registers
.mpll_func_cntl
;
4969 u32 mpll_func_cntl_1
= si_pi
->clock_registers
.mpll_func_cntl_1
;
4970 u32 mpll_func_cntl_2
= si_pi
->clock_registers
.mpll_func_cntl_2
;
4974 table
->ACPIState
= table
->initialState
;
4976 table
->ACPIState
.flags
&= ~PPSMC_SWSTATE_FLAG_DC
;
4978 if (pi
->acpi_vddc
) {
4979 ret
= si_populate_voltage_value(adev
, &eg_pi
->vddc_voltage_table
,
4980 pi
->acpi_vddc
, &table
->ACPIState
.levels
[0].vddc
);
4984 ret
= si_get_std_voltage_value(adev
,
4985 &table
->ACPIState
.levels
[0].vddc
, &std_vddc
);
4987 si_populate_std_voltage_value(adev
, std_vddc
,
4988 table
->ACPIState
.levels
[0].vddc
.index
,
4989 &table
->ACPIState
.levels
[0].std_vddc
);
4991 table
->ACPIState
.levels
[0].gen2PCIE
= si_pi
->acpi_pcie_gen
;
4993 if (si_pi
->vddc_phase_shed_control
) {
4994 si_populate_phase_shedding_value(adev
,
4995 &adev
->pm
.dpm
.dyn_state
.phase_shedding_limits_table
,
4999 &table
->ACPIState
.levels
[0].vddc
);
5002 ret
= si_populate_voltage_value(adev
, &eg_pi
->vddc_voltage_table
,
5003 pi
->min_vddc_in_table
, &table
->ACPIState
.levels
[0].vddc
);
5007 ret
= si_get_std_voltage_value(adev
,
5008 &table
->ACPIState
.levels
[0].vddc
, &std_vddc
);
5011 si_populate_std_voltage_value(adev
, std_vddc
,
5012 table
->ACPIState
.levels
[0].vddc
.index
,
5013 &table
->ACPIState
.levels
[0].std_vddc
);
5015 table
->ACPIState
.levels
[0].gen2PCIE
= (u8
)r600_get_pcie_gen_support(adev
,
5016 si_pi
->sys_pcie_mask
,
5017 si_pi
->boot_pcie_gen
,
5020 if (si_pi
->vddc_phase_shed_control
)
5021 si_populate_phase_shedding_value(adev
,
5022 &adev
->pm
.dpm
.dyn_state
.phase_shedding_limits_table
,
5023 pi
->min_vddc_in_table
,
5026 &table
->ACPIState
.levels
[0].vddc
);
5029 if (pi
->acpi_vddc
) {
5030 if (eg_pi
->acpi_vddci
)
5031 si_populate_voltage_value(adev
, &eg_pi
->vddci_voltage_table
,
5033 &table
->ACPIState
.levels
[0].vddci
);
5036 mclk_pwrmgt_cntl
|= MRDCK0_RESET
| MRDCK1_RESET
;
5037 mclk_pwrmgt_cntl
&= ~(MRDCK0_PDNB
| MRDCK1_PDNB
);
5039 dll_cntl
&= ~(MRDCK0_BYPASS
| MRDCK1_BYPASS
);
5041 spll_func_cntl_2
&= ~SCLK_MUX_SEL_MASK
;
5042 spll_func_cntl_2
|= SCLK_MUX_SEL(4);
5044 table
->ACPIState
.levels
[0].mclk
.vDLL_CNTL
=
5045 cpu_to_be32(dll_cntl
);
5046 table
->ACPIState
.levels
[0].mclk
.vMCLK_PWRMGT_CNTL
=
5047 cpu_to_be32(mclk_pwrmgt_cntl
);
5048 table
->ACPIState
.levels
[0].mclk
.vMPLL_AD_FUNC_CNTL
=
5049 cpu_to_be32(mpll_ad_func_cntl
);
5050 table
->ACPIState
.levels
[0].mclk
.vMPLL_DQ_FUNC_CNTL
=
5051 cpu_to_be32(mpll_dq_func_cntl
);
5052 table
->ACPIState
.levels
[0].mclk
.vMPLL_FUNC_CNTL
=
5053 cpu_to_be32(mpll_func_cntl
);
5054 table
->ACPIState
.levels
[0].mclk
.vMPLL_FUNC_CNTL_1
=
5055 cpu_to_be32(mpll_func_cntl_1
);
5056 table
->ACPIState
.levels
[0].mclk
.vMPLL_FUNC_CNTL_2
=
5057 cpu_to_be32(mpll_func_cntl_2
);
5058 table
->ACPIState
.levels
[0].mclk
.vMPLL_SS
=
5059 cpu_to_be32(si_pi
->clock_registers
.mpll_ss1
);
5060 table
->ACPIState
.levels
[0].mclk
.vMPLL_SS2
=
5061 cpu_to_be32(si_pi
->clock_registers
.mpll_ss2
);
5063 table
->ACPIState
.levels
[0].sclk
.vCG_SPLL_FUNC_CNTL
=
5064 cpu_to_be32(spll_func_cntl
);
5065 table
->ACPIState
.levels
[0].sclk
.vCG_SPLL_FUNC_CNTL_2
=
5066 cpu_to_be32(spll_func_cntl_2
);
5067 table
->ACPIState
.levels
[0].sclk
.vCG_SPLL_FUNC_CNTL_3
=
5068 cpu_to_be32(spll_func_cntl_3
);
5069 table
->ACPIState
.levels
[0].sclk
.vCG_SPLL_FUNC_CNTL_4
=
5070 cpu_to_be32(spll_func_cntl_4
);
5072 table
->ACPIState
.levels
[0].mclk
.mclk_value
= 0;
5073 table
->ACPIState
.levels
[0].sclk
.sclk_value
= 0;
5075 si_populate_mvdd_value(adev
, 0, &table
->ACPIState
.levels
[0].mvdd
);
5077 if (eg_pi
->dynamic_ac_timing
)
5078 table
->ACPIState
.levels
[0].ACIndex
= 0;
5080 table
->ACPIState
.levels
[0].dpm2
.MaxPS
= 0;
5081 table
->ACPIState
.levels
[0].dpm2
.NearTDPDec
= 0;
5082 table
->ACPIState
.levels
[0].dpm2
.AboveSafeInc
= 0;
5083 table
->ACPIState
.levels
[0].dpm2
.BelowSafeInc
= 0;
5084 table
->ACPIState
.levels
[0].dpm2
.PwrEfficiencyRatio
= 0;
5086 reg
= MIN_POWER_MASK
| MAX_POWER_MASK
;
5087 table
->ACPIState
.levels
[0].SQPowerThrottle
= cpu_to_be32(reg
);
5089 reg
= MAX_POWER_DELTA_MASK
| STI_SIZE_MASK
| LTI_RATIO_MASK
;
5090 table
->ACPIState
.levels
[0].SQPowerThrottle_2
= cpu_to_be32(reg
);
5095 static int si_populate_ulv_state(struct amdgpu_device
*adev
,
5096 SISLANDS_SMC_SWSTATE
*state
)
5098 struct evergreen_power_info
*eg_pi
= evergreen_get_pi(adev
);
5099 struct si_power_info
*si_pi
= si_get_pi(adev
);
5100 struct si_ulv_param
*ulv
= &si_pi
->ulv
;
5101 u32 sclk_in_sr
= 1350; /* ??? */
5104 ret
= si_convert_power_level_to_smc(adev
, &ulv
->pl
,
5107 if (eg_pi
->sclk_deep_sleep
) {
5108 if (sclk_in_sr
<= SCLK_MIN_DEEPSLEEP_FREQ
)
5109 state
->levels
[0].stateFlags
|= PPSMC_STATEFLAG_DEEPSLEEP_BYPASS
;
5111 state
->levels
[0].stateFlags
|= PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE
;
5113 if (ulv
->one_pcie_lane_in_ulv
)
5114 state
->flags
|= PPSMC_SWSTATE_FLAG_PCIE_X1
;
5115 state
->levels
[0].arbRefreshState
= (u8
)(SISLANDS_ULV_STATE_ARB_INDEX
);
5116 state
->levels
[0].ACIndex
= 1;
5117 state
->levels
[0].std_vddc
= state
->levels
[0].vddc
;
5118 state
->levelCount
= 1;
5120 state
->flags
|= PPSMC_SWSTATE_FLAG_DC
;
5126 static int si_program_ulv_memory_timing_parameters(struct amdgpu_device
*adev
)
5128 struct si_power_info
*si_pi
= si_get_pi(adev
);
5129 struct si_ulv_param
*ulv
= &si_pi
->ulv
;
5130 SMC_SIslands_MCArbDramTimingRegisterSet arb_regs
= { 0 };
5133 ret
= si_populate_memory_timing_parameters(adev
, &ulv
->pl
,
5138 si_write_smc_soft_register(adev
, SI_SMC_SOFT_REGISTER_ulv_volt_change_delay
,
5139 ulv
->volt_change_delay
);
5141 ret
= amdgpu_si_copy_bytes_to_smc(adev
,
5142 si_pi
->arb_table_start
+
5143 offsetof(SMC_SIslands_MCArbDramTimingRegisters
, data
) +
5144 sizeof(SMC_SIslands_MCArbDramTimingRegisterSet
) * SISLANDS_ULV_STATE_ARB_INDEX
,
5146 sizeof(SMC_SIslands_MCArbDramTimingRegisterSet
),
5152 static void si_get_mvdd_configuration(struct amdgpu_device
*adev
)
5154 struct rv7xx_power_info
*pi
= rv770_get_pi(adev
);
5156 pi
->mvdd_split_frequency
= 30000;
5159 static int si_init_smc_table(struct amdgpu_device
*adev
)
5161 struct si_power_info
*si_pi
= si_get_pi(adev
);
5162 struct amdgpu_ps
*amdgpu_boot_state
= adev
->pm
.dpm
.boot_ps
;
5163 const struct si_ulv_param
*ulv
= &si_pi
->ulv
;
5164 SISLANDS_SMC_STATETABLE
*table
= &si_pi
->smc_statetable
;
5169 si_populate_smc_voltage_tables(adev
, table
);
5171 switch (adev
->pm
.int_thermal_type
) {
5172 case THERMAL_TYPE_SI
:
5173 case THERMAL_TYPE_EMC2103_WITH_INTERNAL
:
5174 table
->thermalProtectType
= PPSMC_THERMAL_PROTECT_TYPE_INTERNAL
;
5176 case THERMAL_TYPE_NONE
:
5177 table
->thermalProtectType
= PPSMC_THERMAL_PROTECT_TYPE_NONE
;
5180 table
->thermalProtectType
= PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL
;
5184 if (adev
->pm
.dpm
.platform_caps
& ATOM_PP_PLATFORM_CAP_HARDWAREDC
)
5185 table
->systemFlags
|= PPSMC_SYSTEMFLAG_GPIO_DC
;
5187 if (adev
->pm
.dpm
.platform_caps
& ATOM_PP_PLATFORM_CAP_REGULATOR_HOT
) {
5188 if ((adev
->pdev
->device
!= 0x6818) && (adev
->pdev
->device
!= 0x6819))
5189 table
->systemFlags
|= PPSMC_SYSTEMFLAG_REGULATOR_HOT
;
5192 if (adev
->pm
.dpm
.platform_caps
& ATOM_PP_PLATFORM_CAP_STEPVDDC
)
5193 table
->systemFlags
|= PPSMC_SYSTEMFLAG_STEPVDDC
;
5195 if (adev
->mc
.vram_type
== AMDGPU_VRAM_TYPE_GDDR5
)
5196 table
->systemFlags
|= PPSMC_SYSTEMFLAG_GDDR5
;
5198 if (adev
->pm
.dpm
.platform_caps
& ATOM_PP_PLATFORM_CAP_REVERT_GPIO5_POLARITY
)
5199 table
->extraFlags
|= PPSMC_EXTRAFLAGS_AC2DC_GPIO5_POLARITY_HIGH
;
5201 if (adev
->pm
.dpm
.platform_caps
& ATOM_PP_PLATFORM_CAP_VRHOT_GPIO_CONFIGURABLE
) {
5202 table
->systemFlags
|= PPSMC_SYSTEMFLAG_REGULATOR_HOT_PROG_GPIO
;
5203 vr_hot_gpio
= adev
->pm
.dpm
.backbias_response_time
;
5204 si_write_smc_soft_register(adev
, SI_SMC_SOFT_REGISTER_vr_hot_gpio
,
5208 ret
= si_populate_smc_initial_state(adev
, amdgpu_boot_state
, table
);
5212 ret
= si_populate_smc_acpi_state(adev
, table
);
5216 table
->driverState
= table
->initialState
;
5218 ret
= si_do_program_memory_timing_parameters(adev
, amdgpu_boot_state
,
5219 SISLANDS_INITIAL_STATE_ARB_INDEX
);
5223 if (ulv
->supported
&& ulv
->pl
.vddc
) {
5224 ret
= si_populate_ulv_state(adev
, &table
->ULVState
);
5228 ret
= si_program_ulv_memory_timing_parameters(adev
);
5232 WREG32(CG_ULV_CONTROL
, ulv
->cg_ulv_control
);
5233 WREG32(CG_ULV_PARAMETER
, ulv
->cg_ulv_parameter
);
5235 lane_width
= amdgpu_get_pcie_lanes(adev
);
5236 si_write_smc_soft_register(adev
, SI_SMC_SOFT_REGISTER_non_ulv_pcie_link_width
, lane_width
);
5238 table
->ULVState
= table
->initialState
;
5241 return amdgpu_si_copy_bytes_to_smc(adev
, si_pi
->state_table_start
,
5242 (u8
*)table
, sizeof(SISLANDS_SMC_STATETABLE
),
5246 static int si_calculate_sclk_params(struct amdgpu_device
*adev
,
5248 SISLANDS_SMC_SCLK_VALUE
*sclk
)
5250 struct rv7xx_power_info
*pi
= rv770_get_pi(adev
);
5251 struct si_power_info
*si_pi
= si_get_pi(adev
);
5252 struct atom_clock_dividers dividers
;
5253 u32 spll_func_cntl
= si_pi
->clock_registers
.cg_spll_func_cntl
;
5254 u32 spll_func_cntl_2
= si_pi
->clock_registers
.cg_spll_func_cntl_2
;
5255 u32 spll_func_cntl_3
= si_pi
->clock_registers
.cg_spll_func_cntl_3
;
5256 u32 spll_func_cntl_4
= si_pi
->clock_registers
.cg_spll_func_cntl_4
;
5257 u32 cg_spll_spread_spectrum
= si_pi
->clock_registers
.cg_spll_spread_spectrum
;
5258 u32 cg_spll_spread_spectrum_2
= si_pi
->clock_registers
.cg_spll_spread_spectrum_2
;
5260 u32 reference_clock
= adev
->clock
.spll
.reference_freq
;
5261 u32 reference_divider
;
5265 ret
= amdgpu_atombios_get_clock_dividers(adev
, COMPUTE_ENGINE_PLL_PARAM
,
5266 engine_clock
, false, ÷rs
);
5270 reference_divider
= 1 + dividers
.ref_div
;
5272 tmp
= (u64
) engine_clock
* reference_divider
* dividers
.post_div
* 16384;
5273 do_div(tmp
, reference_clock
);
5276 spll_func_cntl
&= ~(SPLL_PDIV_A_MASK
| SPLL_REF_DIV_MASK
);
5277 spll_func_cntl
|= SPLL_REF_DIV(dividers
.ref_div
);
5278 spll_func_cntl
|= SPLL_PDIV_A(dividers
.post_div
);
5280 spll_func_cntl_2
&= ~SCLK_MUX_SEL_MASK
;
5281 spll_func_cntl_2
|= SCLK_MUX_SEL(2);
5283 spll_func_cntl_3
&= ~SPLL_FB_DIV_MASK
;
5284 spll_func_cntl_3
|= SPLL_FB_DIV(fbdiv
);
5285 spll_func_cntl_3
|= SPLL_DITHEN
;
5288 struct amdgpu_atom_ss ss
;
5289 u32 vco_freq
= engine_clock
* dividers
.post_div
;
5291 if (amdgpu_atombios_get_asic_ss_info(adev
, &ss
,
5292 ASIC_INTERNAL_ENGINE_SS
, vco_freq
)) {
5293 u32 clk_s
= reference_clock
* 5 / (reference_divider
* ss
.rate
);
5294 u32 clk_v
= 4 * ss
.percentage
* fbdiv
/ (clk_s
* 10000);
5296 cg_spll_spread_spectrum
&= ~CLK_S_MASK
;
5297 cg_spll_spread_spectrum
|= CLK_S(clk_s
);
5298 cg_spll_spread_spectrum
|= SSEN
;
5300 cg_spll_spread_spectrum_2
&= ~CLK_V_MASK
;
5301 cg_spll_spread_spectrum_2
|= CLK_V(clk_v
);
5305 sclk
->sclk_value
= engine_clock
;
5306 sclk
->vCG_SPLL_FUNC_CNTL
= spll_func_cntl
;
5307 sclk
->vCG_SPLL_FUNC_CNTL_2
= spll_func_cntl_2
;
5308 sclk
->vCG_SPLL_FUNC_CNTL_3
= spll_func_cntl_3
;
5309 sclk
->vCG_SPLL_FUNC_CNTL_4
= spll_func_cntl_4
;
5310 sclk
->vCG_SPLL_SPREAD_SPECTRUM
= cg_spll_spread_spectrum
;
5311 sclk
->vCG_SPLL_SPREAD_SPECTRUM_2
= cg_spll_spread_spectrum_2
;
5316 static int si_populate_sclk_value(struct amdgpu_device
*adev
,
5318 SISLANDS_SMC_SCLK_VALUE
*sclk
)
5320 SISLANDS_SMC_SCLK_VALUE sclk_tmp
;
5323 ret
= si_calculate_sclk_params(adev
, engine_clock
, &sclk_tmp
);
5325 sclk
->sclk_value
= cpu_to_be32(sclk_tmp
.sclk_value
);
5326 sclk
->vCG_SPLL_FUNC_CNTL
= cpu_to_be32(sclk_tmp
.vCG_SPLL_FUNC_CNTL
);
5327 sclk
->vCG_SPLL_FUNC_CNTL_2
= cpu_to_be32(sclk_tmp
.vCG_SPLL_FUNC_CNTL_2
);
5328 sclk
->vCG_SPLL_FUNC_CNTL_3
= cpu_to_be32(sclk_tmp
.vCG_SPLL_FUNC_CNTL_3
);
5329 sclk
->vCG_SPLL_FUNC_CNTL_4
= cpu_to_be32(sclk_tmp
.vCG_SPLL_FUNC_CNTL_4
);
5330 sclk
->vCG_SPLL_SPREAD_SPECTRUM
= cpu_to_be32(sclk_tmp
.vCG_SPLL_SPREAD_SPECTRUM
);
5331 sclk
->vCG_SPLL_SPREAD_SPECTRUM_2
= cpu_to_be32(sclk_tmp
.vCG_SPLL_SPREAD_SPECTRUM_2
);
5337 static int si_populate_mclk_value(struct amdgpu_device
*adev
,
5340 SISLANDS_SMC_MCLK_VALUE
*mclk
,
5344 struct rv7xx_power_info
*pi
= rv770_get_pi(adev
);
5345 struct si_power_info
*si_pi
= si_get_pi(adev
);
5346 u32 dll_cntl
= si_pi
->clock_registers
.dll_cntl
;
5347 u32 mclk_pwrmgt_cntl
= si_pi
->clock_registers
.mclk_pwrmgt_cntl
;
5348 u32 mpll_ad_func_cntl
= si_pi
->clock_registers
.mpll_ad_func_cntl
;
5349 u32 mpll_dq_func_cntl
= si_pi
->clock_registers
.mpll_dq_func_cntl
;
5350 u32 mpll_func_cntl
= si_pi
->clock_registers
.mpll_func_cntl
;
5351 u32 mpll_func_cntl_1
= si_pi
->clock_registers
.mpll_func_cntl_1
;
5352 u32 mpll_func_cntl_2
= si_pi
->clock_registers
.mpll_func_cntl_2
;
5353 u32 mpll_ss1
= si_pi
->clock_registers
.mpll_ss1
;
5354 u32 mpll_ss2
= si_pi
->clock_registers
.mpll_ss2
;
5355 struct atom_mpll_param mpll_param
;
5358 ret
= amdgpu_atombios_get_memory_pll_dividers(adev
, memory_clock
, strobe_mode
, &mpll_param
);
5362 mpll_func_cntl
&= ~BWCTRL_MASK
;
5363 mpll_func_cntl
|= BWCTRL(mpll_param
.bwcntl
);
5365 mpll_func_cntl_1
&= ~(CLKF_MASK
| CLKFRAC_MASK
| VCO_MODE_MASK
);
5366 mpll_func_cntl_1
|= CLKF(mpll_param
.clkf
) |
5367 CLKFRAC(mpll_param
.clkfrac
) | VCO_MODE(mpll_param
.vco_mode
);
5369 mpll_ad_func_cntl
&= ~YCLK_POST_DIV_MASK
;
5370 mpll_ad_func_cntl
|= YCLK_POST_DIV(mpll_param
.post_div
);
5372 if (adev
->mc
.vram_type
== AMDGPU_VRAM_TYPE_GDDR5
) {
5373 mpll_dq_func_cntl
&= ~(YCLK_SEL_MASK
| YCLK_POST_DIV_MASK
);
5374 mpll_dq_func_cntl
|= YCLK_SEL(mpll_param
.yclk_sel
) |
5375 YCLK_POST_DIV(mpll_param
.post_div
);
5379 struct amdgpu_atom_ss ss
;
5382 u32 reference_clock
= adev
->clock
.mpll
.reference_freq
;
5384 if (adev
->mc
.vram_type
== AMDGPU_VRAM_TYPE_GDDR5
)
5385 freq_nom
= memory_clock
* 4;
5387 freq_nom
= memory_clock
* 2;
5389 tmp
= freq_nom
/ reference_clock
;
5391 if (amdgpu_atombios_get_asic_ss_info(adev
, &ss
,
5392 ASIC_INTERNAL_MEMORY_SS
, freq_nom
)) {
5393 u32 clks
= reference_clock
* 5 / ss
.rate
;
5394 u32 clkv
= (u32
)((((131 * ss
.percentage
* ss
.rate
) / 100) * tmp
) / freq_nom
);
5396 mpll_ss1
&= ~CLKV_MASK
;
5397 mpll_ss1
|= CLKV(clkv
);
5399 mpll_ss2
&= ~CLKS_MASK
;
5400 mpll_ss2
|= CLKS(clks
);
5404 mclk_pwrmgt_cntl
&= ~DLL_SPEED_MASK
;
5405 mclk_pwrmgt_cntl
|= DLL_SPEED(mpll_param
.dll_speed
);
5408 mclk_pwrmgt_cntl
|= MRDCK0_PDNB
| MRDCK1_PDNB
;
5410 mclk_pwrmgt_cntl
&= ~(MRDCK0_PDNB
| MRDCK1_PDNB
);
5412 mclk
->mclk_value
= cpu_to_be32(memory_clock
);
5413 mclk
->vMPLL_FUNC_CNTL
= cpu_to_be32(mpll_func_cntl
);
5414 mclk
->vMPLL_FUNC_CNTL_1
= cpu_to_be32(mpll_func_cntl_1
);
5415 mclk
->vMPLL_FUNC_CNTL_2
= cpu_to_be32(mpll_func_cntl_2
);
5416 mclk
->vMPLL_AD_FUNC_CNTL
= cpu_to_be32(mpll_ad_func_cntl
);
5417 mclk
->vMPLL_DQ_FUNC_CNTL
= cpu_to_be32(mpll_dq_func_cntl
);
5418 mclk
->vMCLK_PWRMGT_CNTL
= cpu_to_be32(mclk_pwrmgt_cntl
);
5419 mclk
->vDLL_CNTL
= cpu_to_be32(dll_cntl
);
5420 mclk
->vMPLL_SS
= cpu_to_be32(mpll_ss1
);
5421 mclk
->vMPLL_SS2
= cpu_to_be32(mpll_ss2
);
5426 static void si_populate_smc_sp(struct amdgpu_device
*adev
,
5427 struct amdgpu_ps
*amdgpu_state
,
5428 SISLANDS_SMC_SWSTATE
*smc_state
)
5430 struct si_ps
*ps
= si_get_ps(amdgpu_state
);
5431 struct rv7xx_power_info
*pi
= rv770_get_pi(adev
);
5434 for (i
= 0; i
< ps
->performance_level_count
- 1; i
++)
5435 smc_state
->levels
[i
].bSP
= cpu_to_be32(pi
->dsp
);
5437 smc_state
->levels
[ps
->performance_level_count
- 1].bSP
=
5438 cpu_to_be32(pi
->psp
);
5441 static int si_convert_power_level_to_smc(struct amdgpu_device
*adev
,
5442 struct rv7xx_pl
*pl
,
5443 SISLANDS_SMC_HW_PERFORMANCE_LEVEL
*level
)
5445 struct rv7xx_power_info
*pi
= rv770_get_pi(adev
);
5446 struct evergreen_power_info
*eg_pi
= evergreen_get_pi(adev
);
5447 struct si_power_info
*si_pi
= si_get_pi(adev
);
5451 bool gmc_pg
= false;
5453 if (eg_pi
->pcie_performance_request
&&
5454 (si_pi
->force_pcie_gen
!= AMDGPU_PCIE_GEN_INVALID
))
5455 level
->gen2PCIE
= (u8
)si_pi
->force_pcie_gen
;
5457 level
->gen2PCIE
= (u8
)pl
->pcie_gen
;
5459 ret
= si_populate_sclk_value(adev
, pl
->sclk
, &level
->sclk
);
5465 if (pi
->mclk_stutter_mode_threshold
&&
5466 (pl
->mclk
<= pi
->mclk_stutter_mode_threshold
) &&
5467 !eg_pi
->uvd_enabled
&&
5468 (RREG32(DPG_PIPE_STUTTER_CONTROL
) & STUTTER_ENABLE
) &&
5469 (adev
->pm
.dpm
.new_active_crtc_count
<= 2)) {
5470 level
->mcFlags
|= SISLANDS_SMC_MC_STUTTER_EN
;
5473 level
->mcFlags
|= SISLANDS_SMC_MC_PG_EN
;
5476 if (adev
->mc
.vram_type
== AMDGPU_VRAM_TYPE_GDDR5
) {
5477 if (pl
->mclk
> pi
->mclk_edc_enable_threshold
)
5478 level
->mcFlags
|= SISLANDS_SMC_MC_EDC_RD_FLAG
;
5480 if (pl
->mclk
> eg_pi
->mclk_edc_wr_enable_threshold
)
5481 level
->mcFlags
|= SISLANDS_SMC_MC_EDC_WR_FLAG
;
5483 level
->strobeMode
= si_get_strobe_mode_settings(adev
, pl
->mclk
);
5485 if (level
->strobeMode
& SISLANDS_SMC_STROBE_ENABLE
) {
5486 if (si_get_mclk_frequency_ratio(pl
->mclk
, true) >=
5487 ((RREG32(MC_SEQ_MISC7
) >> 16) & 0xf))
5488 dll_state_on
= ((RREG32(MC_SEQ_MISC5
) >> 1) & 0x1) ? true : false;
5490 dll_state_on
= ((RREG32(MC_SEQ_MISC6
) >> 1) & 0x1) ? true : false;
5492 dll_state_on
= false;
5495 level
->strobeMode
= si_get_strobe_mode_settings(adev
,
5498 dll_state_on
= ((RREG32(MC_SEQ_MISC5
) >> 1) & 0x1) ? true : false;
5501 ret
= si_populate_mclk_value(adev
,
5505 (level
->strobeMode
& SISLANDS_SMC_STROBE_ENABLE
) != 0, dll_state_on
);
5509 ret
= si_populate_voltage_value(adev
,
5510 &eg_pi
->vddc_voltage_table
,
5511 pl
->vddc
, &level
->vddc
);
5516 ret
= si_get_std_voltage_value(adev
, &level
->vddc
, &std_vddc
);
5520 ret
= si_populate_std_voltage_value(adev
, std_vddc
,
5521 level
->vddc
.index
, &level
->std_vddc
);
5525 if (eg_pi
->vddci_control
) {
5526 ret
= si_populate_voltage_value(adev
, &eg_pi
->vddci_voltage_table
,
5527 pl
->vddci
, &level
->vddci
);
5532 if (si_pi
->vddc_phase_shed_control
) {
5533 ret
= si_populate_phase_shedding_value(adev
,
5534 &adev
->pm
.dpm
.dyn_state
.phase_shedding_limits_table
,
5543 level
->MaxPoweredUpCU
= si_pi
->max_cu
;
5545 ret
= si_populate_mvdd_value(adev
, pl
->mclk
, &level
->mvdd
);
5550 static int si_populate_smc_t(struct amdgpu_device
*adev
,
5551 struct amdgpu_ps
*amdgpu_state
,
5552 SISLANDS_SMC_SWSTATE
*smc_state
)
5554 struct rv7xx_power_info
*pi
= rv770_get_pi(adev
);
5555 struct si_ps
*state
= si_get_ps(amdgpu_state
);
5561 if (state
->performance_level_count
>= 9)
5564 if (state
->performance_level_count
< 2) {
5565 a_t
= CG_R(0xffff) | CG_L(0);
5566 smc_state
->levels
[0].aT
= cpu_to_be32(a_t
);
5570 smc_state
->levels
[0].aT
= cpu_to_be32(0);
5572 for (i
= 0; i
<= state
->performance_level_count
- 2; i
++) {
5573 ret
= r600_calculate_at(
5574 (50 / SISLANDS_MAX_HARDWARE_POWERLEVELS
) * 100 * (i
+ 1),
5576 state
->performance_levels
[i
+ 1].sclk
,
5577 state
->performance_levels
[i
].sclk
,
5582 t_h
= (i
+ 1) * 1000 - 50 * R600_AH_DFLT
;
5583 t_l
= (i
+ 1) * 1000 + 50 * R600_AH_DFLT
;
5586 a_t
= be32_to_cpu(smc_state
->levels
[i
].aT
) & ~CG_R_MASK
;
5587 a_t
|= CG_R(t_l
* pi
->bsp
/ 20000);
5588 smc_state
->levels
[i
].aT
= cpu_to_be32(a_t
);
5590 high_bsp
= (i
== state
->performance_level_count
- 2) ?
5592 a_t
= CG_R(0xffff) | CG_L(t_h
* high_bsp
/ 20000);
5593 smc_state
->levels
[i
+ 1].aT
= cpu_to_be32(a_t
);
5599 static int si_disable_ulv(struct amdgpu_device
*adev
)
5601 struct si_power_info
*si_pi
= si_get_pi(adev
);
5602 struct si_ulv_param
*ulv
= &si_pi
->ulv
;
5605 return (amdgpu_si_send_msg_to_smc(adev
, PPSMC_MSG_DisableULV
) == PPSMC_Result_OK
) ?
5611 static bool si_is_state_ulv_compatible(struct amdgpu_device
*adev
,
5612 struct amdgpu_ps
*amdgpu_state
)
5614 const struct si_power_info
*si_pi
= si_get_pi(adev
);
5615 const struct si_ulv_param
*ulv
= &si_pi
->ulv
;
5616 const struct si_ps
*state
= si_get_ps(amdgpu_state
);
5619 if (state
->performance_levels
[0].mclk
!= ulv
->pl
.mclk
)
5622 /* XXX validate against display requirements! */
5624 for (i
= 0; i
< adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_dispclk
.count
; i
++) {
5625 if (adev
->clock
.current_dispclk
<=
5626 adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_dispclk
.entries
[i
].clk
) {
5628 adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_dispclk
.entries
[i
].v
)
5633 if ((amdgpu_state
->vclk
!= 0) || (amdgpu_state
->dclk
!= 0))
5639 static int si_set_power_state_conditionally_enable_ulv(struct amdgpu_device
*adev
,
5640 struct amdgpu_ps
*amdgpu_new_state
)
5642 const struct si_power_info
*si_pi
= si_get_pi(adev
);
5643 const struct si_ulv_param
*ulv
= &si_pi
->ulv
;
5645 if (ulv
->supported
) {
5646 if (si_is_state_ulv_compatible(adev
, amdgpu_new_state
))
5647 return (amdgpu_si_send_msg_to_smc(adev
, PPSMC_MSG_EnableULV
) == PPSMC_Result_OK
) ?
5653 static int si_convert_power_state_to_smc(struct amdgpu_device
*adev
,
5654 struct amdgpu_ps
*amdgpu_state
,
5655 SISLANDS_SMC_SWSTATE
*smc_state
)
5657 struct evergreen_power_info
*eg_pi
= evergreen_get_pi(adev
);
5658 struct ni_power_info
*ni_pi
= ni_get_pi(adev
);
5659 struct si_power_info
*si_pi
= si_get_pi(adev
);
5660 struct si_ps
*state
= si_get_ps(amdgpu_state
);
5663 u32 sclk_in_sr
= 1350; /* ??? */
5665 if (state
->performance_level_count
> SISLANDS_MAX_HARDWARE_POWERLEVELS
)
5668 threshold
= state
->performance_levels
[state
->performance_level_count
-1].sclk
* 100 / 100;
5670 if (amdgpu_state
->vclk
&& amdgpu_state
->dclk
) {
5671 eg_pi
->uvd_enabled
= true;
5672 if (eg_pi
->smu_uvd_hs
)
5673 smc_state
->flags
|= PPSMC_SWSTATE_FLAG_UVD
;
5675 eg_pi
->uvd_enabled
= false;
5678 if (state
->dc_compatible
)
5679 smc_state
->flags
|= PPSMC_SWSTATE_FLAG_DC
;
5681 smc_state
->levelCount
= 0;
5682 for (i
= 0; i
< state
->performance_level_count
; i
++) {
5683 if (eg_pi
->sclk_deep_sleep
) {
5684 if ((i
== 0) || si_pi
->sclk_deep_sleep_above_low
) {
5685 if (sclk_in_sr
<= SCLK_MIN_DEEPSLEEP_FREQ
)
5686 smc_state
->levels
[i
].stateFlags
|= PPSMC_STATEFLAG_DEEPSLEEP_BYPASS
;
5688 smc_state
->levels
[i
].stateFlags
|= PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE
;
5692 ret
= si_convert_power_level_to_smc(adev
, &state
->performance_levels
[i
],
5693 &smc_state
->levels
[i
]);
5694 smc_state
->levels
[i
].arbRefreshState
=
5695 (u8
)(SISLANDS_DRIVER_STATE_ARB_INDEX
+ i
);
5700 if (ni_pi
->enable_power_containment
)
5701 smc_state
->levels
[i
].displayWatermark
=
5702 (state
->performance_levels
[i
].sclk
< threshold
) ?
5703 PPSMC_DISPLAY_WATERMARK_LOW
: PPSMC_DISPLAY_WATERMARK_HIGH
;
5705 smc_state
->levels
[i
].displayWatermark
= (i
< 2) ?
5706 PPSMC_DISPLAY_WATERMARK_LOW
: PPSMC_DISPLAY_WATERMARK_HIGH
;
5708 if (eg_pi
->dynamic_ac_timing
)
5709 smc_state
->levels
[i
].ACIndex
= SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT
+ i
;
5711 smc_state
->levels
[i
].ACIndex
= 0;
5713 smc_state
->levelCount
++;
5716 si_write_smc_soft_register(adev
,
5717 SI_SMC_SOFT_REGISTER_watermark_threshold
,
5720 si_populate_smc_sp(adev
, amdgpu_state
, smc_state
);
5722 ret
= si_populate_power_containment_values(adev
, amdgpu_state
, smc_state
);
5724 ni_pi
->enable_power_containment
= false;
5726 ret
= si_populate_sq_ramping_values(adev
, amdgpu_state
, smc_state
);
5728 ni_pi
->enable_sq_ramping
= false;
5730 return si_populate_smc_t(adev
, amdgpu_state
, smc_state
);
5733 static int si_upload_sw_state(struct amdgpu_device
*adev
,
5734 struct amdgpu_ps
*amdgpu_new_state
)
5736 struct si_power_info
*si_pi
= si_get_pi(adev
);
5737 struct si_ps
*new_state
= si_get_ps(amdgpu_new_state
);
5739 u32 address
= si_pi
->state_table_start
+
5740 offsetof(SISLANDS_SMC_STATETABLE
, driverState
);
5741 u32 state_size
= sizeof(SISLANDS_SMC_SWSTATE
) +
5742 ((new_state
->performance_level_count
- 1) *
5743 sizeof(SISLANDS_SMC_HW_PERFORMANCE_LEVEL
));
5744 SISLANDS_SMC_SWSTATE
*smc_state
= &si_pi
->smc_statetable
.driverState
;
5746 memset(smc_state
, 0, state_size
);
5748 ret
= si_convert_power_state_to_smc(adev
, amdgpu_new_state
, smc_state
);
5752 return amdgpu_si_copy_bytes_to_smc(adev
, address
, (u8
*)smc_state
,
5753 state_size
, si_pi
->sram_end
);
5756 static int si_upload_ulv_state(struct amdgpu_device
*adev
)
5758 struct si_power_info
*si_pi
= si_get_pi(adev
);
5759 struct si_ulv_param
*ulv
= &si_pi
->ulv
;
5762 if (ulv
->supported
&& ulv
->pl
.vddc
) {
5763 u32 address
= si_pi
->state_table_start
+
5764 offsetof(SISLANDS_SMC_STATETABLE
, ULVState
);
5765 SISLANDS_SMC_SWSTATE
*smc_state
= &si_pi
->smc_statetable
.ULVState
;
5766 u32 state_size
= sizeof(SISLANDS_SMC_SWSTATE
);
5768 memset(smc_state
, 0, state_size
);
5770 ret
= si_populate_ulv_state(adev
, smc_state
);
5772 ret
= amdgpu_si_copy_bytes_to_smc(adev
, address
, (u8
*)smc_state
,
5773 state_size
, si_pi
->sram_end
);
5779 static int si_upload_smc_data(struct amdgpu_device
*adev
)
5781 struct amdgpu_crtc
*amdgpu_crtc
= NULL
;
5784 if (adev
->pm
.dpm
.new_active_crtc_count
== 0)
5787 for (i
= 0; i
< adev
->mode_info
.num_crtc
; i
++) {
5788 if (adev
->pm
.dpm
.new_active_crtcs
& (1 << i
)) {
5789 amdgpu_crtc
= adev
->mode_info
.crtcs
[i
];
5794 if (amdgpu_crtc
== NULL
)
5797 if (amdgpu_crtc
->line_time
<= 0)
5800 if (si_write_smc_soft_register(adev
,
5801 SI_SMC_SOFT_REGISTER_crtc_index
,
5802 amdgpu_crtc
->crtc_id
) != PPSMC_Result_OK
)
5805 if (si_write_smc_soft_register(adev
,
5806 SI_SMC_SOFT_REGISTER_mclk_change_block_cp_min
,
5807 amdgpu_crtc
->wm_high
/ amdgpu_crtc
->line_time
) != PPSMC_Result_OK
)
5810 if (si_write_smc_soft_register(adev
,
5811 SI_SMC_SOFT_REGISTER_mclk_change_block_cp_max
,
5812 amdgpu_crtc
->wm_low
/ amdgpu_crtc
->line_time
) != PPSMC_Result_OK
)
5818 static int si_set_mc_special_registers(struct amdgpu_device
*adev
,
5819 struct si_mc_reg_table
*table
)
5824 for (i
= 0, j
= table
->last
; i
< table
->last
; i
++) {
5825 if (j
>= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE
)
5827 switch (table
->mc_reg_address
[i
].s1
) {
5829 temp_reg
= RREG32(MC_PMG_CMD_EMRS
);
5830 table
->mc_reg_address
[j
].s1
= MC_PMG_CMD_EMRS
;
5831 table
->mc_reg_address
[j
].s0
= MC_SEQ_PMG_CMD_EMRS_LP
;
5832 for (k
= 0; k
< table
->num_entries
; k
++)
5833 table
->mc_reg_table_entry
[k
].mc_data
[j
] =
5834 ((temp_reg
& 0xffff0000)) |
5835 ((table
->mc_reg_table_entry
[k
].mc_data
[i
] & 0xffff0000) >> 16);
5837 if (j
>= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE
)
5840 temp_reg
= RREG32(MC_PMG_CMD_MRS
);
5841 table
->mc_reg_address
[j
].s1
= MC_PMG_CMD_MRS
;
5842 table
->mc_reg_address
[j
].s0
= MC_SEQ_PMG_CMD_MRS_LP
;
5843 for (k
= 0; k
< table
->num_entries
; k
++) {
5844 table
->mc_reg_table_entry
[k
].mc_data
[j
] =
5845 (temp_reg
& 0xffff0000) |
5846 (table
->mc_reg_table_entry
[k
].mc_data
[i
] & 0x0000ffff);
5847 if (adev
->mc
.vram_type
!= AMDGPU_VRAM_TYPE_GDDR5
)
5848 table
->mc_reg_table_entry
[k
].mc_data
[j
] |= 0x100;
5851 if (j
>= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE
)
5854 if (adev
->mc
.vram_type
!= AMDGPU_VRAM_TYPE_GDDR5
) {
5855 table
->mc_reg_address
[j
].s1
= MC_PMG_AUTO_CMD
;
5856 table
->mc_reg_address
[j
].s0
= MC_PMG_AUTO_CMD
;
5857 for (k
= 0; k
< table
->num_entries
; k
++)
5858 table
->mc_reg_table_entry
[k
].mc_data
[j
] =
5859 (table
->mc_reg_table_entry
[k
].mc_data
[i
] & 0xffff0000) >> 16;
5861 if (j
>= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE
)
5865 case MC_SEQ_RESERVE_M
:
5866 temp_reg
= RREG32(MC_PMG_CMD_MRS1
);
5867 table
->mc_reg_address
[j
].s1
= MC_PMG_CMD_MRS1
;
5868 table
->mc_reg_address
[j
].s0
= MC_SEQ_PMG_CMD_MRS1_LP
;
5869 for(k
= 0; k
< table
->num_entries
; k
++)
5870 table
->mc_reg_table_entry
[k
].mc_data
[j
] =
5871 (temp_reg
& 0xffff0000) |
5872 (table
->mc_reg_table_entry
[k
].mc_data
[i
] & 0x0000ffff);
5874 if (j
>= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE
)
5887 static bool si_check_s0_mc_reg_index(u16 in_reg
, u16
*out_reg
)
5891 case MC_SEQ_RAS_TIMING
:
5892 *out_reg
= MC_SEQ_RAS_TIMING_LP
;
5894 case MC_SEQ_CAS_TIMING
:
5895 *out_reg
= MC_SEQ_CAS_TIMING_LP
;
5897 case MC_SEQ_MISC_TIMING
:
5898 *out_reg
= MC_SEQ_MISC_TIMING_LP
;
5900 case MC_SEQ_MISC_TIMING2
:
5901 *out_reg
= MC_SEQ_MISC_TIMING2_LP
;
5903 case MC_SEQ_RD_CTL_D0
:
5904 *out_reg
= MC_SEQ_RD_CTL_D0_LP
;
5906 case MC_SEQ_RD_CTL_D1
:
5907 *out_reg
= MC_SEQ_RD_CTL_D1_LP
;
5909 case MC_SEQ_WR_CTL_D0
:
5910 *out_reg
= MC_SEQ_WR_CTL_D0_LP
;
5912 case MC_SEQ_WR_CTL_D1
:
5913 *out_reg
= MC_SEQ_WR_CTL_D1_LP
;
5915 case MC_PMG_CMD_EMRS
:
5916 *out_reg
= MC_SEQ_PMG_CMD_EMRS_LP
;
5918 case MC_PMG_CMD_MRS
:
5919 *out_reg
= MC_SEQ_PMG_CMD_MRS_LP
;
5921 case MC_PMG_CMD_MRS1
:
5922 *out_reg
= MC_SEQ_PMG_CMD_MRS1_LP
;
5924 case MC_SEQ_PMG_TIMING
:
5925 *out_reg
= MC_SEQ_PMG_TIMING_LP
;
5927 case MC_PMG_CMD_MRS2
:
5928 *out_reg
= MC_SEQ_PMG_CMD_MRS2_LP
;
5930 case MC_SEQ_WR_CTL_2
:
5931 *out_reg
= MC_SEQ_WR_CTL_2_LP
;
5941 static void si_set_valid_flag(struct si_mc_reg_table
*table
)
5945 for (i
= 0; i
< table
->last
; i
++) {
5946 for (j
= 1; j
< table
->num_entries
; j
++) {
5947 if (table
->mc_reg_table_entry
[j
-1].mc_data
[i
] != table
->mc_reg_table_entry
[j
].mc_data
[i
]) {
5948 table
->valid_flag
|= 1 << i
;
5955 static void si_set_s0_mc_reg_index(struct si_mc_reg_table
*table
)
5960 for (i
= 0; i
< table
->last
; i
++)
5961 table
->mc_reg_address
[i
].s0
= si_check_s0_mc_reg_index(table
->mc_reg_address
[i
].s1
, &address
) ?
5962 address
: table
->mc_reg_address
[i
].s1
;
5966 static int si_copy_vbios_mc_reg_table(struct atom_mc_reg_table
*table
,
5967 struct si_mc_reg_table
*si_table
)
5971 if (table
->last
> SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE
)
5973 if (table
->num_entries
> MAX_AC_TIMING_ENTRIES
)
5976 for (i
= 0; i
< table
->last
; i
++)
5977 si_table
->mc_reg_address
[i
].s1
= table
->mc_reg_address
[i
].s1
;
5978 si_table
->last
= table
->last
;
5980 for (i
= 0; i
< table
->num_entries
; i
++) {
5981 si_table
->mc_reg_table_entry
[i
].mclk_max
=
5982 table
->mc_reg_table_entry
[i
].mclk_max
;
5983 for (j
= 0; j
< table
->last
; j
++) {
5984 si_table
->mc_reg_table_entry
[i
].mc_data
[j
] =
5985 table
->mc_reg_table_entry
[i
].mc_data
[j
];
5988 si_table
->num_entries
= table
->num_entries
;
5993 static int si_initialize_mc_reg_table(struct amdgpu_device
*adev
)
5995 struct si_power_info
*si_pi
= si_get_pi(adev
);
5996 struct atom_mc_reg_table
*table
;
5997 struct si_mc_reg_table
*si_table
= &si_pi
->mc_reg_table
;
5998 u8 module_index
= rv770_get_memory_module_index(adev
);
6001 table
= kzalloc(sizeof(struct atom_mc_reg_table
), GFP_KERNEL
);
6005 WREG32(MC_SEQ_RAS_TIMING_LP
, RREG32(MC_SEQ_RAS_TIMING
));
6006 WREG32(MC_SEQ_CAS_TIMING_LP
, RREG32(MC_SEQ_CAS_TIMING
));
6007 WREG32(MC_SEQ_MISC_TIMING_LP
, RREG32(MC_SEQ_MISC_TIMING
));
6008 WREG32(MC_SEQ_MISC_TIMING2_LP
, RREG32(MC_SEQ_MISC_TIMING2
));
6009 WREG32(MC_SEQ_PMG_CMD_EMRS_LP
, RREG32(MC_PMG_CMD_EMRS
));
6010 WREG32(MC_SEQ_PMG_CMD_MRS_LP
, RREG32(MC_PMG_CMD_MRS
));
6011 WREG32(MC_SEQ_PMG_CMD_MRS1_LP
, RREG32(MC_PMG_CMD_MRS1
));
6012 WREG32(MC_SEQ_WR_CTL_D0_LP
, RREG32(MC_SEQ_WR_CTL_D0
));
6013 WREG32(MC_SEQ_WR_CTL_D1_LP
, RREG32(MC_SEQ_WR_CTL_D1
));
6014 WREG32(MC_SEQ_RD_CTL_D0_LP
, RREG32(MC_SEQ_RD_CTL_D0
));
6015 WREG32(MC_SEQ_RD_CTL_D1_LP
, RREG32(MC_SEQ_RD_CTL_D1
));
6016 WREG32(MC_SEQ_PMG_TIMING_LP
, RREG32(MC_SEQ_PMG_TIMING
));
6017 WREG32(MC_SEQ_PMG_CMD_MRS2_LP
, RREG32(MC_PMG_CMD_MRS2
));
6018 WREG32(MC_SEQ_WR_CTL_2_LP
, RREG32(MC_SEQ_WR_CTL_2
));
6020 ret
= amdgpu_atombios_init_mc_reg_table(adev
, module_index
, table
);
6024 ret
= si_copy_vbios_mc_reg_table(table
, si_table
);
6028 si_set_s0_mc_reg_index(si_table
);
6030 ret
= si_set_mc_special_registers(adev
, si_table
);
6034 si_set_valid_flag(si_table
);
6043 static void si_populate_mc_reg_addresses(struct amdgpu_device
*adev
,
6044 SMC_SIslands_MCRegisters
*mc_reg_table
)
6046 struct si_power_info
*si_pi
= si_get_pi(adev
);
6049 for (i
= 0, j
= 0; j
< si_pi
->mc_reg_table
.last
; j
++) {
6050 if (si_pi
->mc_reg_table
.valid_flag
& (1 << j
)) {
6051 if (i
>= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE
)
6053 mc_reg_table
->address
[i
].s0
=
6054 cpu_to_be16(si_pi
->mc_reg_table
.mc_reg_address
[j
].s0
);
6055 mc_reg_table
->address
[i
].s1
=
6056 cpu_to_be16(si_pi
->mc_reg_table
.mc_reg_address
[j
].s1
);
6060 mc_reg_table
->last
= (u8
)i
;
6063 static void si_convert_mc_registers(const struct si_mc_reg_entry
*entry
,
6064 SMC_SIslands_MCRegisterSet
*data
,
6065 u32 num_entries
, u32 valid_flag
)
6069 for(i
= 0, j
= 0; j
< num_entries
; j
++) {
6070 if (valid_flag
& (1 << j
)) {
6071 data
->value
[i
] = cpu_to_be32(entry
->mc_data
[j
]);
6077 static void si_convert_mc_reg_table_entry_to_smc(struct amdgpu_device
*adev
,
6078 struct rv7xx_pl
*pl
,
6079 SMC_SIslands_MCRegisterSet
*mc_reg_table_data
)
6081 struct si_power_info
*si_pi
= si_get_pi(adev
);
6084 for (i
= 0; i
< si_pi
->mc_reg_table
.num_entries
; i
++) {
6085 if (pl
->mclk
<= si_pi
->mc_reg_table
.mc_reg_table_entry
[i
].mclk_max
)
6089 if ((i
== si_pi
->mc_reg_table
.num_entries
) && (i
> 0))
6092 si_convert_mc_registers(&si_pi
->mc_reg_table
.mc_reg_table_entry
[i
],
6093 mc_reg_table_data
, si_pi
->mc_reg_table
.last
,
6094 si_pi
->mc_reg_table
.valid_flag
);
6097 static void si_convert_mc_reg_table_to_smc(struct amdgpu_device
*adev
,
6098 struct amdgpu_ps
*amdgpu_state
,
6099 SMC_SIslands_MCRegisters
*mc_reg_table
)
6101 struct si_ps
*state
= si_get_ps(amdgpu_state
);
6104 for (i
= 0; i
< state
->performance_level_count
; i
++) {
6105 si_convert_mc_reg_table_entry_to_smc(adev
,
6106 &state
->performance_levels
[i
],
6107 &mc_reg_table
->data
[SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT
+ i
]);
6111 static int si_populate_mc_reg_table(struct amdgpu_device
*adev
,
6112 struct amdgpu_ps
*amdgpu_boot_state
)
6114 struct si_ps
*boot_state
= si_get_ps(amdgpu_boot_state
);
6115 struct si_power_info
*si_pi
= si_get_pi(adev
);
6116 struct si_ulv_param
*ulv
= &si_pi
->ulv
;
6117 SMC_SIslands_MCRegisters
*smc_mc_reg_table
= &si_pi
->smc_mc_reg_table
;
6119 memset(smc_mc_reg_table
, 0, sizeof(SMC_SIslands_MCRegisters
));
6121 si_write_smc_soft_register(adev
, SI_SMC_SOFT_REGISTER_seq_index
, 1);
6123 si_populate_mc_reg_addresses(adev
, smc_mc_reg_table
);
6125 si_convert_mc_reg_table_entry_to_smc(adev
, &boot_state
->performance_levels
[0],
6126 &smc_mc_reg_table
->data
[SISLANDS_MCREGISTERTABLE_INITIAL_SLOT
]);
6128 si_convert_mc_registers(&si_pi
->mc_reg_table
.mc_reg_table_entry
[0],
6129 &smc_mc_reg_table
->data
[SISLANDS_MCREGISTERTABLE_ACPI_SLOT
],
6130 si_pi
->mc_reg_table
.last
,
6131 si_pi
->mc_reg_table
.valid_flag
);
6133 if (ulv
->supported
&& ulv
->pl
.vddc
!= 0)
6134 si_convert_mc_reg_table_entry_to_smc(adev
, &ulv
->pl
,
6135 &smc_mc_reg_table
->data
[SISLANDS_MCREGISTERTABLE_ULV_SLOT
]);
6137 si_convert_mc_registers(&si_pi
->mc_reg_table
.mc_reg_table_entry
[0],
6138 &smc_mc_reg_table
->data
[SISLANDS_MCREGISTERTABLE_ULV_SLOT
],
6139 si_pi
->mc_reg_table
.last
,
6140 si_pi
->mc_reg_table
.valid_flag
);
6142 si_convert_mc_reg_table_to_smc(adev
, amdgpu_boot_state
, smc_mc_reg_table
);
6144 return amdgpu_si_copy_bytes_to_smc(adev
, si_pi
->mc_reg_table_start
,
6145 (u8
*)smc_mc_reg_table
,
6146 sizeof(SMC_SIslands_MCRegisters
), si_pi
->sram_end
);
6149 static int si_upload_mc_reg_table(struct amdgpu_device
*adev
,
6150 struct amdgpu_ps
*amdgpu_new_state
)
6152 struct si_ps
*new_state
= si_get_ps(amdgpu_new_state
);
6153 struct si_power_info
*si_pi
= si_get_pi(adev
);
6154 u32 address
= si_pi
->mc_reg_table_start
+
6155 offsetof(SMC_SIslands_MCRegisters
,
6156 data
[SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT
]);
6157 SMC_SIslands_MCRegisters
*smc_mc_reg_table
= &si_pi
->smc_mc_reg_table
;
6159 memset(smc_mc_reg_table
, 0, sizeof(SMC_SIslands_MCRegisters
));
6161 si_convert_mc_reg_table_to_smc(adev
, amdgpu_new_state
, smc_mc_reg_table
);
6163 return amdgpu_si_copy_bytes_to_smc(adev
, address
,
6164 (u8
*)&smc_mc_reg_table
->data
[SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT
],
6165 sizeof(SMC_SIslands_MCRegisterSet
) * new_state
->performance_level_count
,
6169 static void si_enable_voltage_control(struct amdgpu_device
*adev
, bool enable
)
6172 WREG32_P(GENERAL_PWRMGT
, VOLT_PWRMGT_EN
, ~VOLT_PWRMGT_EN
);
6174 WREG32_P(GENERAL_PWRMGT
, 0, ~VOLT_PWRMGT_EN
);
6177 static enum amdgpu_pcie_gen
si_get_maximum_link_speed(struct amdgpu_device
*adev
,
6178 struct amdgpu_ps
*amdgpu_state
)
6180 struct si_ps
*state
= si_get_ps(amdgpu_state
);
6182 u16 pcie_speed
, max_speed
= 0;
6184 for (i
= 0; i
< state
->performance_level_count
; i
++) {
6185 pcie_speed
= state
->performance_levels
[i
].pcie_gen
;
6186 if (max_speed
< pcie_speed
)
6187 max_speed
= pcie_speed
;
6192 static u16
si_get_current_pcie_speed(struct amdgpu_device
*adev
)
6196 speed_cntl
= RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL
) & LC_CURRENT_DATA_RATE_MASK
;
6197 speed_cntl
>>= LC_CURRENT_DATA_RATE_SHIFT
;
6199 return (u16
)speed_cntl
;
6202 static void si_request_link_speed_change_before_state_change(struct amdgpu_device
*adev
,
6203 struct amdgpu_ps
*amdgpu_new_state
,
6204 struct amdgpu_ps
*amdgpu_current_state
)
6206 struct si_power_info
*si_pi
= si_get_pi(adev
);
6207 enum amdgpu_pcie_gen target_link_speed
= si_get_maximum_link_speed(adev
, amdgpu_new_state
);
6208 enum amdgpu_pcie_gen current_link_speed
;
6210 if (si_pi
->force_pcie_gen
== AMDGPU_PCIE_GEN_INVALID
)
6211 current_link_speed
= si_get_maximum_link_speed(adev
, amdgpu_current_state
);
6213 current_link_speed
= si_pi
->force_pcie_gen
;
6215 si_pi
->force_pcie_gen
= AMDGPU_PCIE_GEN_INVALID
;
6216 si_pi
->pspp_notify_required
= false;
6217 if (target_link_speed
> current_link_speed
) {
6218 switch (target_link_speed
) {
6219 #if defined(CONFIG_ACPI)
6220 case AMDGPU_PCIE_GEN3
:
6221 if (amdgpu_acpi_pcie_performance_request(adev
, PCIE_PERF_REQ_PECI_GEN3
, false) == 0)
6223 si_pi
->force_pcie_gen
= AMDGPU_PCIE_GEN2
;
6224 if (current_link_speed
== AMDGPU_PCIE_GEN2
)
6226 case AMDGPU_PCIE_GEN2
:
6227 if (amdgpu_acpi_pcie_performance_request(adev
, PCIE_PERF_REQ_PECI_GEN2
, false) == 0)
6231 si_pi
->force_pcie_gen
= si_get_current_pcie_speed(adev
);
6235 if (target_link_speed
< current_link_speed
)
6236 si_pi
->pspp_notify_required
= true;
6240 static void si_notify_link_speed_change_after_state_change(struct amdgpu_device
*adev
,
6241 struct amdgpu_ps
*amdgpu_new_state
,
6242 struct amdgpu_ps
*amdgpu_current_state
)
6244 struct si_power_info
*si_pi
= si_get_pi(adev
);
6245 enum amdgpu_pcie_gen target_link_speed
= si_get_maximum_link_speed(adev
, amdgpu_new_state
);
6248 if (si_pi
->pspp_notify_required
) {
6249 if (target_link_speed
== AMDGPU_PCIE_GEN3
)
6250 request
= PCIE_PERF_REQ_PECI_GEN3
;
6251 else if (target_link_speed
== AMDGPU_PCIE_GEN2
)
6252 request
= PCIE_PERF_REQ_PECI_GEN2
;
6254 request
= PCIE_PERF_REQ_PECI_GEN1
;
6256 if ((request
== PCIE_PERF_REQ_PECI_GEN1
) &&
6257 (si_get_current_pcie_speed(adev
) > 0))
6260 #if defined(CONFIG_ACPI)
6261 amdgpu_acpi_pcie_performance_request(adev
, request
, false);
6267 static int si_ds_request(struct amdgpu_device
*adev
,
6268 bool ds_status_on
, u32 count_write
)
6270 struct evergreen_power_info
*eg_pi
= evergreen_get_pi(adev
);
6272 if (eg_pi
->sclk_deep_sleep
) {
6274 return (amdgpu_si_send_msg_to_smc(adev
, PPSMC_MSG_CancelThrottleOVRDSCLKDS
) ==
6278 return (amdgpu_si_send_msg_to_smc(adev
, PPSMC_MSG_ThrottleOVRDSCLKDS
) ==
6279 PPSMC_Result_OK
) ? 0 : -EINVAL
;
6285 static void si_set_max_cu_value(struct amdgpu_device
*adev
)
6287 struct si_power_info
*si_pi
= si_get_pi(adev
);
6289 if (adev
->asic_type
== CHIP_VERDE
) {
6290 switch (adev
->pdev
->device
) {
6326 static int si_patch_single_dependency_table_based_on_leakage(struct amdgpu_device
*adev
,
6327 struct amdgpu_clock_voltage_dependency_table
*table
)
6331 u16 leakage_voltage
;
6334 for (i
= 0; i
< table
->count
; i
++) {
6335 switch (si_get_leakage_voltage_from_leakage_index(adev
,
6336 table
->entries
[i
].v
,
6337 &leakage_voltage
)) {
6339 table
->entries
[i
].v
= leakage_voltage
;
6349 for (j
= (table
->count
- 2); j
>= 0; j
--) {
6350 table
->entries
[j
].v
= (table
->entries
[j
].v
<= table
->entries
[j
+ 1].v
) ?
6351 table
->entries
[j
].v
: table
->entries
[j
+ 1].v
;
6357 static int si_patch_dependency_tables_based_on_leakage(struct amdgpu_device
*adev
)
6361 ret
= si_patch_single_dependency_table_based_on_leakage(adev
,
6362 &adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_sclk
);
6364 DRM_ERROR("Could not patch vddc_on_sclk leakage table\n");
6365 ret
= si_patch_single_dependency_table_based_on_leakage(adev
,
6366 &adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_mclk
);
6368 DRM_ERROR("Could not patch vddc_on_mclk leakage table\n");
6369 ret
= si_patch_single_dependency_table_based_on_leakage(adev
,
6370 &adev
->pm
.dpm
.dyn_state
.vddci_dependency_on_mclk
);
6372 DRM_ERROR("Could not patch vddci_on_mclk leakage table\n");
6376 static void si_set_pcie_lane_width_in_smc(struct amdgpu_device
*adev
,
6377 struct amdgpu_ps
*amdgpu_new_state
,
6378 struct amdgpu_ps
*amdgpu_current_state
)
6381 u32 new_lane_width
=
6382 (amdgpu_new_state
->caps
& ATOM_PPLIB_PCIE_LINK_WIDTH_MASK
) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT
;
6383 u32 current_lane_width
=
6384 (amdgpu_current_state
->caps
& ATOM_PPLIB_PCIE_LINK_WIDTH_MASK
) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT
;
6386 if (new_lane_width
!= current_lane_width
) {
6387 amdgpu_set_pcie_lanes(adev
, new_lane_width
);
6388 lane_width
= amdgpu_get_pcie_lanes(adev
);
6389 si_write_smc_soft_register(adev
, SI_SMC_SOFT_REGISTER_non_ulv_pcie_link_width
, lane_width
);
/* si_dpm_setup_asic - one-time ASIC prep: snapshot clock registers and
 * turn on ACPI power management. */
static void si_dpm_setup_asic(struct amdgpu_device *adev)
{
	si_read_clock_registers(adev);
	si_enable_acpi_power_management(adev);
}
6399 static int si_thermal_enable_alert(struct amdgpu_device
*adev
,
6402 u32 thermal_int
= RREG32(CG_THERMAL_INT
);
6405 PPSMC_Result result
;
6407 thermal_int
&= ~(THERM_INT_MASK_HIGH
| THERM_INT_MASK_LOW
);
6408 WREG32(CG_THERMAL_INT
, thermal_int
);
6409 result
= amdgpu_si_send_msg_to_smc(adev
, PPSMC_MSG_EnableThermalInterrupt
);
6410 if (result
!= PPSMC_Result_OK
) {
6411 DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
6415 thermal_int
|= THERM_INT_MASK_HIGH
| THERM_INT_MASK_LOW
;
6416 WREG32(CG_THERMAL_INT
, thermal_int
);
6422 static int si_thermal_set_temperature_range(struct amdgpu_device
*adev
,
6423 int min_temp
, int max_temp
)
6425 int low_temp
= 0 * 1000;
6426 int high_temp
= 255 * 1000;
6428 if (low_temp
< min_temp
)
6429 low_temp
= min_temp
;
6430 if (high_temp
> max_temp
)
6431 high_temp
= max_temp
;
6432 if (high_temp
< low_temp
) {
6433 DRM_ERROR("invalid thermal range: %d - %d\n", low_temp
, high_temp
);
6437 WREG32_P(CG_THERMAL_INT
, DIG_THERM_INTH(high_temp
/ 1000), ~DIG_THERM_INTH_MASK
);
6438 WREG32_P(CG_THERMAL_INT
, DIG_THERM_INTL(low_temp
/ 1000), ~DIG_THERM_INTL_MASK
);
6439 WREG32_P(CG_THERMAL_CTRL
, DIG_THERM_DPM(high_temp
/ 1000), ~DIG_THERM_DPM_MASK
);
6441 adev
->pm
.dpm
.thermal
.min_temp
= low_temp
;
6442 adev
->pm
.dpm
.thermal
.max_temp
= high_temp
;
6447 static void si_fan_ctrl_set_static_mode(struct amdgpu_device
*adev
, u32 mode
)
6449 struct si_power_info
*si_pi
= si_get_pi(adev
);
6452 if (si_pi
->fan_ctrl_is_in_default_mode
) {
6453 tmp
= (RREG32(CG_FDO_CTRL2
) & FDO_PWM_MODE_MASK
) >> FDO_PWM_MODE_SHIFT
;
6454 si_pi
->fan_ctrl_default_mode
= tmp
;
6455 tmp
= (RREG32(CG_FDO_CTRL2
) & TMIN_MASK
) >> TMIN_SHIFT
;
6457 si_pi
->fan_ctrl_is_in_default_mode
= false;
6460 tmp
= RREG32(CG_FDO_CTRL2
) & ~TMIN_MASK
;
6462 WREG32(CG_FDO_CTRL2
, tmp
);
6464 tmp
= RREG32(CG_FDO_CTRL2
) & ~FDO_PWM_MODE_MASK
;
6465 tmp
|= FDO_PWM_MODE(mode
);
6466 WREG32(CG_FDO_CTRL2
, tmp
);
6469 static int si_thermal_setup_fan_table(struct amdgpu_device
*adev
)
6471 struct si_power_info
*si_pi
= si_get_pi(adev
);
6472 PP_SIslands_FanTable fan_table
= { FDO_MODE_HARDWARE
};
6474 u32 t_diff1
, t_diff2
, pwm_diff1
, pwm_diff2
;
6475 u16 fdo_min
, slope1
, slope2
;
6476 u32 reference_clock
, tmp
;
6480 if (!si_pi
->fan_table_start
) {
6481 adev
->pm
.dpm
.fan
.ucode_fan_control
= false;
6485 duty100
= (RREG32(CG_FDO_CTRL1
) & FMAX_DUTY100_MASK
) >> FMAX_DUTY100_SHIFT
;
6488 adev
->pm
.dpm
.fan
.ucode_fan_control
= false;
6492 tmp64
= (u64
)adev
->pm
.dpm
.fan
.pwm_min
* duty100
;
6493 do_div(tmp64
, 10000);
6494 fdo_min
= (u16
)tmp64
;
6496 t_diff1
= adev
->pm
.dpm
.fan
.t_med
- adev
->pm
.dpm
.fan
.t_min
;
6497 t_diff2
= adev
->pm
.dpm
.fan
.t_high
- adev
->pm
.dpm
.fan
.t_med
;
6499 pwm_diff1
= adev
->pm
.dpm
.fan
.pwm_med
- adev
->pm
.dpm
.fan
.pwm_min
;
6500 pwm_diff2
= adev
->pm
.dpm
.fan
.pwm_high
- adev
->pm
.dpm
.fan
.pwm_med
;
6502 slope1
= (u16
)((50 + ((16 * duty100
* pwm_diff1
) / t_diff1
)) / 100);
6503 slope2
= (u16
)((50 + ((16 * duty100
* pwm_diff2
) / t_diff2
)) / 100);
6505 fan_table
.temp_min
= cpu_to_be16((50 + adev
->pm
.dpm
.fan
.t_min
) / 100);
6506 fan_table
.temp_med
= cpu_to_be16((50 + adev
->pm
.dpm
.fan
.t_med
) / 100);
6507 fan_table
.temp_max
= cpu_to_be16((50 + adev
->pm
.dpm
.fan
.t_max
) / 100);
6508 fan_table
.slope1
= cpu_to_be16(slope1
);
6509 fan_table
.slope2
= cpu_to_be16(slope2
);
6510 fan_table
.fdo_min
= cpu_to_be16(fdo_min
);
6511 fan_table
.hys_down
= cpu_to_be16(adev
->pm
.dpm
.fan
.t_hyst
);
6512 fan_table
.hys_up
= cpu_to_be16(1);
6513 fan_table
.hys_slope
= cpu_to_be16(1);
6514 fan_table
.temp_resp_lim
= cpu_to_be16(5);
6515 reference_clock
= amdgpu_asic_get_xclk(adev
);
6517 fan_table
.refresh_period
= cpu_to_be32((adev
->pm
.dpm
.fan
.cycle_delay
*
6518 reference_clock
) / 1600);
6519 fan_table
.fdo_max
= cpu_to_be16((u16
)duty100
);
6521 tmp
= (RREG32(CG_MULT_THERMAL_CTRL
) & TEMP_SEL_MASK
) >> TEMP_SEL_SHIFT
;
6522 fan_table
.temp_src
= (uint8_t)tmp
;
6524 ret
= amdgpu_si_copy_bytes_to_smc(adev
,
6525 si_pi
->fan_table_start
,
6531 DRM_ERROR("Failed to load fan table to the SMC.");
6532 adev
->pm
.dpm
.fan
.ucode_fan_control
= false;
6538 static int si_fan_ctrl_start_smc_fan_control(struct amdgpu_device
*adev
)
6540 struct si_power_info
*si_pi
= si_get_pi(adev
);
6543 ret
= amdgpu_si_send_msg_to_smc(adev
, PPSMC_StartFanControl
);
6544 if (ret
== PPSMC_Result_OK
) {
6545 si_pi
->fan_is_controlled_by_smc
= true;
6552 static int si_fan_ctrl_stop_smc_fan_control(struct amdgpu_device
*adev
)
6554 struct si_power_info
*si_pi
= si_get_pi(adev
);
6557 ret
= amdgpu_si_send_msg_to_smc(adev
, PPSMC_StopFanControl
);
6559 if (ret
== PPSMC_Result_OK
) {
6560 si_pi
->fan_is_controlled_by_smc
= false;
6567 static int si_dpm_get_fan_speed_percent(struct amdgpu_device
*adev
,
6573 if (adev
->pm
.no_fan
)
6576 duty100
= (RREG32(CG_FDO_CTRL1
) & FMAX_DUTY100_MASK
) >> FMAX_DUTY100_SHIFT
;
6577 duty
= (RREG32(CG_THERMAL_STATUS
) & FDO_PWM_DUTY_MASK
) >> FDO_PWM_DUTY_SHIFT
;
6582 tmp64
= (u64
)duty
* 100;
6583 do_div(tmp64
, duty100
);
6584 *speed
= (u32
)tmp64
;
6592 static int si_dpm_set_fan_speed_percent(struct amdgpu_device
*adev
,
6595 struct si_power_info
*si_pi
= si_get_pi(adev
);
6600 if (adev
->pm
.no_fan
)
6603 if (si_pi
->fan_is_controlled_by_smc
)
6609 duty100
= (RREG32(CG_FDO_CTRL1
) & FMAX_DUTY100_MASK
) >> FMAX_DUTY100_SHIFT
;
6614 tmp64
= (u64
)speed
* duty100
;
6618 tmp
= RREG32(CG_FDO_CTRL0
) & ~FDO_STATIC_DUTY_MASK
;
6619 tmp
|= FDO_STATIC_DUTY(duty
);
6620 WREG32(CG_FDO_CTRL0
, tmp
);
6625 static void si_dpm_set_fan_control_mode(struct amdgpu_device
*adev
, u32 mode
)
6628 /* stop auto-manage */
6629 if (adev
->pm
.dpm
.fan
.ucode_fan_control
)
6630 si_fan_ctrl_stop_smc_fan_control(adev
);
6631 si_fan_ctrl_set_static_mode(adev
, mode
);
6633 /* restart auto-manage */
6634 if (adev
->pm
.dpm
.fan
.ucode_fan_control
)
6635 si_thermal_start_smc_fan_control(adev
);
6637 si_fan_ctrl_set_default_mode(adev
);
6641 static u32
si_dpm_get_fan_control_mode(struct amdgpu_device
*adev
)
6643 struct si_power_info
*si_pi
= si_get_pi(adev
);
6646 if (si_pi
->fan_is_controlled_by_smc
)
6649 tmp
= RREG32(CG_FDO_CTRL2
) & FDO_PWM_MODE_MASK
;
6650 return (tmp
>> FDO_PWM_MODE_SHIFT
);
6654 static int si_fan_ctrl_get_fan_speed_rpm(struct amdgpu_device
*adev
,
6658 u32 xclk
= amdgpu_asic_get_xclk(adev
);
6660 if (adev
->pm
.no_fan
)
6663 if (adev
->pm
.fan_pulses_per_revolution
== 0)
6666 tach_period
= (RREG32(CG_TACH_STATUS
) & TACH_PERIOD_MASK
) >> TACH_PERIOD_SHIFT
;
6667 if (tach_period
== 0)
6670 *speed
= 60 * xclk
* 10000 / tach_period
;
6675 static int si_fan_ctrl_set_fan_speed_rpm(struct amdgpu_device
*adev
,
6678 u32 tach_period
, tmp
;
6679 u32 xclk
= amdgpu_asic_get_xclk(adev
);
6681 if (adev
->pm
.no_fan
)
6684 if (adev
->pm
.fan_pulses_per_revolution
== 0)
6687 if ((speed
< adev
->pm
.fan_min_rpm
) ||
6688 (speed
> adev
->pm
.fan_max_rpm
))
6691 if (adev
->pm
.dpm
.fan
.ucode_fan_control
)
6692 si_fan_ctrl_stop_smc_fan_control(adev
);
6694 tach_period
= 60 * xclk
* 10000 / (8 * speed
);
6695 tmp
= RREG32(CG_TACH_CTRL
) & ~TARGET_PERIOD_MASK
;
6696 tmp
|= TARGET_PERIOD(tach_period
);
6697 WREG32(CG_TACH_CTRL
, tmp
);
6699 si_fan_ctrl_set_static_mode(adev
, FDO_PWM_MODE_STATIC_RPM
);
6705 static void si_fan_ctrl_set_default_mode(struct amdgpu_device
*adev
)
6707 struct si_power_info
*si_pi
= si_get_pi(adev
);
6710 if (!si_pi
->fan_ctrl_is_in_default_mode
) {
6711 tmp
= RREG32(CG_FDO_CTRL2
) & ~FDO_PWM_MODE_MASK
;
6712 tmp
|= FDO_PWM_MODE(si_pi
->fan_ctrl_default_mode
);
6713 WREG32(CG_FDO_CTRL2
, tmp
);
6715 tmp
= RREG32(CG_FDO_CTRL2
) & ~TMIN_MASK
;
6716 tmp
|= TMIN(si_pi
->t_min
);
6717 WREG32(CG_FDO_CTRL2
, tmp
);
6718 si_pi
->fan_ctrl_is_in_default_mode
= true;
6722 static void si_thermal_start_smc_fan_control(struct amdgpu_device
*adev
)
6724 if (adev
->pm
.dpm
.fan
.ucode_fan_control
) {
6725 si_fan_ctrl_start_smc_fan_control(adev
);
6726 si_fan_ctrl_set_static_mode(adev
, FDO_PWM_MODE_STATIC
);
6730 static void si_thermal_initialize(struct amdgpu_device
*adev
)
6734 if (adev
->pm
.fan_pulses_per_revolution
) {
6735 tmp
= RREG32(CG_TACH_CTRL
) & ~EDGE_PER_REV_MASK
;
6736 tmp
|= EDGE_PER_REV(adev
->pm
.fan_pulses_per_revolution
-1);
6737 WREG32(CG_TACH_CTRL
, tmp
);
6740 tmp
= RREG32(CG_FDO_CTRL2
) & ~TACH_PWM_RESP_RATE_MASK
;
6741 tmp
|= TACH_PWM_RESP_RATE(0x28);
6742 WREG32(CG_FDO_CTRL2
, tmp
);
6745 static int si_thermal_start_thermal_controller(struct amdgpu_device
*adev
)
6749 si_thermal_initialize(adev
);
6750 ret
= si_thermal_set_temperature_range(adev
, R600_TEMP_RANGE_MIN
, R600_TEMP_RANGE_MAX
);
6753 ret
= si_thermal_enable_alert(adev
, true);
6756 if (adev
->pm
.dpm
.fan
.ucode_fan_control
) {
6757 ret
= si_halt_smc(adev
);
6760 ret
= si_thermal_setup_fan_table(adev
);
6763 ret
= si_resume_smc(adev
);
6766 si_thermal_start_smc_fan_control(adev
);
6772 static void si_thermal_stop_thermal_controller(struct amdgpu_device
*adev
)
6774 if (!adev
->pm
.no_fan
) {
6775 si_fan_ctrl_set_default_mode(adev
);
6776 si_fan_ctrl_stop_smc_fan_control(adev
);
6780 static int si_dpm_enable(struct amdgpu_device
*adev
)
6782 struct rv7xx_power_info
*pi
= rv770_get_pi(adev
);
6783 struct evergreen_power_info
*eg_pi
= evergreen_get_pi(adev
);
6784 struct si_power_info
*si_pi
= si_get_pi(adev
);
6785 struct amdgpu_ps
*boot_ps
= adev
->pm
.dpm
.boot_ps
;
6788 if (amdgpu_si_is_smc_running(adev
))
6790 if (pi
->voltage_control
|| si_pi
->voltage_control_svi2
)
6791 si_enable_voltage_control(adev
, true);
6792 if (pi
->mvdd_control
)
6793 si_get_mvdd_configuration(adev
);
6794 if (pi
->voltage_control
|| si_pi
->voltage_control_svi2
) {
6795 ret
= si_construct_voltage_tables(adev
);
6797 DRM_ERROR("si_construct_voltage_tables failed\n");
6801 if (eg_pi
->dynamic_ac_timing
) {
6802 ret
= si_initialize_mc_reg_table(adev
);
6804 eg_pi
->dynamic_ac_timing
= false;
6807 si_enable_spread_spectrum(adev
, true);
6808 if (pi
->thermal_protection
)
6809 si_enable_thermal_protection(adev
, true);
6811 si_program_git(adev
);
6812 si_program_tp(adev
);
6813 si_program_tpp(adev
);
6814 si_program_sstp(adev
);
6815 si_enable_display_gap(adev
);
6816 si_program_vc(adev
);
6817 ret
= si_upload_firmware(adev
);
6819 DRM_ERROR("si_upload_firmware failed\n");
6822 ret
= si_process_firmware_header(adev
);
6824 DRM_ERROR("si_process_firmware_header failed\n");
6827 ret
= si_initial_switch_from_arb_f0_to_f1(adev
);
6829 DRM_ERROR("si_initial_switch_from_arb_f0_to_f1 failed\n");
6832 ret
= si_init_smc_table(adev
);
6834 DRM_ERROR("si_init_smc_table failed\n");
6837 ret
= si_init_smc_spll_table(adev
);
6839 DRM_ERROR("si_init_smc_spll_table failed\n");
6842 ret
= si_init_arb_table_index(adev
);
6844 DRM_ERROR("si_init_arb_table_index failed\n");
6847 if (eg_pi
->dynamic_ac_timing
) {
6848 ret
= si_populate_mc_reg_table(adev
, boot_ps
);
6850 DRM_ERROR("si_populate_mc_reg_table failed\n");
6854 ret
= si_initialize_smc_cac_tables(adev
);
6856 DRM_ERROR("si_initialize_smc_cac_tables failed\n");
6859 ret
= si_initialize_hardware_cac_manager(adev
);
6861 DRM_ERROR("si_initialize_hardware_cac_manager failed\n");
6864 ret
= si_initialize_smc_dte_tables(adev
);
6866 DRM_ERROR("si_initialize_smc_dte_tables failed\n");
6869 ret
= si_populate_smc_tdp_limits(adev
, boot_ps
);
6871 DRM_ERROR("si_populate_smc_tdp_limits failed\n");
6874 ret
= si_populate_smc_tdp_limits_2(adev
, boot_ps
);
6876 DRM_ERROR("si_populate_smc_tdp_limits_2 failed\n");
6879 si_program_response_times(adev
);
6880 si_program_ds_registers(adev
);
6881 si_dpm_start_smc(adev
);
6882 ret
= si_notify_smc_display_change(adev
, false);
6884 DRM_ERROR("si_notify_smc_display_change failed\n");
6887 si_enable_sclk_control(adev
, true);
6890 si_enable_auto_throttle_source(adev
, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL
, true);
6891 si_thermal_start_thermal_controller(adev
);
6892 ni_update_current_ps(adev
, boot_ps
);
6897 static int si_set_temperature_range(struct amdgpu_device
*adev
)
6901 ret
= si_thermal_enable_alert(adev
, false);
6904 ret
= si_thermal_set_temperature_range(adev
, R600_TEMP_RANGE_MIN
, R600_TEMP_RANGE_MAX
);
6907 ret
= si_thermal_enable_alert(adev
, true);
6914 static void si_dpm_disable(struct amdgpu_device
*adev
)
6916 struct rv7xx_power_info
*pi
= rv770_get_pi(adev
);
6917 struct amdgpu_ps
*boot_ps
= adev
->pm
.dpm
.boot_ps
;
6919 if (!amdgpu_si_is_smc_running(adev
))
6921 si_thermal_stop_thermal_controller(adev
);
6922 si_disable_ulv(adev
);
6924 if (pi
->thermal_protection
)
6925 si_enable_thermal_protection(adev
, false);
6926 si_enable_power_containment(adev
, boot_ps
, false);
6927 si_enable_smc_cac(adev
, boot_ps
, false);
6928 si_enable_spread_spectrum(adev
, false);
6929 si_enable_auto_throttle_source(adev
, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL
, false);
6931 si_reset_to_default(adev
);
6932 si_dpm_stop_smc(adev
);
6933 si_force_switch_to_arb_f0(adev
);
6935 ni_update_current_ps(adev
, boot_ps
);
6938 static int si_dpm_pre_set_power_state(struct amdgpu_device
*adev
)
6940 struct evergreen_power_info
*eg_pi
= evergreen_get_pi(adev
);
6941 struct amdgpu_ps requested_ps
= *adev
->pm
.dpm
.requested_ps
;
6942 struct amdgpu_ps
*new_ps
= &requested_ps
;
6944 ni_update_requested_ps(adev
, new_ps
);
6945 si_apply_state_adjust_rules(adev
, &eg_pi
->requested_rps
);
6950 static int si_power_control_set_level(struct amdgpu_device
*adev
)
6952 struct amdgpu_ps
*new_ps
= adev
->pm
.dpm
.requested_ps
;
6955 ret
= si_restrict_performance_levels_before_switch(adev
);
6958 ret
= si_halt_smc(adev
);
6961 ret
= si_populate_smc_tdp_limits(adev
, new_ps
);
6964 ret
= si_populate_smc_tdp_limits_2(adev
, new_ps
);
6967 ret
= si_resume_smc(adev
);
6970 ret
= si_set_sw_state(adev
);
6976 static int si_dpm_set_power_state(struct amdgpu_device
*adev
)
6978 struct evergreen_power_info
*eg_pi
= evergreen_get_pi(adev
);
6979 struct amdgpu_ps
*new_ps
= &eg_pi
->requested_rps
;
6980 struct amdgpu_ps
*old_ps
= &eg_pi
->current_rps
;
6983 ret
= si_disable_ulv(adev
);
6985 DRM_ERROR("si_disable_ulv failed\n");
6988 ret
= si_restrict_performance_levels_before_switch(adev
);
6990 DRM_ERROR("si_restrict_performance_levels_before_switch failed\n");
6993 if (eg_pi
->pcie_performance_request
)
6994 si_request_link_speed_change_before_state_change(adev
, new_ps
, old_ps
);
6995 ni_set_uvd_clock_before_set_eng_clock(adev
, new_ps
, old_ps
);
6996 ret
= si_enable_power_containment(adev
, new_ps
, false);
6998 DRM_ERROR("si_enable_power_containment failed\n");
7001 ret
= si_enable_smc_cac(adev
, new_ps
, false);
7003 DRM_ERROR("si_enable_smc_cac failed\n");
7006 ret
= si_halt_smc(adev
);
7008 DRM_ERROR("si_halt_smc failed\n");
7011 ret
= si_upload_sw_state(adev
, new_ps
);
7013 DRM_ERROR("si_upload_sw_state failed\n");
7016 ret
= si_upload_smc_data(adev
);
7018 DRM_ERROR("si_upload_smc_data failed\n");
7021 ret
= si_upload_ulv_state(adev
);
7023 DRM_ERROR("si_upload_ulv_state failed\n");
7026 if (eg_pi
->dynamic_ac_timing
) {
7027 ret
= si_upload_mc_reg_table(adev
, new_ps
);
7029 DRM_ERROR("si_upload_mc_reg_table failed\n");
7033 ret
= si_program_memory_timing_parameters(adev
, new_ps
);
7035 DRM_ERROR("si_program_memory_timing_parameters failed\n");
7038 si_set_pcie_lane_width_in_smc(adev
, new_ps
, old_ps
);
7040 ret
= si_resume_smc(adev
);
7042 DRM_ERROR("si_resume_smc failed\n");
7045 ret
= si_set_sw_state(adev
);
7047 DRM_ERROR("si_set_sw_state failed\n");
7050 ni_set_uvd_clock_after_set_eng_clock(adev
, new_ps
, old_ps
);
7051 if (eg_pi
->pcie_performance_request
)
7052 si_notify_link_speed_change_after_state_change(adev
, new_ps
, old_ps
);
7053 ret
= si_set_power_state_conditionally_enable_ulv(adev
, new_ps
);
7055 DRM_ERROR("si_set_power_state_conditionally_enable_ulv failed\n");
7058 ret
= si_enable_smc_cac(adev
, new_ps
, true);
7060 DRM_ERROR("si_enable_smc_cac failed\n");
7063 ret
= si_enable_power_containment(adev
, new_ps
, true);
7065 DRM_ERROR("si_enable_power_containment failed\n");
7069 ret
= si_power_control_set_level(adev
);
7071 DRM_ERROR("si_power_control_set_level failed\n");
7078 static void si_dpm_post_set_power_state(struct amdgpu_device
*adev
)
7080 struct evergreen_power_info
*eg_pi
= evergreen_get_pi(adev
);
7081 struct amdgpu_ps
*new_ps
= &eg_pi
->requested_rps
;
7083 ni_update_current_ps(adev
, new_ps
);
7087 void si_dpm_reset_asic(struct amdgpu_device
*adev
)
7089 si_restrict_performance_levels_before_switch(adev
);
7090 si_disable_ulv(adev
);
7091 si_set_boot_state(adev
);
7095 static void si_dpm_display_configuration_changed(struct amdgpu_device
*adev
)
7097 si_program_display_gap(adev
);
7101 static void si_parse_pplib_non_clock_info(struct amdgpu_device
*adev
,
7102 struct amdgpu_ps
*rps
,
7103 struct _ATOM_PPLIB_NONCLOCK_INFO
*non_clock_info
,
7106 rps
->caps
= le32_to_cpu(non_clock_info
->ulCapsAndSettings
);
7107 rps
->class = le16_to_cpu(non_clock_info
->usClassification
);
7108 rps
->class2
= le16_to_cpu(non_clock_info
->usClassification2
);
7110 if (ATOM_PPLIB_NONCLOCKINFO_VER1
< table_rev
) {
7111 rps
->vclk
= le32_to_cpu(non_clock_info
->ulVCLK
);
7112 rps
->dclk
= le32_to_cpu(non_clock_info
->ulDCLK
);
7113 } else if (r600_is_uvd_state(rps
->class, rps
->class2
)) {
7114 rps
->vclk
= RV770_DEFAULT_VCLK_FREQ
;
7115 rps
->dclk
= RV770_DEFAULT_DCLK_FREQ
;
7121 if (rps
->class & ATOM_PPLIB_CLASSIFICATION_BOOT
)
7122 adev
->pm
.dpm
.boot_ps
= rps
;
7123 if (rps
->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE
)
7124 adev
->pm
.dpm
.uvd_ps
= rps
;
7127 static void si_parse_pplib_clock_info(struct amdgpu_device
*adev
,
7128 struct amdgpu_ps
*rps
, int index
,
7129 union pplib_clock_info
*clock_info
)
7131 struct rv7xx_power_info
*pi
= rv770_get_pi(adev
);
7132 struct evergreen_power_info
*eg_pi
= evergreen_get_pi(adev
);
7133 struct si_power_info
*si_pi
= si_get_pi(adev
);
7134 struct si_ps
*ps
= si_get_ps(rps
);
7135 u16 leakage_voltage
;
7136 struct rv7xx_pl
*pl
= &ps
->performance_levels
[index
];
7139 ps
->performance_level_count
= index
+ 1;
7141 pl
->sclk
= le16_to_cpu(clock_info
->si
.usEngineClockLow
);
7142 pl
->sclk
|= clock_info
->si
.ucEngineClockHigh
<< 16;
7143 pl
->mclk
= le16_to_cpu(clock_info
->si
.usMemoryClockLow
);
7144 pl
->mclk
|= clock_info
->si
.ucMemoryClockHigh
<< 16;
7146 pl
->vddc
= le16_to_cpu(clock_info
->si
.usVDDC
);
7147 pl
->vddci
= le16_to_cpu(clock_info
->si
.usVDDCI
);
7148 pl
->flags
= le32_to_cpu(clock_info
->si
.ulFlags
);
7149 pl
->pcie_gen
= r600_get_pcie_gen_support(adev
,
7150 si_pi
->sys_pcie_mask
,
7151 si_pi
->boot_pcie_gen
,
7152 clock_info
->si
.ucPCIEGen
);
7154 /* patch up vddc if necessary */
7155 ret
= si_get_leakage_voltage_from_leakage_index(adev
, pl
->vddc
,
7158 pl
->vddc
= leakage_voltage
;
7160 if (rps
->class & ATOM_PPLIB_CLASSIFICATION_ACPI
) {
7161 pi
->acpi_vddc
= pl
->vddc
;
7162 eg_pi
->acpi_vddci
= pl
->vddci
;
7163 si_pi
->acpi_pcie_gen
= pl
->pcie_gen
;
7166 if ((rps
->class2
& ATOM_PPLIB_CLASSIFICATION2_ULV
) &&
7168 /* XXX disable for A0 tahiti */
7169 si_pi
->ulv
.supported
= false;
7170 si_pi
->ulv
.pl
= *pl
;
7171 si_pi
->ulv
.one_pcie_lane_in_ulv
= false;
7172 si_pi
->ulv
.volt_change_delay
= SISLANDS_ULVVOLTAGECHANGEDELAY_DFLT
;
7173 si_pi
->ulv
.cg_ulv_parameter
= SISLANDS_CGULVPARAMETER_DFLT
;
7174 si_pi
->ulv
.cg_ulv_control
= SISLANDS_CGULVCONTROL_DFLT
;
7177 if (pi
->min_vddc_in_table
> pl
->vddc
)
7178 pi
->min_vddc_in_table
= pl
->vddc
;
7180 if (pi
->max_vddc_in_table
< pl
->vddc
)
7181 pi
->max_vddc_in_table
= pl
->vddc
;
7183 /* patch up boot state */
7184 if (rps
->class & ATOM_PPLIB_CLASSIFICATION_BOOT
) {
7185 u16 vddc
, vddci
, mvdd
;
7186 amdgpu_atombios_get_default_voltages(adev
, &vddc
, &vddci
, &mvdd
);
7187 pl
->mclk
= adev
->clock
.default_mclk
;
7188 pl
->sclk
= adev
->clock
.default_sclk
;
7191 si_pi
->mvdd_bootup_value
= mvdd
;
7194 if ((rps
->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK
) ==
7195 ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE
) {
7196 adev
->pm
.dpm
.dyn_state
.max_clock_voltage_on_ac
.sclk
= pl
->sclk
;
7197 adev
->pm
.dpm
.dyn_state
.max_clock_voltage_on_ac
.mclk
= pl
->mclk
;
7198 adev
->pm
.dpm
.dyn_state
.max_clock_voltage_on_ac
.vddc
= pl
->vddc
;
7199 adev
->pm
.dpm
.dyn_state
.max_clock_voltage_on_ac
.vddci
= pl
->vddci
;
7203 union pplib_power_state
{
7204 struct _ATOM_PPLIB_STATE v1
;
7205 struct _ATOM_PPLIB_STATE_V2 v2
;
7208 static int si_parse_power_table(struct amdgpu_device
*adev
)
7210 struct amdgpu_mode_info
*mode_info
= &adev
->mode_info
;
7211 struct _ATOM_PPLIB_NONCLOCK_INFO
*non_clock_info
;
7212 union pplib_power_state
*power_state
;
7213 int i
, j
, k
, non_clock_array_index
, clock_array_index
;
7214 union pplib_clock_info
*clock_info
;
7215 struct _StateArray
*state_array
;
7216 struct _ClockInfoArray
*clock_info_array
;
7217 struct _NonClockInfoArray
*non_clock_info_array
;
7218 union power_info
*power_info
;
7219 int index
= GetIndexIntoMasterTable(DATA
, PowerPlayInfo
);
7222 u8
*power_state_offset
;
7225 if (!amdgpu_atom_parse_data_header(mode_info
->atom_context
, index
, NULL
,
7226 &frev
, &crev
, &data_offset
))
7228 power_info
= (union power_info
*)(mode_info
->atom_context
->bios
+ data_offset
);
7230 amdgpu_add_thermal_controller(adev
);
7232 state_array
= (struct _StateArray
*)
7233 (mode_info
->atom_context
->bios
+ data_offset
+
7234 le16_to_cpu(power_info
->pplib
.usStateArrayOffset
));
7235 clock_info_array
= (struct _ClockInfoArray
*)
7236 (mode_info
->atom_context
->bios
+ data_offset
+
7237 le16_to_cpu(power_info
->pplib
.usClockInfoArrayOffset
));
7238 non_clock_info_array
= (struct _NonClockInfoArray
*)
7239 (mode_info
->atom_context
->bios
+ data_offset
+
7240 le16_to_cpu(power_info
->pplib
.usNonClockInfoArrayOffset
));
7242 adev
->pm
.dpm
.ps
= kzalloc(sizeof(struct amdgpu_ps
) *
7243 state_array
->ucNumEntries
, GFP_KERNEL
);
7244 if (!adev
->pm
.dpm
.ps
)
7246 power_state_offset
= (u8
*)state_array
->states
;
7247 for (i
= 0; i
< state_array
->ucNumEntries
; i
++) {
7249 power_state
= (union pplib_power_state
*)power_state_offset
;
7250 non_clock_array_index
= power_state
->v2
.nonClockInfoIndex
;
7251 non_clock_info
= (struct _ATOM_PPLIB_NONCLOCK_INFO
*)
7252 &non_clock_info_array
->nonClockInfo
[non_clock_array_index
];
7253 ps
= kzalloc(sizeof(struct si_ps
), GFP_KERNEL
);
7255 kfree(adev
->pm
.dpm
.ps
);
7258 adev
->pm
.dpm
.ps
[i
].ps_priv
= ps
;
7259 si_parse_pplib_non_clock_info(adev
, &adev
->pm
.dpm
.ps
[i
],
7261 non_clock_info_array
->ucEntrySize
);
7263 idx
= (u8
*)&power_state
->v2
.clockInfoIndex
[0];
7264 for (j
= 0; j
< power_state
->v2
.ucNumDPMLevels
; j
++) {
7265 clock_array_index
= idx
[j
];
7266 if (clock_array_index
>= clock_info_array
->ucNumEntries
)
7268 if (k
>= SISLANDS_MAX_HARDWARE_POWERLEVELS
)
7270 clock_info
= (union pplib_clock_info
*)
7271 ((u8
*)&clock_info_array
->clockInfo
[0] +
7272 (clock_array_index
* clock_info_array
->ucEntrySize
));
7273 si_parse_pplib_clock_info(adev
,
7274 &adev
->pm
.dpm
.ps
[i
], k
,
7278 power_state_offset
+= 2 + power_state
->v2
.ucNumDPMLevels
;
7280 adev
->pm
.dpm
.num_ps
= state_array
->ucNumEntries
;
7282 /* fill in the vce power states */
7283 for (i
= 0; i
< adev
->pm
.dpm
.num_of_vce_states
; i
++) {
7285 clock_array_index
= adev
->pm
.dpm
.vce_states
[i
].clk_idx
;
7286 clock_info
= (union pplib_clock_info
*)
7287 &clock_info_array
->clockInfo
[clock_array_index
* clock_info_array
->ucEntrySize
];
7288 sclk
= le16_to_cpu(clock_info
->si
.usEngineClockLow
);
7289 sclk
|= clock_info
->si
.ucEngineClockHigh
<< 16;
7290 mclk
= le16_to_cpu(clock_info
->si
.usMemoryClockLow
);
7291 mclk
|= clock_info
->si
.ucMemoryClockHigh
<< 16;
7292 adev
->pm
.dpm
.vce_states
[i
].sclk
= sclk
;
7293 adev
->pm
.dpm
.vce_states
[i
].mclk
= mclk
;
7299 static int si_dpm_init(struct amdgpu_device
*adev
)
7301 struct rv7xx_power_info
*pi
;
7302 struct evergreen_power_info
*eg_pi
;
7303 struct ni_power_info
*ni_pi
;
7304 struct si_power_info
*si_pi
;
7305 struct atom_clock_dividers dividers
;
7309 si_pi
= kzalloc(sizeof(struct si_power_info
), GFP_KERNEL
);
7312 adev
->pm
.dpm
.priv
= si_pi
;
7317 ret
= drm_pcie_get_speed_cap_mask(adev
->ddev
, &mask
);
7319 si_pi
->sys_pcie_mask
= 0;
7321 si_pi
->sys_pcie_mask
= mask
;
7322 si_pi
->force_pcie_gen
= AMDGPU_PCIE_GEN_INVALID
;
7323 si_pi
->boot_pcie_gen
= si_get_current_pcie_speed(adev
);
7325 si_set_max_cu_value(adev
);
7327 rv770_get_max_vddc(adev
);
7328 si_get_leakage_vddc(adev
);
7329 si_patch_dependency_tables_based_on_leakage(adev
);
7332 eg_pi
->acpi_vddci
= 0;
7333 pi
->min_vddc_in_table
= 0;
7334 pi
->max_vddc_in_table
= 0;
7336 ret
= amdgpu_get_platform_caps(adev
);
7340 ret
= amdgpu_parse_extended_power_table(adev
);
7344 ret
= si_parse_power_table(adev
);
7348 adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_dispclk
.entries
=
7349 kzalloc(4 * sizeof(struct amdgpu_clock_voltage_dependency_entry
), GFP_KERNEL
);
7350 if (!adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_dispclk
.entries
) {
7351 amdgpu_free_extended_power_table(adev
);
7354 adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_dispclk
.count
= 4;
7355 adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_dispclk
.entries
[0].clk
= 0;
7356 adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_dispclk
.entries
[0].v
= 0;
7357 adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_dispclk
.entries
[1].clk
= 36000;
7358 adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_dispclk
.entries
[1].v
= 720;
7359 adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_dispclk
.entries
[2].clk
= 54000;
7360 adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_dispclk
.entries
[2].v
= 810;
7361 adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_dispclk
.entries
[3].clk
= 72000;
7362 adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_dispclk
.entries
[3].v
= 900;
7364 if (adev
->pm
.dpm
.voltage_response_time
== 0)
7365 adev
->pm
.dpm
.voltage_response_time
= R600_VOLTAGERESPONSETIME_DFLT
;
7366 if (adev
->pm
.dpm
.backbias_response_time
== 0)
7367 adev
->pm
.dpm
.backbias_response_time
= R600_BACKBIASRESPONSETIME_DFLT
;
7369 ret
= amdgpu_atombios_get_clock_dividers(adev
, COMPUTE_ENGINE_PLL_PARAM
,
7370 0, false, ÷rs
);
7372 pi
->ref_div
= dividers
.ref_div
+ 1;
7374 pi
->ref_div
= R600_REFERENCEDIVIDER_DFLT
;
7376 eg_pi
->smu_uvd_hs
= false;
7378 pi
->mclk_strobe_mode_threshold
= 40000;
7379 if (si_is_special_1gb_platform(adev
))
7380 pi
->mclk_stutter_mode_threshold
= 0;
7382 pi
->mclk_stutter_mode_threshold
= pi
->mclk_strobe_mode_threshold
;
7383 pi
->mclk_edc_enable_threshold
= 40000;
7384 eg_pi
->mclk_edc_wr_enable_threshold
= 40000;
7386 ni_pi
->mclk_rtt_mode_threshold
= eg_pi
->mclk_edc_wr_enable_threshold
;
7388 pi
->voltage_control
=
7389 amdgpu_atombios_is_voltage_gpio(adev
, SET_VOLTAGE_TYPE_ASIC_VDDC
,
7390 VOLTAGE_OBJ_GPIO_LUT
);
7391 if (!pi
->voltage_control
) {
7392 si_pi
->voltage_control_svi2
=
7393 amdgpu_atombios_is_voltage_gpio(adev
, SET_VOLTAGE_TYPE_ASIC_VDDC
,
7395 if (si_pi
->voltage_control_svi2
)
7396 amdgpu_atombios_get_svi2_info(adev
, SET_VOLTAGE_TYPE_ASIC_VDDC
,
7397 &si_pi
->svd_gpio_id
, &si_pi
->svc_gpio_id
);
7401 amdgpu_atombios_is_voltage_gpio(adev
, SET_VOLTAGE_TYPE_ASIC_MVDDC
,
7402 VOLTAGE_OBJ_GPIO_LUT
);
7404 eg_pi
->vddci_control
=
7405 amdgpu_atombios_is_voltage_gpio(adev
, SET_VOLTAGE_TYPE_ASIC_VDDCI
,
7406 VOLTAGE_OBJ_GPIO_LUT
);
7407 if (!eg_pi
->vddci_control
)
7408 si_pi
->vddci_control_svi2
=
7409 amdgpu_atombios_is_voltage_gpio(adev
, SET_VOLTAGE_TYPE_ASIC_VDDCI
,
7412 si_pi
->vddc_phase_shed_control
=
7413 amdgpu_atombios_is_voltage_gpio(adev
, SET_VOLTAGE_TYPE_ASIC_VDDC
,
7414 VOLTAGE_OBJ_PHASE_LUT
);
7416 rv770_get_engine_memory_ss(adev
);
7418 pi
->asi
= RV770_ASI_DFLT
;
7419 pi
->pasi
= CYPRESS_HASI_DFLT
;
7420 pi
->vrc
= SISLANDS_VRC_DFLT
;
7422 pi
->gfx_clock_gating
= true;
7424 eg_pi
->sclk_deep_sleep
= true;
7425 si_pi
->sclk_deep_sleep_above_low
= false;
7427 if (adev
->pm
.int_thermal_type
!= THERMAL_TYPE_NONE
)
7428 pi
->thermal_protection
= true;
7430 pi
->thermal_protection
= false;
7432 eg_pi
->dynamic_ac_timing
= true;
7434 eg_pi
->light_sleep
= true;
7435 #if defined(CONFIG_ACPI)
7436 eg_pi
->pcie_performance_request
=
7437 amdgpu_acpi_is_pcie_performance_request_supported(adev
);
7439 eg_pi
->pcie_performance_request
= false;
7442 si_pi
->sram_end
= SMC_RAM_END
;
7444 adev
->pm
.dpm
.dyn_state
.mclk_sclk_ratio
= 4;
7445 adev
->pm
.dpm
.dyn_state
.sclk_mclk_delta
= 15000;
7446 adev
->pm
.dpm
.dyn_state
.vddc_vddci_delta
= 200;
7447 adev
->pm
.dpm
.dyn_state
.valid_sclk_values
.count
= 0;
7448 adev
->pm
.dpm
.dyn_state
.valid_sclk_values
.values
= NULL
;
7449 adev
->pm
.dpm
.dyn_state
.valid_mclk_values
.count
= 0;
7450 adev
->pm
.dpm
.dyn_state
.valid_mclk_values
.values
= NULL
;
7452 si_initialize_powertune_defaults(adev
);
7454 /* make sure dc limits are valid */
7455 if ((adev
->pm
.dpm
.dyn_state
.max_clock_voltage_on_dc
.sclk
== 0) ||
7456 (adev
->pm
.dpm
.dyn_state
.max_clock_voltage_on_dc
.mclk
== 0))
7457 adev
->pm
.dpm
.dyn_state
.max_clock_voltage_on_dc
=
7458 adev
->pm
.dpm
.dyn_state
.max_clock_voltage_on_ac
;
7460 si_pi
->fan_ctrl_is_in_default_mode
= true;
7465 static void si_dpm_fini(struct amdgpu_device
*adev
)
7469 if (adev
->pm
.dpm
.ps
)
7470 for (i
= 0; i
< adev
->pm
.dpm
.num_ps
; i
++)
7471 kfree(adev
->pm
.dpm
.ps
[i
].ps_priv
);
7472 kfree(adev
->pm
.dpm
.ps
);
7473 kfree(adev
->pm
.dpm
.priv
);
7474 kfree(adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_dispclk
.entries
);
7475 amdgpu_free_extended_power_table(adev
);
7478 static void si_dpm_debugfs_print_current_performance_level(struct amdgpu_device
*adev
,
7481 struct evergreen_power_info
*eg_pi
= evergreen_get_pi(adev
);
7482 struct amdgpu_ps
*rps
= &eg_pi
->current_rps
;
7483 struct si_ps
*ps
= si_get_ps(rps
);
7484 struct rv7xx_pl
*pl
;
7486 (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX
) & CURRENT_STATE_INDEX_MASK
) >>
7487 CURRENT_STATE_INDEX_SHIFT
;
7489 if (current_index
>= ps
->performance_level_count
) {
7490 seq_printf(m
, "invalid dpm profile %d\n", current_index
);
7492 pl
= &ps
->performance_levels
[current_index
];
7493 seq_printf(m
, "uvd vclk: %d dclk: %d\n", rps
->vclk
, rps
->dclk
);
7494 seq_printf(m
, "power level %d sclk: %u mclk: %u vddc: %u vddci: %u pcie gen: %u\n",
7495 current_index
, pl
->sclk
, pl
->mclk
, pl
->vddc
, pl
->vddci
, pl
->pcie_gen
+ 1);
7499 static int si_dpm_set_interrupt_state(struct amdgpu_device
*adev
,
7500 struct amdgpu_irq_src
*source
,
7502 enum amdgpu_interrupt_state state
)
7507 case AMDGPU_THERMAL_IRQ_LOW_TO_HIGH
:
7509 case AMDGPU_IRQ_STATE_DISABLE
:
7510 cg_thermal_int
= RREG32_SMC(CG_THERMAL_INT
);
7511 cg_thermal_int
|= THERM_INT_MASK_HIGH
;
7512 WREG32_SMC(CG_THERMAL_INT
, cg_thermal_int
);
7514 case AMDGPU_IRQ_STATE_ENABLE
:
7515 cg_thermal_int
= RREG32_SMC(CG_THERMAL_INT
);
7516 cg_thermal_int
&= ~THERM_INT_MASK_HIGH
;
7517 WREG32_SMC(CG_THERMAL_INT
, cg_thermal_int
);
7524 case AMDGPU_THERMAL_IRQ_HIGH_TO_LOW
:
7526 case AMDGPU_IRQ_STATE_DISABLE
:
7527 cg_thermal_int
= RREG32_SMC(CG_THERMAL_INT
);
7528 cg_thermal_int
|= THERM_INT_MASK_LOW
;
7529 WREG32_SMC(CG_THERMAL_INT
, cg_thermal_int
);
7531 case AMDGPU_IRQ_STATE_ENABLE
:
7532 cg_thermal_int
= RREG32_SMC(CG_THERMAL_INT
);
7533 cg_thermal_int
&= ~THERM_INT_MASK_LOW
;
7534 WREG32_SMC(CG_THERMAL_INT
, cg_thermal_int
);
7547 static int si_dpm_process_interrupt(struct amdgpu_device
*adev
,
7548 struct amdgpu_irq_src
*source
,
7549 struct amdgpu_iv_entry
*entry
)
7551 bool queue_thermal
= false;
7556 switch (entry
->src_id
) {
7557 case 230: /* thermal low to high */
7558 DRM_DEBUG("IH: thermal low to high\n");
7559 adev
->pm
.dpm
.thermal
.high_to_low
= false;
7560 queue_thermal
= true;
7562 case 231: /* thermal high to low */
7563 DRM_DEBUG("IH: thermal high to low\n");
7564 adev
->pm
.dpm
.thermal
.high_to_low
= true;
7565 queue_thermal
= true;
7572 schedule_work(&adev
->pm
.dpm
.thermal
.work
);
7577 static int si_dpm_late_init(void *handle
)
7580 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
7585 /* init the sysfs and debugfs files late */
7586 ret
= amdgpu_pm_sysfs_init(adev
);
7590 ret
= si_set_temperature_range(adev
);
7594 si_dpm_powergate_uvd(adev
, true);
7600 * si_dpm_init_microcode - load ucode images from disk
7602 * @adev: amdgpu_device pointer
7604 * Use the firmware interface to load the ucode images into
7605 * the driver (not loaded into hw).
7606 * Returns 0 on success, error on failure.
7608 static int si_dpm_init_microcode(struct amdgpu_device
*adev
)
7610 const char *chip_name
;
7615 switch (adev
->asic_type
) {
7617 chip_name
= "tahiti";
7620 if ((adev
->pdev
->revision
== 0x81) &&
7621 ((adev
->pdev
->device
== 0x6810) ||
7622 (adev
->pdev
->device
== 0x6811)))
7623 chip_name
= "pitcairn_k";
7625 chip_name
= "pitcairn";
7628 if (((adev
->pdev
->device
== 0x6820) &&
7629 ((adev
->pdev
->revision
== 0x81) ||
7630 (adev
->pdev
->revision
== 0x83))) ||
7631 ((adev
->pdev
->device
== 0x6821) &&
7632 ((adev
->pdev
->revision
== 0x83) ||
7633 (adev
->pdev
->revision
== 0x87))) ||
7634 ((adev
->pdev
->revision
== 0x87) &&
7635 ((adev
->pdev
->device
== 0x6823) ||
7636 (adev
->pdev
->device
== 0x682b))))
7637 chip_name
= "verde_k";
7639 chip_name
= "verde";
7642 if (((adev
->pdev
->revision
== 0x81) &&
7643 ((adev
->pdev
->device
== 0x6600) ||
7644 (adev
->pdev
->device
== 0x6604) ||
7645 (adev
->pdev
->device
== 0x6605) ||
7646 (adev
->pdev
->device
== 0x6610))) ||
7647 ((adev
->pdev
->revision
== 0x83) &&
7648 (adev
->pdev
->device
== 0x6610)))
7649 chip_name
= "oland_k";
7651 chip_name
= "oland";
7654 if (((adev
->pdev
->revision
== 0x81) &&
7655 (adev
->pdev
->device
== 0x6660)) ||
7656 ((adev
->pdev
->revision
== 0x83) &&
7657 ((adev
->pdev
->device
== 0x6660) ||
7658 (adev
->pdev
->device
== 0x6663) ||
7659 (adev
->pdev
->device
== 0x6665) ||
7660 (adev
->pdev
->device
== 0x6667))))
7661 chip_name
= "hainan_k";
7662 else if ((adev
->pdev
->revision
== 0xc3) &&
7663 (adev
->pdev
->device
== 0x6665))
7664 chip_name
= "banks_k_2";
7666 chip_name
= "hainan";
7671 snprintf(fw_name
, sizeof(fw_name
), "radeon/%s_smc.bin", chip_name
);
7672 err
= request_firmware(&adev
->pm
.fw
, fw_name
, adev
->dev
);
7675 err
= amdgpu_ucode_validate(adev
->pm
.fw
);
7679 DRM_ERROR("si_smc: Failed to load firmware. err = %d\"%s\"\n",
7681 release_firmware(adev
->pm
.fw
);
7688 static int si_dpm_sw_init(void *handle
)
7691 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
7693 ret
= amdgpu_irq_add_id(adev
, 230, &adev
->pm
.dpm
.thermal
.irq
);
7697 ret
= amdgpu_irq_add_id(adev
, 231, &adev
->pm
.dpm
.thermal
.irq
);
7701 /* default to balanced state */
7702 adev
->pm
.dpm
.state
= POWER_STATE_TYPE_BALANCED
;
7703 adev
->pm
.dpm
.user_state
= POWER_STATE_TYPE_BALANCED
;
7704 adev
->pm
.dpm
.forced_level
= AMD_DPM_FORCED_LEVEL_AUTO
;
7705 adev
->pm
.default_sclk
= adev
->clock
.default_sclk
;
7706 adev
->pm
.default_mclk
= adev
->clock
.default_mclk
;
7707 adev
->pm
.current_sclk
= adev
->clock
.default_sclk
;
7708 adev
->pm
.current_mclk
= adev
->clock
.default_mclk
;
7709 adev
->pm
.int_thermal_type
= THERMAL_TYPE_NONE
;
7711 if (amdgpu_dpm
== 0)
7714 ret
= si_dpm_init_microcode(adev
);
7718 INIT_WORK(&adev
->pm
.dpm
.thermal
.work
, amdgpu_dpm_thermal_work_handler
);
7719 mutex_lock(&adev
->pm
.mutex
);
7720 ret
= si_dpm_init(adev
);
7723 adev
->pm
.dpm
.current_ps
= adev
->pm
.dpm
.requested_ps
= adev
->pm
.dpm
.boot_ps
;
7724 if (amdgpu_dpm
== 1)
7725 amdgpu_pm_print_power_states(adev
);
7726 mutex_unlock(&adev
->pm
.mutex
);
7727 DRM_INFO("amdgpu: dpm initialized\n");
7733 mutex_unlock(&adev
->pm
.mutex
);
7734 DRM_ERROR("amdgpu: dpm initialization failed\n");
7738 static int si_dpm_sw_fini(void *handle
)
7740 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
7742 flush_work(&adev
->pm
.dpm
.thermal
.work
);
7744 mutex_lock(&adev
->pm
.mutex
);
7745 amdgpu_pm_sysfs_fini(adev
);
7747 mutex_unlock(&adev
->pm
.mutex
);
7752 static int si_dpm_hw_init(void *handle
)
7756 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
7761 mutex_lock(&adev
->pm
.mutex
);
7762 si_dpm_setup_asic(adev
);
7763 ret
= si_dpm_enable(adev
);
7765 adev
->pm
.dpm_enabled
= false;
7767 adev
->pm
.dpm_enabled
= true;
7768 mutex_unlock(&adev
->pm
.mutex
);
7773 static int si_dpm_hw_fini(void *handle
)
7775 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
7777 if (adev
->pm
.dpm_enabled
) {
7778 mutex_lock(&adev
->pm
.mutex
);
7779 si_dpm_disable(adev
);
7780 mutex_unlock(&adev
->pm
.mutex
);
7786 static int si_dpm_suspend(void *handle
)
7788 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
7790 if (adev
->pm
.dpm_enabled
) {
7791 mutex_lock(&adev
->pm
.mutex
);
7793 si_dpm_disable(adev
);
7794 /* reset the power state */
7795 adev
->pm
.dpm
.current_ps
= adev
->pm
.dpm
.requested_ps
= adev
->pm
.dpm
.boot_ps
;
7796 mutex_unlock(&adev
->pm
.mutex
);
7801 static int si_dpm_resume(void *handle
)
7804 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
7806 if (adev
->pm
.dpm_enabled
) {
7807 /* asic init will reset to the boot state */
7808 mutex_lock(&adev
->pm
.mutex
);
7809 si_dpm_setup_asic(adev
);
7810 ret
= si_dpm_enable(adev
);
7812 adev
->pm
.dpm_enabled
= false;
7814 adev
->pm
.dpm_enabled
= true;
7815 mutex_unlock(&adev
->pm
.mutex
);
7816 if (adev
->pm
.dpm_enabled
)
7817 amdgpu_pm_compute_clocks(adev
);
7822 static bool si_dpm_is_idle(void *handle
)
7828 static int si_dpm_wait_for_idle(void *handle
)
7834 static int si_dpm_soft_reset(void *handle
)
7839 static int si_dpm_set_clockgating_state(void *handle
,
7840 enum amd_clockgating_state state
)
7845 static int si_dpm_set_powergating_state(void *handle
,
7846 enum amd_powergating_state state
)
7851 /* get temperature in millidegrees */
7852 static int si_dpm_get_temp(struct amdgpu_device
*adev
)
7855 int actual_temp
= 0;
7857 temp
= (RREG32(CG_MULT_THERMAL_STATUS
) & CTF_TEMP_MASK
) >>
7863 actual_temp
= temp
& 0x1ff;
7865 actual_temp
= (actual_temp
* 1000);
7870 static u32
si_dpm_get_sclk(struct amdgpu_device
*adev
, bool low
)
7872 struct evergreen_power_info
*eg_pi
= evergreen_get_pi(adev
);
7873 struct si_ps
*requested_state
= si_get_ps(&eg_pi
->requested_rps
);
7876 return requested_state
->performance_levels
[0].sclk
;
7878 return requested_state
->performance_levels
[requested_state
->performance_level_count
- 1].sclk
;
7881 static u32
si_dpm_get_mclk(struct amdgpu_device
*adev
, bool low
)
7883 struct evergreen_power_info
*eg_pi
= evergreen_get_pi(adev
);
7884 struct si_ps
*requested_state
= si_get_ps(&eg_pi
->requested_rps
);
7887 return requested_state
->performance_levels
[0].mclk
;
7889 return requested_state
->performance_levels
[requested_state
->performance_level_count
- 1].mclk
;
7892 static void si_dpm_print_power_state(struct amdgpu_device
*adev
,
7893 struct amdgpu_ps
*rps
)
7895 struct si_ps
*ps
= si_get_ps(rps
);
7896 struct rv7xx_pl
*pl
;
7899 amdgpu_dpm_print_class_info(rps
->class, rps
->class2
);
7900 amdgpu_dpm_print_cap_info(rps
->caps
);
7901 DRM_INFO("\tuvd vclk: %d dclk: %d\n", rps
->vclk
, rps
->dclk
);
7902 for (i
= 0; i
< ps
->performance_level_count
; i
++) {
7903 pl
= &ps
->performance_levels
[i
];
7904 if (adev
->asic_type
>= CHIP_TAHITI
)
7905 DRM_INFO("\t\tpower level %d sclk: %u mclk: %u vddc: %u vddci: %u pcie gen: %u\n",
7906 i
, pl
->sclk
, pl
->mclk
, pl
->vddc
, pl
->vddci
, pl
->pcie_gen
+ 1);
7908 DRM_INFO("\t\tpower level %d sclk: %u mclk: %u vddc: %u vddci: %u\n",
7909 i
, pl
->sclk
, pl
->mclk
, pl
->vddc
, pl
->vddci
);
7911 amdgpu_dpm_print_ps_status(adev
, rps
);
7914 static int si_dpm_early_init(void *handle
)
7917 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
7919 si_dpm_set_dpm_funcs(adev
);
7920 si_dpm_set_irq_funcs(adev
);
7924 static inline bool si_are_power_levels_equal(const struct rv7xx_pl
*si_cpl1
,
7925 const struct rv7xx_pl
*si_cpl2
)
7927 return ((si_cpl1
->mclk
== si_cpl2
->mclk
) &&
7928 (si_cpl1
->sclk
== si_cpl2
->sclk
) &&
7929 (si_cpl1
->pcie_gen
== si_cpl2
->pcie_gen
) &&
7930 (si_cpl1
->vddc
== si_cpl2
->vddc
) &&
7931 (si_cpl1
->vddci
== si_cpl2
->vddci
));
7934 static int si_check_state_equal(struct amdgpu_device
*adev
,
7935 struct amdgpu_ps
*cps
,
7936 struct amdgpu_ps
*rps
,
7939 struct si_ps
*si_cps
;
7940 struct si_ps
*si_rps
;
7943 if (adev
== NULL
|| cps
== NULL
|| rps
== NULL
|| equal
== NULL
)
7946 si_cps
= si_get_ps(cps
);
7947 si_rps
= si_get_ps(rps
);
7949 if (si_cps
== NULL
) {
7950 printk("si_cps is NULL\n");
7955 if (si_cps
->performance_level_count
!= si_rps
->performance_level_count
) {
7960 for (i
= 0; i
< si_cps
->performance_level_count
; i
++) {
7961 if (!si_are_power_levels_equal(&(si_cps
->performance_levels
[i
]),
7962 &(si_rps
->performance_levels
[i
]))) {
7968 /* If all performance levels are the same try to use the UVD clocks to break the tie.*/
7969 *equal
= ((cps
->vclk
== rps
->vclk
) && (cps
->dclk
== rps
->dclk
));
7970 *equal
&= ((cps
->evclk
== rps
->evclk
) && (cps
->ecclk
== rps
->ecclk
));
7976 const struct amd_ip_funcs si_dpm_ip_funcs
= {
7978 .early_init
= si_dpm_early_init
,
7979 .late_init
= si_dpm_late_init
,
7980 .sw_init
= si_dpm_sw_init
,
7981 .sw_fini
= si_dpm_sw_fini
,
7982 .hw_init
= si_dpm_hw_init
,
7983 .hw_fini
= si_dpm_hw_fini
,
7984 .suspend
= si_dpm_suspend
,
7985 .resume
= si_dpm_resume
,
7986 .is_idle
= si_dpm_is_idle
,
7987 .wait_for_idle
= si_dpm_wait_for_idle
,
7988 .soft_reset
= si_dpm_soft_reset
,
7989 .set_clockgating_state
= si_dpm_set_clockgating_state
,
7990 .set_powergating_state
= si_dpm_set_powergating_state
,
7993 static const struct amdgpu_dpm_funcs si_dpm_funcs
= {
7994 .get_temperature
= &si_dpm_get_temp
,
7995 .pre_set_power_state
= &si_dpm_pre_set_power_state
,
7996 .set_power_state
= &si_dpm_set_power_state
,
7997 .post_set_power_state
= &si_dpm_post_set_power_state
,
7998 .display_configuration_changed
= &si_dpm_display_configuration_changed
,
7999 .get_sclk
= &si_dpm_get_sclk
,
8000 .get_mclk
= &si_dpm_get_mclk
,
8001 .print_power_state
= &si_dpm_print_power_state
,
8002 .debugfs_print_current_performance_level
= &si_dpm_debugfs_print_current_performance_level
,
8003 .force_performance_level
= &si_dpm_force_performance_level
,
8004 .vblank_too_short
= &si_dpm_vblank_too_short
,
8005 .set_fan_control_mode
= &si_dpm_set_fan_control_mode
,
8006 .get_fan_control_mode
= &si_dpm_get_fan_control_mode
,
8007 .set_fan_speed_percent
= &si_dpm_set_fan_speed_percent
,
8008 .get_fan_speed_percent
= &si_dpm_get_fan_speed_percent
,
8009 .check_state_equal
= &si_check_state_equal
,
8010 .get_vce_clock_state
= amdgpu_get_vce_clock_state
,
8013 static void si_dpm_set_dpm_funcs(struct amdgpu_device
*adev
)
8015 if (adev
->pm
.funcs
== NULL
)
8016 adev
->pm
.funcs
= &si_dpm_funcs
;
8019 static const struct amdgpu_irq_src_funcs si_dpm_irq_funcs
= {
8020 .set
= si_dpm_set_interrupt_state
,
8021 .process
= si_dpm_process_interrupt
,
8024 static void si_dpm_set_irq_funcs(struct amdgpu_device
*adev
)
8026 adev
->pm
.dpm
.thermal
.irq
.num_types
= AMDGPU_THERMAL_IRQ_LAST
;
8027 adev
->pm
.dpm
.thermal
.irq
.funcs
= &si_dpm_irq_funcs
;