drivers/gpu/drm/msm/adreno/a5xx_power.c
/* Copyright (c) 2016 The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/pm_opp.h>
#include "a5xx_gpu.h"

/*
 * The GPMU data block is a block of shared registers that can be used to
 * communicate back and forth. These "registers" are defined by convention
 * with the GPMU firmware and are not bound to any specific hardware design.
 */
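
/*
 * Rough sketch of how this driver uses the data block, inferred from the
 * offsets defined below and the writes in a5xx_lm_setup() rather than from
 * any published documentation: an init magic word at AGC_INIT_BASE + 5, and
 * a small message (state, command, payload size in bytes, payload dwords)
 * starting at AGC_INIT_BASE + 7.
 */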

#define AGC_INIT_BASE REG_A5XX_GPMU_DATA_RAM_BASE
#define AGC_INIT_MSG_MAGIC (AGC_INIT_BASE + 5)
#define AGC_MSG_BASE (AGC_INIT_BASE + 7)

#define AGC_MSG_STATE (AGC_MSG_BASE + 0)
#define AGC_MSG_COMMAND (AGC_MSG_BASE + 1)
#define AGC_MSG_PAYLOAD_SIZE (AGC_MSG_BASE + 3)
#define AGC_MSG_PAYLOAD(_o) ((AGC_MSG_BASE + 5) + (_o))

#define AGC_POWER_CONFIG_PRODUCTION_ID 1
#define AGC_INIT_MSG_VALUE 0xBABEFACE

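/*
 * Register/value pairs written to the GPMU power sequence registers by
 * a5xx_lm_setup(). The values are opaque, hardware-specific tuning data;
 * no public documentation for them appears to exist.
 */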
static struct {
	uint32_t reg;
	uint32_t value;
} a5xx_sequence_regs[] = {
	{ 0xB9A1, 0x00010303 },
	{ 0xB9A2, 0x13000000 },
	{ 0xB9A3, 0x00460020 },
	{ 0xB9A4, 0x10000000 },
	{ 0xB9A5, 0x040A1707 },
	{ 0xB9A6, 0x00010000 },
	{ 0xB9A7, 0x0E000904 },
	{ 0xB9A8, 0x10000000 },
	{ 0xB9A9, 0x01165000 },
	{ 0xB9AA, 0x000E0002 },
	{ 0xB9AB, 0x03884141 },
	{ 0xB9AC, 0x10000840 },
	{ 0xB9AD, 0x572A5000 },
	{ 0xB9AE, 0x00000003 },
	{ 0xB9AF, 0x00000000 },
	{ 0xB9B0, 0x10000000 },
	{ 0xB828, 0x6C204010 },
	{ 0xB829, 0x6C204011 },
	{ 0xB82A, 0x6C204012 },
	{ 0xB82B, 0x6C204013 },
	{ 0xB82C, 0x6C204014 },
	{ 0xB90F, 0x00000004 },
	{ 0xB910, 0x00000002 },
	{ 0xB911, 0x00000002 },
	{ 0xB912, 0x00000002 },
	{ 0xB913, 0x00000002 },
	{ 0xB92F, 0x00000004 },
	{ 0xB930, 0x00000005 },
	{ 0xB931, 0x00000005 },
	{ 0xB932, 0x00000005 },
	{ 0xB933, 0x00000005 },
	{ 0xB96F, 0x00000001 },
	{ 0xB970, 0x00000003 },
	{ 0xB94F, 0x00000004 },
	{ 0xB950, 0x0000000B },
	{ 0xB951, 0x0000000B },
	{ 0xB952, 0x0000000B },
	{ 0xB953, 0x0000000B },
	{ 0xB907, 0x00000019 },
	{ 0xB927, 0x00000019 },
	{ 0xB947, 0x00000019 },
	{ 0xB967, 0x00000019 },
	{ 0xB987, 0x00000019 },
	{ 0xB906, 0x00220001 },
	{ 0xB926, 0x00220001 },
	{ 0xB946, 0x00220001 },
	{ 0xB966, 0x00220001 },
	{ 0xB986, 0x00300000 },
	{ 0xAC40, 0x0340FF41 },
	{ 0xAC41, 0x03BEFED0 },
	{ 0xAC42, 0x00331FED },
	{ 0xAC43, 0x021FFDD3 },
	{ 0xAC44, 0x5555AAAA },
	{ 0xAC45, 0x5555AAAA },
	{ 0xB9BA, 0x00000008 },
};

/*
 * Get the actual voltage value for the operating point at the specified
 * frequency
 */
static inline uint32_t _get_mvolts(struct msm_gpu *gpu, uint32_t freq)
{
	struct drm_device *dev = gpu->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct platform_device *pdev = priv->gpu_pdev;
	struct dev_pm_opp *opp;
	u32 ret = 0;

	opp = dev_pm_opp_find_freq_exact(&pdev->dev, freq, true);

	if (!IS_ERR(opp)) {
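		/* dev_pm_opp_get_voltage() reports microvolts; convert to millivolts */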
		ret = dev_pm_opp_get_voltage(opp) / 1000;
		dev_pm_opp_put(opp);
	}

	return ret;
}

/* Set up thermal limits management */
static void a5xx_lm_setup(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
	unsigned int i;

	/* Write the block of sequence registers */
	for (i = 0; i < ARRAY_SIZE(a5xx_sequence_regs); i++)
		gpu_write(gpu, a5xx_sequence_regs[i].reg,
			a5xx_sequence_regs[i].value);

	/* Hard code the A530 GPU thermal sensor ID for the GPMU */
	gpu_write(gpu, REG_A5XX_GPMU_TEMP_SENSOR_ID, 0x60007);
	gpu_write(gpu, REG_A5XX_GPMU_DELTA_TEMP_THRESHOLD, 0x01);
	gpu_write(gpu, REG_A5XX_GPMU_TEMP_SENSOR_CONFIG, 0x01);

	/* Until we get clock scaling 0 is always the active power level */
	gpu_write(gpu, REG_A5XX_GPMU_GPMU_VOLTAGE, 0x80000000 | 0);

	gpu_write(gpu, REG_A5XX_GPMU_BASE_LEAKAGE, a5xx_gpu->lm_leakage);

	/* The threshold is fixed at 6000 for A530 */
	gpu_write(gpu, REG_A5XX_GPMU_GPMU_PWR_THRESHOLD, 0x80000000 | 6000);

	gpu_write(gpu, REG_A5XX_GPMU_BEC_ENABLE, 0x10001FFF);
	gpu_write(gpu, REG_A5XX_GDPM_CONFIG1, 0x00201FF1);

	/* Write the voltage table */
	gpu_write(gpu, REG_A5XX_GPMU_BEC_ENABLE, 0x10001FFF);
	gpu_write(gpu, REG_A5XX_GDPM_CONFIG1, 0x201FF1);

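	/*
	 * Assemble the AGC message in the GPMU data RAM: state, command, a
	 * four dword payload (max power, a constant 1 whose meaning is not
	 * documented here, voltage in mV, frequency in MHz), the payload
	 * size in bytes, and finally the init magic value, presumably
	 * marking the message as ready for the GPMU.
	 */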
	gpu_write(gpu, AGC_MSG_STATE, 1);
	gpu_write(gpu, AGC_MSG_COMMAND, AGC_POWER_CONFIG_PRODUCTION_ID);

	/* Write the max power - hard coded to 5448 for A530 */
	gpu_write(gpu, AGC_MSG_PAYLOAD(0), 5448);
	gpu_write(gpu, AGC_MSG_PAYLOAD(1), 1);

	/*
	 * For now just write the one voltage level - we will do more when we
	 * can do scaling
	 */
	gpu_write(gpu, AGC_MSG_PAYLOAD(2), _get_mvolts(gpu, gpu->fast_rate));
	gpu_write(gpu, AGC_MSG_PAYLOAD(3), gpu->fast_rate / 1000000);

	gpu_write(gpu, AGC_MSG_PAYLOAD_SIZE, 4 * sizeof(uint32_t));
	gpu_write(gpu, AGC_INIT_MSG_MAGIC, AGC_INIT_MSG_VALUE);
}

/* Enable SP/TP power collapse */
static void a5xx_pc_init(struct msm_gpu *gpu)
{
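	/*
	 * Inter-frame power collapse control, binning control, hysteresis
	 * and stagger delay values; these appear to be fixed,
	 * hardware-specific settings with no public documentation.
	 */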
	gpu_write(gpu, REG_A5XX_GPMU_PWR_COL_INTER_FRAME_CTRL, 0x7F);
	gpu_write(gpu, REG_A5XX_GPMU_PWR_COL_BINNING_CTRL, 0);
	gpu_write(gpu, REG_A5XX_GPMU_PWR_COL_INTER_FRAME_HYST, 0xA0080);
	gpu_write(gpu, REG_A5XX_GPMU_PWR_COL_STAGGER_DELAY, 0x600040);
}

/* Enable the GPMU microcontroller */
static int a5xx_gpmu_init(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
	struct msm_ringbuffer *ring = gpu->rb[0];

	if (!a5xx_gpu->gpmu_dwords)
		return 0;

	/* Turn off protected mode for this operation */
	OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
	OUT_RING(ring, 0);

	/* Kick off the IB to load the GPMU microcode */
	OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
	OUT_RING(ring, lower_32_bits(a5xx_gpu->gpmu_iova));
	OUT_RING(ring, upper_32_bits(a5xx_gpu->gpmu_iova));
	OUT_RING(ring, a5xx_gpu->gpmu_dwords);

	/* Turn back on protected mode */
	OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
	OUT_RING(ring, 1);

	gpu->funcs->flush(gpu, ring);

	if (!a5xx_idle(gpu, ring)) {
		DRM_ERROR("%s: Unable to load GPMU firmware. GPMU will not be active\n",
			gpu->name);
		return -EINVAL;
	}

	gpu_write(gpu, REG_A5XX_GPMU_WFI_CONFIG, 0x4014);

	/* Kick off the GPMU */
	gpu_write(gpu, REG_A5XX_GPMU_CM3_SYSRESET, 0x0);

	/*
	 * Wait for the GPMU to respond. It isn't fatal if it doesn't, we just
	 * won't have advanced power collapse.
	 */
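	/* 0xBABEFACE in GPMU_GENERAL_0 presumably signals that the firmware booted */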
	if (spin_usecs(gpu, 25, REG_A5XX_GPMU_GENERAL_0, 0xFFFFFFFF,
		0xBABEFACE))
		DRM_ERROR("%s: GPMU firmware initialization timed out\n",
			gpu->name);

	return 0;
}

/* Enable limits management */
static void a5xx_lm_enable(struct msm_gpu *gpu)
{
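	/*
	 * Unmask the GDPM and GPMU voltage/temperature interrupts and enable
	 * clock throttling; the specific values appear to be fixed settings
	 * for A530 with no public documentation.
	 */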
	gpu_write(gpu, REG_A5XX_GDPM_INT_MASK, 0x0);
	gpu_write(gpu, REG_A5XX_GDPM_INT_EN, 0x0A);
	gpu_write(gpu, REG_A5XX_GPMU_GPMU_VOLTAGE_INTR_EN_MASK, 0x01);
	gpu_write(gpu, REG_A5XX_GPMU_TEMP_THRESHOLD_INTR_EN_MASK, 0x50000);
	gpu_write(gpu, REG_A5XX_GPMU_THROTTLE_UNMASK_FORCE_CTRL, 0x30000);

	gpu_write(gpu, REG_A5XX_GPMU_CLOCK_THROTTLE_CTRL, 0x011);
}

int a5xx_power_init(struct msm_gpu *gpu)
{
	int ret;

	/* Set up the limits management */
	a5xx_lm_setup(gpu);

	/* Set up SP/TP power collapse */
	a5xx_pc_init(gpu);

	/* Start the GPMU */
	ret = a5xx_gpmu_init(gpu);
	if (ret)
		return ret;

	/* Start the limits management */
	a5xx_lm_enable(gpu);

	return 0;
}

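/*
 * Parse the raw GPMU firmware image and build a buffer of PKT4 register
 * writes that a5xx_gpmu_init() can execute through an indirect buffer to
 * load the microcode into the GPMU instruction RAM.
 */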
void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
	struct drm_device *drm = gpu->dev;
	uint32_t dwords = 0, offset = 0, bosize;
	unsigned int *data, *ptr, *cmds;
	unsigned int cmds_size;

	if (a5xx_gpu->gpmu_bo)
		return;

	data = (unsigned int *) adreno_gpu->fw[ADRENO_FW_GPMU]->data;

	/*
	 * The first dword is the size of the remaining data in dwords. Use it
	 * as a checksum of sorts and make sure it matches the actual size of
	 * the firmware that we read
	 */

	if (adreno_gpu->fw[ADRENO_FW_GPMU]->size < 8 ||
		(data[0] < 2) || (data[0] >=
			(adreno_gpu->fw[ADRENO_FW_GPMU]->size >> 2)))
		return;

	/* The second dword is an ID - look for 2 (GPMU_FIRMWARE_ID) */
	if (data[1] != 2)
		return;

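	/*
	 * Layout implied by the parsing below (an inference, not a documented
	 * format): data[0] is the payload size in dwords, data[1] is the
	 * firmware ID, and data[2] is the number of additional header dwords
	 * to skip before the register write commands begin.
	 */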
	cmds = data + data[2] + 3;
	cmds_size = data[0] - data[2] - 2;

	/*
	 * A single type4 opcode can only have so many values attached so
	 * add enough opcodes to load all of the commands
	 */
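	/*
	 * One extra dword per TYPE4_MAX_PAYLOAD chunk for the PKT4 header;
	 * the << 2 converts dwords to bytes.
	 */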
	bosize = (cmds_size + (cmds_size / TYPE4_MAX_PAYLOAD) + 1) << 2;

	ptr = msm_gem_kernel_new_locked(drm, bosize,
		MSM_BO_UNCACHED | MSM_BO_GPU_READONLY, gpu->aspace,
		&a5xx_gpu->gpmu_bo, &a5xx_gpu->gpmu_iova);
	if (IS_ERR(ptr))
		return;

	msm_gem_object_set_name(a5xx_gpu->gpmu_bo, "gpmufw");

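	/*
	 * Copy the command stream into the BO, prefixing each chunk of at
	 * most TYPE4_MAX_PAYLOAD dwords with a PKT4 header targeting the
	 * next free offset in the GPMU instruction RAM.
	 */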
	while (cmds_size > 0) {
		int i;
		uint32_t _size = cmds_size > TYPE4_MAX_PAYLOAD ?
			TYPE4_MAX_PAYLOAD : cmds_size;

		ptr[dwords++] = PKT4(REG_A5XX_GPMU_INST_RAM_BASE + offset,
			_size);

		for (i = 0; i < _size; i++)
			ptr[dwords++] = *cmds++;

		offset += _size;
		cmds_size -= _size;
	}

	msm_gem_put_vaddr(a5xx_gpu->gpmu_bo);
	a5xx_gpu->gpmu_dwords = dwords;
}