/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/types.h>
#include <linux/cpumask.h>
#include <linux/qcom_scm.h>
#include <linux/dma-mapping.h>
#include <linux/of_reserved_mem.h>
#include <linux/soc/qcom/mdt_loader.h>
#include "msm_gem.h"
#include "msm_mmu.h"
#include "a5xx_gpu.h"

extern bool hang_debug;
static void a5xx_dump(struct msm_gpu *gpu);

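/*
 * Peripheral Authentication Service (PAS) identifier passed to the SCM calls
 * below so the secure world knows which subsystem image is being handled;
 * 13 identifies the GPU on these targets.
 */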
#define GPU_PAS_ID 13

#if IS_ENABLED(CONFIG_QCOM_MDT_LOADER)

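/*
 * Load the zap shader firmware (a split MDT image) into reserved memory and
 * hand it to the secure world for authentication via SCM.
 */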
static int zap_shader_load_mdt(struct device *dev, const char *fwname)
{
	const struct firmware *fw;
	phys_addr_t mem_phys;
	ssize_t mem_size;
	void *mem_region = NULL;
	int ret;

	/* Request the MDT file for the firmware */
	ret = request_firmware(&fw, fwname, dev);
	if (ret) {
		DRM_DEV_ERROR(dev, "Unable to load %s\n", fwname);
		return ret;
	}

	/* Figure out how much memory we need */
	mem_size = qcom_mdt_get_size(fw);
	if (mem_size < 0) {
		ret = mem_size;
		goto out;
	}

	/* Allocate memory for the firmware image */
	mem_region = dmam_alloc_coherent(dev, mem_size, &mem_phys, GFP_KERNEL);
	if (!mem_region) {
		ret = -ENOMEM;
		goto out;
	}

	/* Load the rest of the MDT */
	ret = qcom_mdt_load(dev, fw, fwname, GPU_PAS_ID, mem_region, mem_phys,
		mem_size);
	if (ret)
		goto out;

	/* Send the image to the secure world */
	ret = qcom_scm_pas_auth_and_reset(GPU_PAS_ID);
	if (ret)
		DRM_DEV_ERROR(dev, "Unable to authorize the image\n");

out:
	release_firmware(fw);

	return ret;
}
#else
static int zap_shader_load_mdt(struct device *dev, const char *fwname)
{
	return -ENODEV;
}
#endif

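/*
 * Write a submit into the ringbuffer: one indirect-buffer packet per command
 * buffer, then a scratch register write and a CACHE_FLUSH_TS event so the CP
 * writes the fence seqno back to memory (and interrupts us) when the submit
 * retires.
 */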
static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
	struct msm_file_private *ctx)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct msm_drm_private *priv = gpu->dev->dev_private;
	struct msm_ringbuffer *ring = gpu->rb;
	unsigned int i, ibs = 0;

	for (i = 0; i < submit->nr_cmds; i++) {
		switch (submit->cmd[i].type) {
		case MSM_SUBMIT_CMD_IB_TARGET_BUF:
			break;
		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
			if (priv->lastctx == ctx)
				break;
			/* fall through - a restore buffer that must run is emitted like a normal IB */
		case MSM_SUBMIT_CMD_BUF:
			OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
			OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
			OUT_RING(ring, upper_32_bits(submit->cmd[i].iova));
			OUT_RING(ring, submit->cmd[i].size);
			ibs++;
			break;
		}
	}

	OUT_PKT4(ring, REG_A5XX_CP_SCRATCH_REG(2), 1);
	OUT_RING(ring, submit->fence->seqno);

	OUT_PKT7(ring, CP_EVENT_WRITE, 4);
	OUT_RING(ring, CACHE_FLUSH_TS | (1 << 31));
	OUT_RING(ring, lower_32_bits(rbmemptr(adreno_gpu, fence)));
	OUT_RING(ring, upper_32_bits(rbmemptr(adreno_gpu, fence)));
	OUT_RING(ring, submit->fence->seqno);

	gpu->funcs->flush(gpu);
}

struct a5xx_hwcg {
	u32 offset;
	u32 value;
};

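/*
 * Target-specific hardware clock gating (HWCG) settings. These magic values
 * are tuning data for the clock gating logic and are written verbatim at
 * init time.
 */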
static const struct a5xx_hwcg a530_hwcg[] = {
	{REG_A5XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
	{REG_A5XX_RBBM_CLOCK_CNTL_SP1, 0x02222222},
	{REG_A5XX_RBBM_CLOCK_CNTL_SP2, 0x02222222},
	{REG_A5XX_RBBM_CLOCK_CNTL_SP3, 0x02222222},
	{REG_A5XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
	{REG_A5XX_RBBM_CLOCK_CNTL2_SP1, 0x02222220},
	{REG_A5XX_RBBM_CLOCK_CNTL2_SP2, 0x02222220},
	{REG_A5XX_RBBM_CLOCK_CNTL2_SP3, 0x02222220},
	{REG_A5XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
	{REG_A5XX_RBBM_CLOCK_HYST_SP1, 0x0000F3CF},
	{REG_A5XX_RBBM_CLOCK_HYST_SP2, 0x0000F3CF},
	{REG_A5XX_RBBM_CLOCK_HYST_SP3, 0x0000F3CF},
	{REG_A5XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
	{REG_A5XX_RBBM_CLOCK_DELAY_SP1, 0x00000080},
	{REG_A5XX_RBBM_CLOCK_DELAY_SP2, 0x00000080},
	{REG_A5XX_RBBM_CLOCK_DELAY_SP3, 0x00000080},
	{REG_A5XX_RBBM_CLOCK_CNTL_TP0, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL_TP1, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL_TP2, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL_TP3, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL2_TP1, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL2_TP2, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL2_TP3, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL3_TP0, 0x00002222},
	{REG_A5XX_RBBM_CLOCK_CNTL3_TP1, 0x00002222},
	{REG_A5XX_RBBM_CLOCK_CNTL3_TP2, 0x00002222},
	{REG_A5XX_RBBM_CLOCK_CNTL3_TP3, 0x00002222},
	{REG_A5XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
	{REG_A5XX_RBBM_CLOCK_HYST_TP1, 0x77777777},
	{REG_A5XX_RBBM_CLOCK_HYST_TP2, 0x77777777},
	{REG_A5XX_RBBM_CLOCK_HYST_TP3, 0x77777777},
	{REG_A5XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
	{REG_A5XX_RBBM_CLOCK_HYST2_TP1, 0x77777777},
	{REG_A5XX_RBBM_CLOCK_HYST2_TP2, 0x77777777},
	{REG_A5XX_RBBM_CLOCK_HYST2_TP3, 0x77777777},
	{REG_A5XX_RBBM_CLOCK_HYST3_TP0, 0x00007777},
	{REG_A5XX_RBBM_CLOCK_HYST3_TP1, 0x00007777},
	{REG_A5XX_RBBM_CLOCK_HYST3_TP2, 0x00007777},
	{REG_A5XX_RBBM_CLOCK_HYST3_TP3, 0x00007777},
	{REG_A5XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
	{REG_A5XX_RBBM_CLOCK_DELAY_TP1, 0x11111111},
	{REG_A5XX_RBBM_CLOCK_DELAY_TP2, 0x11111111},
	{REG_A5XX_RBBM_CLOCK_DELAY_TP3, 0x11111111},
	{REG_A5XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
	{REG_A5XX_RBBM_CLOCK_DELAY2_TP1, 0x11111111},
	{REG_A5XX_RBBM_CLOCK_DELAY2_TP2, 0x11111111},
	{REG_A5XX_RBBM_CLOCK_DELAY2_TP3, 0x11111111},
	{REG_A5XX_RBBM_CLOCK_DELAY3_TP0, 0x00001111},
	{REG_A5XX_RBBM_CLOCK_DELAY3_TP1, 0x00001111},
	{REG_A5XX_RBBM_CLOCK_DELAY3_TP2, 0x00001111},
	{REG_A5XX_RBBM_CLOCK_DELAY3_TP3, 0x00001111},
	{REG_A5XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222},
	{REG_A5XX_RBBM_CLOCK_HYST_UCHE, 0x00444444},
	{REG_A5XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
	{REG_A5XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL_RB1, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL_RB2, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL_RB3, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL2_RB0, 0x00222222},
	{REG_A5XX_RBBM_CLOCK_CNTL2_RB1, 0x00222222},
	{REG_A5XX_RBBM_CLOCK_CNTL2_RB2, 0x00222222},
	{REG_A5XX_RBBM_CLOCK_CNTL2_RB3, 0x00222222},
	{REG_A5XX_RBBM_CLOCK_CNTL_CCU0, 0x00022220},
	{REG_A5XX_RBBM_CLOCK_CNTL_CCU1, 0x00022220},
	{REG_A5XX_RBBM_CLOCK_CNTL_CCU2, 0x00022220},
	{REG_A5XX_RBBM_CLOCK_CNTL_CCU3, 0x00022220},
	{REG_A5XX_RBBM_CLOCK_CNTL_RAC, 0x05522222},
	{REG_A5XX_RBBM_CLOCK_CNTL2_RAC, 0x00505555},
	{REG_A5XX_RBBM_CLOCK_HYST_RB_CCU0, 0x04040404},
	{REG_A5XX_RBBM_CLOCK_HYST_RB_CCU1, 0x04040404},
	{REG_A5XX_RBBM_CLOCK_HYST_RB_CCU2, 0x04040404},
	{REG_A5XX_RBBM_CLOCK_HYST_RB_CCU3, 0x04040404},
	{REG_A5XX_RBBM_CLOCK_HYST_RAC, 0x07444044},
	{REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_0, 0x00000002},
	{REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_1, 0x00000002},
	{REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_2, 0x00000002},
	{REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_3, 0x00000002},
	{REG_A5XX_RBBM_CLOCK_DELAY_RAC, 0x00010011},
	{REG_A5XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
	{REG_A5XX_RBBM_CLOCK_MODE_GPC, 0x02222222},
	{REG_A5XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
	{REG_A5XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
	{REG_A5XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
	{REG_A5XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
	{REG_A5XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
	{REG_A5XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
	{REG_A5XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
	{REG_A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222}
};

static const struct {
	int (*test)(struct adreno_gpu *gpu);
	const struct a5xx_hwcg *regs;
	unsigned int count;
} a5xx_hwcg_regs[] = {
	{ adreno_is_a530, a530_hwcg, ARRAY_SIZE(a530_hwcg), },
};

static void _a5xx_enable_hwcg(struct msm_gpu *gpu,
		const struct a5xx_hwcg *regs, unsigned int count)
{
	unsigned int i;

	for (i = 0; i < count; i++)
		gpu_write(gpu, regs[i].offset, regs[i].value);

	gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, 0xAAA8AA00);
	gpu_write(gpu, REG_A5XX_RBBM_ISDB_CNT, 0x182);
}

static void a5xx_enable_hwcg(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(a5xx_hwcg_regs); i++) {
		if (a5xx_hwcg_regs[i].test(adreno_gpu)) {
			_a5xx_enable_hwcg(gpu, a5xx_hwcg_regs[i].regs,
				a5xx_hwcg_regs[i].count);
			return;
		}
	}
}

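/*
 * Send the CP_ME_INIT packet that initializes the CP microcode state:
 * enable multiple hardware contexts and error detection, apply any
 * per-target microcode workarounds, then wait for the GPU to go idle.
 */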
static int a5xx_me_init(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct msm_ringbuffer *ring = gpu->rb;

	OUT_PKT7(ring, CP_ME_INIT, 8);

	OUT_RING(ring, 0x0000002F);

	/* Enable multiple hardware contexts */
	OUT_RING(ring, 0x00000003);

	/* Enable error detection */
	OUT_RING(ring, 0x20000000);

	/* Don't enable header dump */
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);

	/* Specify workarounds for various microcode issues */
	if (adreno_is_a530(adreno_gpu)) {
		/* Workaround for token end syncs
		 * Force a WFI after every direct-render 3D mode draw and every
		 * 2D mode 3 draw
		 */
		OUT_RING(ring, 0x0000000B);
	} else {
		/* No workarounds enabled */
		OUT_RING(ring, 0x00000000);
	}

	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);

	gpu->funcs->flush(gpu);

	return a5xx_idle(gpu) ? 0 : -EINVAL;
}

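/*
 * Allocate a GEM buffer and copy a firmware image into it, skipping the
 * four-byte version header at the start of the firmware file.
 */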
static struct drm_gem_object *a5xx_ucode_load_bo(struct msm_gpu *gpu,
		const struct firmware *fw, u64 *iova)
{
	struct drm_device *drm = gpu->dev;
	struct drm_gem_object *bo;
	void *ptr;

	mutex_lock(&drm->struct_mutex);
	bo = msm_gem_new(drm, fw->size - 4, MSM_BO_UNCACHED);
	mutex_unlock(&drm->struct_mutex);

	if (IS_ERR(bo))
		return bo;

	ptr = msm_gem_get_vaddr(bo);
	if (!ptr) {
		drm_gem_object_unreference_unlocked(bo);
		return ERR_PTR(-ENOMEM);
	}

	if (iova) {
		int ret = msm_gem_get_iova(bo, gpu->id, iova);

		if (ret) {
			drm_gem_object_unreference_unlocked(bo);
			return ERR_PTR(ret);
		}
	}

	memcpy(ptr, &fw->data[4], fw->size - 4);

	msm_gem_put_vaddr(bo);
	return bo;
}

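/*
 * Load the PM4 and PFP microcode into GPU buffers (once) and program their
 * addresses into the CP instruction base registers.
 */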
static int a5xx_ucode_init(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
	int ret;

	if (!a5xx_gpu->pm4_bo) {
		a5xx_gpu->pm4_bo = a5xx_ucode_load_bo(gpu, adreno_gpu->pm4,
			&a5xx_gpu->pm4_iova);

		if (IS_ERR(a5xx_gpu->pm4_bo)) {
			ret = PTR_ERR(a5xx_gpu->pm4_bo);
			a5xx_gpu->pm4_bo = NULL;
			dev_err(gpu->dev->dev, "could not allocate PM4: %d\n",
				ret);
			return ret;
		}
	}

	if (!a5xx_gpu->pfp_bo) {
		a5xx_gpu->pfp_bo = a5xx_ucode_load_bo(gpu, adreno_gpu->pfp,
			&a5xx_gpu->pfp_iova);

		if (IS_ERR(a5xx_gpu->pfp_bo)) {
			ret = PTR_ERR(a5xx_gpu->pfp_bo);
			a5xx_gpu->pfp_bo = NULL;
			dev_err(gpu->dev->dev, "could not allocate PFP: %d\n",
				ret);
			return ret;
		}
	}

	gpu_write64(gpu, REG_A5XX_CP_ME_INSTR_BASE_LO,
		REG_A5XX_CP_ME_INSTR_BASE_HI, a5xx_gpu->pm4_iova);

	gpu_write64(gpu, REG_A5XX_CP_PFP_INSTR_BASE_LO,
		REG_A5XX_CP_PFP_INSTR_BASE_HI, a5xx_gpu->pfp_iova);

	return 0;
}

#define SCM_GPU_ZAP_SHADER_RESUME 0

static int a5xx_zap_shader_resume(struct msm_gpu *gpu)
{
	int ret;

	ret = qcom_scm_set_remote_state(SCM_GPU_ZAP_SHADER_RESUME, GPU_PAS_ID);
	if (ret)
		DRM_ERROR("%s: zap-shader resume failed: %d\n",
			gpu->name, ret);

	return ret;
}

/* Set up a child device to "own" the zap shader */
static int a5xx_zap_shader_dev_init(struct device *parent, struct device *dev)
{
	struct device_node *node;
	int ret;

	if (dev->parent)
		return 0;

	/* Find the sub-node for the zap shader */
	node = of_get_child_by_name(parent->of_node, "zap-shader");
	if (!node) {
		DRM_DEV_ERROR(parent, "zap-shader not found in device tree\n");
		return -ENODEV;
	}

	dev->parent = parent;
	dev->of_node = node;
	dev_set_name(dev, "adreno_zap_shader");

	ret = device_register(dev);
	if (ret) {
		DRM_DEV_ERROR(parent, "Couldn't register zap shader device\n");
		goto out;
	}

	ret = of_reserved_mem_device_init(dev);
	if (ret) {
		DRM_DEV_ERROR(parent, "Unable to set up the reserved memory\n");
		device_unregister(dev);
	}

out:
	if (ret)
		dev->parent = NULL;

	return ret;
}

static int a5xx_zap_shader_init(struct msm_gpu *gpu)
{
	static bool loaded;
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
	struct platform_device *pdev = a5xx_gpu->pdev;
	int ret;

	/*
	 * If the zap shader is already loaded into memory we just need to kick
	 * the remote processor to reinitialize it
	 */
	if (loaded)
		return a5xx_zap_shader_resume(gpu);

	/* We need SCM to be able to load the firmware */
	if (!qcom_scm_is_available()) {
		DRM_DEV_ERROR(&pdev->dev, "SCM is not available\n");
		return -EPROBE_DEFER;
	}

	/* Each GPU has a target specific zap shader firmware name to use */
	if (!adreno_gpu->info->zapfw) {
		DRM_DEV_ERROR(&pdev->dev,
			"Zap shader firmware file not specified for this target\n");
		return -ENODEV;
	}

	ret = a5xx_zap_shader_dev_init(&pdev->dev, &a5xx_gpu->zap_dev);

	if (!ret)
		ret = zap_shader_load_mdt(&a5xx_gpu->zap_dev,
			adreno_gpu->info->zapfw);

	loaded = !ret;

	return ret;
}

#define A5XX_INT_MASK (A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR | \
	A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT | \
	A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT | \
	A5XX_RBBM_INT_0_MASK_RBBM_PFP_MS_TIMEOUT | \
	A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT | \
	A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW | \
	A5XX_RBBM_INT_0_MASK_CP_HW_ERROR | \
	A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS | \
	A5XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS | \
	A5XX_RBBM_INT_0_MASK_GPMU_VOLTAGE_DROOP)

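/*
 * One-time hardware bringup: program QoS and error reporting, set up the
 * UCHE ranges and CP register protection, load the GPMU and CP microcode,
 * start the micro engine and switch the GPU out of secure mode.
 */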
static int a5xx_hw_init(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	int ret;

	gpu_write(gpu, REG_A5XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003);

	/* Make all blocks contribute to the GPU BUSY perf counter */
	gpu_write(gpu, REG_A5XX_RBBM_PERFCTR_GPU_BUSY_MASKED, 0xFFFFFFFF);

	/* Enable RBBM error reporting bits */
	gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL0, 0x00000001);

	if (adreno_gpu->info->quirks & ADRENO_QUIRK_FAULT_DETECT_MASK) {
		/*
		 * Mask out the activity signals from RB1-3 to avoid false
		 * positives
		 */

		gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL11,
			0xF0000000);
		gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL12,
			0xFFFFFFFF);
		gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL13,
			0xFFFFFFFF);
		gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL14,
			0xFFFFFFFF);
		gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL15,
			0xFFFFFFFF);
		gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL16,
			0xFFFFFFFF);
		gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL17,
			0xFFFFFFFF);
		gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL18,
			0xFFFFFFFF);
	}

	/* Enable fault detection */
	gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_INT_CNTL,
		(1 << 30) | 0xFFFF);

	/* Turn on performance counters */
	gpu_write(gpu, REG_A5XX_RBBM_PERFCTR_CNTL, 0x01);

	/* Increase VFD cache access so LRZ and other data gets evicted less */
	gpu_write(gpu, REG_A5XX_UCHE_CACHE_WAYS, 0x02);

	/* Disable L2 bypass in the UCHE */
	gpu_write(gpu, REG_A5XX_UCHE_TRAP_BASE_LO, 0xFFFF0000);
	gpu_write(gpu, REG_A5XX_UCHE_TRAP_BASE_HI, 0x0001FFFF);
	gpu_write(gpu, REG_A5XX_UCHE_WRITE_THRU_BASE_LO, 0xFFFF0000);
	gpu_write(gpu, REG_A5XX_UCHE_WRITE_THRU_BASE_HI, 0x0001FFFF);

	/* Set the GMEM VA range (0 to gpu->gmem) */
	gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MIN_LO, 0x00100000);
	gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MIN_HI, 0x00000000);
	gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MAX_LO,
		0x00100000 + adreno_gpu->gmem - 1);
	gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MAX_HI, 0x00000000);

	gpu_write(gpu, REG_A5XX_CP_MEQ_THRESHOLDS, 0x40);
	gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x40);
	gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_2, 0x80000060);
	gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_1, 0x40201B16);

	gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, (0x400 << 11 | 0x300 << 22));

	if (adreno_gpu->info->quirks & ADRENO_QUIRK_TWO_PASS_USE_WFI)
		gpu_rmw(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0, (1 << 8));

	gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0xc0200100);

	/* Enable USE_RETENTION_FLOPS */
	gpu_write(gpu, REG_A5XX_CP_CHICKEN_DBG, 0x02000000);

	/* Enable ME/PFP split notification */
	gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL1, 0xA6FFFFFF);

	/* Enable HWCG */
	a5xx_enable_hwcg(gpu);

	gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL2, 0x0000003F);

	/* Set the highest bank bit */
	gpu_write(gpu, REG_A5XX_TPL1_MODE_CNTL, 2 << 7);
	gpu_write(gpu, REG_A5XX_RB_MODE_CNTL, 2 << 1);

	/* Protect registers from the CP */
	gpu_write(gpu, REG_A5XX_CP_PROTECT_CNTL, 0x00000007);

	/* RBBM */
	gpu_write(gpu, REG_A5XX_CP_PROTECT(0), ADRENO_PROTECT_RW(0x04, 4));
	gpu_write(gpu, REG_A5XX_CP_PROTECT(1), ADRENO_PROTECT_RW(0x08, 8));
	gpu_write(gpu, REG_A5XX_CP_PROTECT(2), ADRENO_PROTECT_RW(0x10, 16));
	gpu_write(gpu, REG_A5XX_CP_PROTECT(3), ADRENO_PROTECT_RW(0x20, 32));
	gpu_write(gpu, REG_A5XX_CP_PROTECT(4), ADRENO_PROTECT_RW(0x40, 64));
	gpu_write(gpu, REG_A5XX_CP_PROTECT(5), ADRENO_PROTECT_RW(0x80, 64));

	/* Content protect */
	gpu_write(gpu, REG_A5XX_CP_PROTECT(6),
		ADRENO_PROTECT_RW(REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO,
			16));
	gpu_write(gpu, REG_A5XX_CP_PROTECT(7),
		ADRENO_PROTECT_RW(REG_A5XX_RBBM_SECVID_TRUST_CNTL, 2));

	/* CP */
	gpu_write(gpu, REG_A5XX_CP_PROTECT(8), ADRENO_PROTECT_RW(0x800, 64));
	gpu_write(gpu, REG_A5XX_CP_PROTECT(9), ADRENO_PROTECT_RW(0x840, 8));
	gpu_write(gpu, REG_A5XX_CP_PROTECT(10), ADRENO_PROTECT_RW(0x880, 32));
	gpu_write(gpu, REG_A5XX_CP_PROTECT(11), ADRENO_PROTECT_RW(0xAA0, 1));

	/* RB */
	gpu_write(gpu, REG_A5XX_CP_PROTECT(12), ADRENO_PROTECT_RW(0xCC0, 1));
	gpu_write(gpu, REG_A5XX_CP_PROTECT(13), ADRENO_PROTECT_RW(0xCF0, 2));

	/* VPC */
	gpu_write(gpu, REG_A5XX_CP_PROTECT(14), ADRENO_PROTECT_RW(0xE68, 8));
	gpu_write(gpu, REG_A5XX_CP_PROTECT(15), ADRENO_PROTECT_RW(0xE70, 4));

	/* UCHE */
	gpu_write(gpu, REG_A5XX_CP_PROTECT(16), ADRENO_PROTECT_RW(0xE80, 16));

	if (adreno_is_a530(adreno_gpu))
		gpu_write(gpu, REG_A5XX_CP_PROTECT(17),
			ADRENO_PROTECT_RW(0x10000, 0x8000));

	gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_CNTL, 0);
	/*
	 * Disable the trusted memory range - we don't currently support secure
	 * memory rendering and we don't want to block off part of the virtual
	 * memory space.
	 */
	gpu_write64(gpu, REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO,
		REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI, 0x00000000);
	gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_TRUSTED_SIZE, 0x00000000);

	/* Load the GPMU firmware before starting the HW init */
	a5xx_gpmu_ucode_init(gpu);

	ret = adreno_hw_init(gpu);
	if (ret)
		return ret;

	ret = a5xx_ucode_init(gpu);
	if (ret)
		return ret;

	/* Set the RBBM interrupt mask now that initial bringup is done */
	gpu_write(gpu, REG_A5XX_RBBM_INT_0_MASK, A5XX_INT_MASK);

	/* Clear ME_HALT to start the micro engine */
	gpu_write(gpu, REG_A5XX_CP_PFP_ME_CNTL, 0);
	ret = a5xx_me_init(gpu);
	if (ret)
		return ret;

	ret = a5xx_power_init(gpu);
	if (ret)
		return ret;

	/*
	 * Send a pipeline event stat to get misbehaving counters to start
	 * ticking correctly
	 */
	if (adreno_is_a530(adreno_gpu)) {
		OUT_PKT7(gpu->rb, CP_EVENT_WRITE, 1);
		OUT_RING(gpu->rb, 0x0F);

		gpu->funcs->flush(gpu);
		if (!a5xx_idle(gpu))
			return -EINVAL;
	}

	/*
	 * Try to load a zap shader into the secure world. If successful
	 * we can use the CP to switch out of secure mode. If not then we
	 * have no recourse but to try to switch ourselves out manually. If we
	 * guessed wrong then access to the RBBM_SECVID_TRUST_CNTL register will
	 * be blocked and a permissions violation will soon follow.
	 */
	ret = a5xx_zap_shader_init(gpu);
	if (!ret) {
		OUT_PKT7(gpu->rb, CP_SET_SECURE_MODE, 1);
		OUT_RING(gpu->rb, 0x00000000);

		gpu->funcs->flush(gpu);
		if (!a5xx_idle(gpu))
			return -EINVAL;
	} else {
		/* Print a warning so if we die, we know why */
		dev_warn_once(gpu->dev->dev,
			"Zap shader not enabled - using SECVID_TRUST_CNTL instead\n");
		gpu_write(gpu, REG_A5XX_RBBM_SECVID_TRUST_CNTL, 0x0);
	}

	return 0;
}

static void a5xx_recover(struct msm_gpu *gpu)
{
	int i;

	adreno_dump_info(gpu);

	for (i = 0; i < 8; i++) {
		printk("CP_SCRATCH_REG%d: %u\n", i,
			gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(i)));
	}

	if (hang_debug)
		a5xx_dump(gpu);

	gpu_write(gpu, REG_A5XX_RBBM_SW_RESET_CMD, 1);
	gpu_read(gpu, REG_A5XX_RBBM_SW_RESET_CMD);
	gpu_write(gpu, REG_A5XX_RBBM_SW_RESET_CMD, 0);
	adreno_recover(gpu);
}

static void a5xx_destroy(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);

	DBG("%s", gpu->name);

	if (a5xx_gpu->zap_dev.parent)
		device_unregister(&a5xx_gpu->zap_dev);

	if (a5xx_gpu->pm4_bo) {
		if (a5xx_gpu->pm4_iova)
			msm_gem_put_iova(a5xx_gpu->pm4_bo, gpu->id);
		drm_gem_object_unreference_unlocked(a5xx_gpu->pm4_bo);
	}

	if (a5xx_gpu->pfp_bo) {
		if (a5xx_gpu->pfp_iova)
			msm_gem_put_iova(a5xx_gpu->pfp_bo, gpu->id);
		drm_gem_object_unreference_unlocked(a5xx_gpu->pfp_bo);
	}

	if (a5xx_gpu->gpmu_bo) {
		if (a5xx_gpu->gpmu_iova)
			msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->id);
		drm_gem_object_unreference_unlocked(a5xx_gpu->gpmu_bo);
	}

	adreno_gpu_cleanup(adreno_gpu);
	kfree(a5xx_gpu);
}

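/*
 * The GPU is considered idle when RBBM_STATUS reports nothing busy (ignoring
 * the HI busy bit) and no hang-detect interrupt is pending.
 */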
static inline bool _a5xx_check_idle(struct msm_gpu *gpu)
{
	if (gpu_read(gpu, REG_A5XX_RBBM_STATUS) & ~A5XX_RBBM_STATUS_HI_BUSY)
		return false;

	/*
	 * Nearly every abnormality ends up pausing the GPU and triggering a
	 * fault so we can safely just watch for this one interrupt to fire
	 */
	return !(gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS) &
		A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT);
}

bool a5xx_idle(struct msm_gpu *gpu)
{
	/* wait for CP to drain ringbuffer: */
	if (!adreno_idle(gpu))
		return false;

	if (spin_until(_a5xx_check_idle(gpu))) {
		DRM_ERROR("%s: %ps: timeout waiting for GPU to idle: status %8.8X irq %8.8X\n",
			gpu->name, __builtin_return_address(0),
			gpu_read(gpu, REG_A5XX_RBBM_STATUS),
			gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS));

		return false;
	}

	return true;
}

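/*
 * IOMMU pagefault handler: log the faulting IOVA along with CP scratch
 * registers 4-7, which the submitted command stream may have written to help
 * identify the faulting submit.
 */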
static int a5xx_fault_handler(void *arg, unsigned long iova, int flags)
{
	struct msm_gpu *gpu = arg;

	pr_warn_ratelimited("*** gpu fault: iova=%08lx, flags=%d (%u,%u,%u,%u)\n",
			iova, flags,
			gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(4)),
			gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(5)),
			gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(6)),
			gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(7)));

	return -EFAULT;
}

static void a5xx_cp_err_irq(struct msm_gpu *gpu)
{
	u32 status = gpu_read(gpu, REG_A5XX_CP_INTERRUPT_STATUS);

	if (status & A5XX_CP_INT_CP_OPCODE_ERROR) {
		u32 val;

		gpu_write(gpu, REG_A5XX_CP_PFP_STAT_ADDR, 0);

		/*
		 * REG_A5XX_CP_PFP_STAT_DATA is indexed, and we want index 1 so
		 * read it twice
		 */

		gpu_read(gpu, REG_A5XX_CP_PFP_STAT_DATA);
		val = gpu_read(gpu, REG_A5XX_CP_PFP_STAT_DATA);

		dev_err_ratelimited(gpu->dev->dev, "CP | opcode error | possible opcode=0x%8.8X\n",
			val);
	}

	if (status & A5XX_CP_INT_CP_HW_FAULT_ERROR)
		dev_err_ratelimited(gpu->dev->dev, "CP | HW fault | status=0x%8.8X\n",
			gpu_read(gpu, REG_A5XX_CP_HW_FAULT));

	if (status & A5XX_CP_INT_CP_DMA_ERROR)
		dev_err_ratelimited(gpu->dev->dev, "CP | DMA error\n");

	if (status & A5XX_CP_INT_CP_REGISTER_PROTECTION_ERROR) {
		u32 val = gpu_read(gpu, REG_A5XX_CP_PROTECT_STATUS);

		dev_err_ratelimited(gpu->dev->dev,
			"CP | protected mode error | %s | addr=0x%8.8X | status=0x%8.8X\n",
			val & (1 << 24) ? "WRITE" : "READ",
			(val & 0xFFFFF) >> 2, val);
	}

	if (status & A5XX_CP_INT_CP_AHB_ERROR) {
		u32 status = gpu_read(gpu, REG_A5XX_CP_AHB_FAULT);
		const char *access[16] = { "reserved", "reserved",
			"timestamp lo", "timestamp hi", "pfp read", "pfp write",
			"", "", "me read", "me write", "", "", "crashdump read",
			"crashdump write" };

		dev_err_ratelimited(gpu->dev->dev,
			"CP | AHB error | addr=%X access=%s error=%d | status=0x%8.8X\n",
			status & 0xFFFFF, access[(status >> 24) & 0xF],
			(status & (1 << 31)), status);
	}
}

static void a5xx_rbbm_err_irq(struct msm_gpu *gpu, u32 status)
{
	if (status & A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR) {
		u32 val = gpu_read(gpu, REG_A5XX_RBBM_AHB_ERROR_STATUS);

		dev_err_ratelimited(gpu->dev->dev,
			"RBBM | AHB bus error | %s | addr=0x%X | ports=0x%X:0x%X\n",
			val & (1 << 28) ? "WRITE" : "READ",
			(val & 0xFFFFF) >> 2, (val >> 20) & 0x3,
			(val >> 24) & 0xF);

		/* Clear the error */
		gpu_write(gpu, REG_A5XX_RBBM_AHB_CMD, (1 << 4));

		/* Clear the interrupt */
		gpu_write(gpu, REG_A5XX_RBBM_INT_CLEAR_CMD,
			A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR);
	}

	if (status & A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT)
		dev_err_ratelimited(gpu->dev->dev, "RBBM | AHB transfer timeout\n");

	if (status & A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT)
		dev_err_ratelimited(gpu->dev->dev, "RBBM | ME master split | status=0x%X\n",
			gpu_read(gpu, REG_A5XX_RBBM_AHB_ME_SPLIT_STATUS));

	if (status & A5XX_RBBM_INT_0_MASK_RBBM_PFP_MS_TIMEOUT)
		dev_err_ratelimited(gpu->dev->dev, "RBBM | PFP master split | status=0x%X\n",
			gpu_read(gpu, REG_A5XX_RBBM_AHB_PFP_SPLIT_STATUS));

	if (status & A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT)
		dev_err_ratelimited(gpu->dev->dev, "RBBM | ETS master split | status=0x%X\n",
			gpu_read(gpu, REG_A5XX_RBBM_AHB_ETS_SPLIT_STATUS));

	if (status & A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW)
		dev_err_ratelimited(gpu->dev->dev, "RBBM | ATB ASYNC overflow\n");

	if (status & A5XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW)
		dev_err_ratelimited(gpu->dev->dev, "RBBM | ATB bus overflow\n");
}

static void a5xx_uche_err_irq(struct msm_gpu *gpu)
{
	/* Assemble the 64-bit fault address from the HI/LO trap log registers */
	uint64_t addr = (uint64_t) gpu_read(gpu, REG_A5XX_UCHE_TRAP_LOG_HI) << 32;

	addr |= gpu_read(gpu, REG_A5XX_UCHE_TRAP_LOG_LO);

	dev_err_ratelimited(gpu->dev->dev, "UCHE | Out of bounds access | addr=0x%llX\n",
		addr);
}

static void a5xx_gpmu_err_irq(struct msm_gpu *gpu)
{
	dev_err_ratelimited(gpu->dev->dev, "GPMU | voltage droop\n");
}

#define RBBM_ERROR_MASK \
	(A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR | \
	A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT | \
	A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT | \
	A5XX_RBBM_INT_0_MASK_RBBM_PFP_MS_TIMEOUT | \
	A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT | \
	A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW)

static irqreturn_t a5xx_irq(struct msm_gpu *gpu)
{
	u32 status = gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS);

	/*
	 * Clear all the interrupts except RBBM_AHB_ERROR - if we clear it
	 * before the source is cleared the interrupt will storm.
	 */
	gpu_write(gpu, REG_A5XX_RBBM_INT_CLEAR_CMD,
		status & ~A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR);

	/* Pass status to a5xx_rbbm_err_irq because we've already cleared it */
	if (status & RBBM_ERROR_MASK)
		a5xx_rbbm_err_irq(gpu, status);

	if (status & A5XX_RBBM_INT_0_MASK_CP_HW_ERROR)
		a5xx_cp_err_irq(gpu);

	if (status & A5XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS)
		a5xx_uche_err_irq(gpu);

	if (status & A5XX_RBBM_INT_0_MASK_GPMU_VOLTAGE_DROOP)
		a5xx_gpmu_err_irq(gpu);

	if (status & A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS)
		msm_gpu_retire(gpu);

	return IRQ_HANDLED;
}

static const u32 a5xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_A5XX_CP_RB_BASE),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE_HI, REG_A5XX_CP_RB_BASE_HI),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, REG_A5XX_CP_RB_RPTR_ADDR),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR_HI,
		REG_A5XX_CP_RB_RPTR_ADDR_HI),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_A5XX_CP_RB_RPTR),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_A5XX_CP_RB_WPTR),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A5XX_CP_RB_CNTL),
};

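/*
 * Register ranges (pairs of inclusive first/last offsets) captured for
 * debug dumps; the list is terminated with ~0.
 */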
static const u32 a5xx_registers[] = {
	0x0000, 0x0002, 0x0004, 0x0020, 0x0022, 0x0026, 0x0029, 0x002B,
	0x002E, 0x0035, 0x0038, 0x0042, 0x0044, 0x0044, 0x0047, 0x0095,
	0x0097, 0x00BB, 0x03A0, 0x0464, 0x0469, 0x046F, 0x04D2, 0x04D3,
	0x04E0, 0x0533, 0x0540, 0x0555, 0xF400, 0xF400, 0xF800, 0xF807,
	0x0800, 0x081A, 0x081F, 0x0841, 0x0860, 0x0860, 0x0880, 0x08A0,
	0x0B00, 0x0B12, 0x0B15, 0x0B28, 0x0B78, 0x0B7F, 0x0BB0, 0x0BBD,
	0x0BC0, 0x0BC6, 0x0BD0, 0x0C53, 0x0C60, 0x0C61, 0x0C80, 0x0C82,
	0x0C84, 0x0C85, 0x0C90, 0x0C98, 0x0CA0, 0x0CA0, 0x0CB0, 0x0CB2,
	0x2180, 0x2185, 0x2580, 0x2585, 0x0CC1, 0x0CC1, 0x0CC4, 0x0CC7,
	0x0CCC, 0x0CCC, 0x0CD0, 0x0CD8, 0x0CE0, 0x0CE5, 0x0CE8, 0x0CE8,
	0x0CEC, 0x0CF1, 0x0CFB, 0x0D0E, 0x2100, 0x211E, 0x2140, 0x2145,
	0x2500, 0x251E, 0x2540, 0x2545, 0x0D10, 0x0D17, 0x0D20, 0x0D23,
	0x0D30, 0x0D30, 0x20C0, 0x20C0, 0x24C0, 0x24C0, 0x0E40, 0x0E43,
	0x0E4A, 0x0E4A, 0x0E50, 0x0E57, 0x0E60, 0x0E7C, 0x0E80, 0x0E8E,
	0x0E90, 0x0E96, 0x0EA0, 0x0EA8, 0x0EB0, 0x0EB2, 0xE140, 0xE147,
	0xE150, 0xE187, 0xE1A0, 0xE1A9, 0xE1B0, 0xE1B6, 0xE1C0, 0xE1C7,
	0xE1D0, 0xE1D1, 0xE200, 0xE201, 0xE210, 0xE21C, 0xE240, 0xE268,
	0xE000, 0xE006, 0xE010, 0xE09A, 0xE0A0, 0xE0A4, 0xE0AA, 0xE0EB,
	0xE100, 0xE105, 0xE380, 0xE38F, 0xE3B0, 0xE3B0, 0xE400, 0xE405,
	0xE408, 0xE4E9, 0xE4F0, 0xE4F0, 0xE280, 0xE280, 0xE282, 0xE2A3,
	0xE2A5, 0xE2C2, 0xE940, 0xE947, 0xE950, 0xE987, 0xE9A0, 0xE9A9,
	0xE9B0, 0xE9B6, 0xE9C0, 0xE9C7, 0xE9D0, 0xE9D1, 0xEA00, 0xEA01,
	0xEA10, 0xEA1C, 0xEA40, 0xEA68, 0xE800, 0xE806, 0xE810, 0xE89A,
	0xE8A0, 0xE8A4, 0xE8AA, 0xE8EB, 0xE900, 0xE905, 0xEB80, 0xEB8F,
	0xEBB0, 0xEBB0, 0xEC00, 0xEC05, 0xEC08, 0xECE9, 0xECF0, 0xECF0,
	0xEA80, 0xEA80, 0xEA82, 0xEAA3, 0xEAA5, 0xEAC2, 0xA800, 0xA8FF,
	0xAC60, 0xAC60, 0xB000, 0xB97F, 0xB9A0, 0xB9BF,
	~0
};

static void a5xx_dump(struct msm_gpu *gpu)
{
	dev_info(gpu->dev->dev, "status: %08x\n",
		gpu_read(gpu, REG_A5XX_RBBM_STATUS));
	adreno_dump(gpu);
}

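/*
 * Power up the GPU: core power first, then the RBCCU and SP power domains
 * in sequence, polling each GDSC until it reports enabled.
 */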
static int a5xx_pm_resume(struct msm_gpu *gpu)
{
	int ret;

	/* Turn on the core power */
	ret = msm_gpu_pm_resume(gpu);
	if (ret)
		return ret;

	/* Turn on the RBCCU domain first to limit the chances of voltage droop */
	gpu_write(gpu, REG_A5XX_GPMU_RBCCU_POWER_CNTL, 0x778000);

	/* Wait 3 usecs before polling */
	udelay(3);

	ret = spin_usecs(gpu, 20, REG_A5XX_GPMU_RBCCU_PWR_CLK_STATUS,
		(1 << 20), (1 << 20));
	if (ret) {
		DRM_ERROR("%s: timeout waiting for RBCCU GDSC enable: %X\n",
			gpu->name,
			gpu_read(gpu, REG_A5XX_GPMU_RBCCU_PWR_CLK_STATUS));
		return ret;
	}

	/* Turn on the SP domain */
	gpu_write(gpu, REG_A5XX_GPMU_SP_POWER_CNTL, 0x778000);
	ret = spin_usecs(gpu, 20, REG_A5XX_GPMU_SP_PWR_CLK_STATUS,
		(1 << 20), (1 << 20));
	if (ret)
		DRM_ERROR("%s: timeout waiting for SP GDSC enable\n",
			gpu->name);

	return ret;
}

static int a5xx_pm_suspend(struct msm_gpu *gpu)
{
	/* Clear the VBIF pipe before shutting down */
	gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, 0xF);
	spin_until((gpu_read(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL1) & 0xF) == 0xF);

	gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, 0);

	/*
	 * Reset the VBIF before power collapse to avoid issue with FIFO
	 * entries
	 */
	gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD, 0x003C0000);
	gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD, 0x00000000);

	return msm_gpu_pm_suspend(gpu);
}

static int a5xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
{
	*value = gpu_read64(gpu, REG_A5XX_RBBM_PERFCTR_CP_0_LO,
		REG_A5XX_RBBM_PERFCTR_CP_0_HI);

	return 0;
}

#ifdef CONFIG_DEBUG_FS
static void a5xx_show(struct msm_gpu *gpu, struct seq_file *m)
{
	seq_printf(m, "status: %08x\n",
			gpu_read(gpu, REG_A5XX_RBBM_STATUS));
	adreno_show(gpu, m);
}
#endif

static const struct adreno_gpu_funcs funcs = {
	.base = {
		.get_param = adreno_get_param,
		.hw_init = a5xx_hw_init,
		.pm_suspend = a5xx_pm_suspend,
		.pm_resume = a5xx_pm_resume,
		.recover = a5xx_recover,
		.last_fence = adreno_last_fence,
		.submit = a5xx_submit,
		.flush = adreno_flush,
		.irq = a5xx_irq,
		.destroy = a5xx_destroy,
#ifdef CONFIG_DEBUG_FS
		.show = a5xx_show,
#endif
	},
	.get_timestamp = a5xx_get_timestamp,
};

struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct platform_device *pdev = priv->gpu_pdev;
	struct a5xx_gpu *a5xx_gpu = NULL;
	struct adreno_gpu *adreno_gpu;
	struct msm_gpu *gpu;
	int ret;

	if (!pdev) {
		dev_err(dev->dev, "No A5XX device is defined\n");
		return ERR_PTR(-ENXIO);
	}

	a5xx_gpu = kzalloc(sizeof(*a5xx_gpu), GFP_KERNEL);
	if (!a5xx_gpu)
		return ERR_PTR(-ENOMEM);

	adreno_gpu = &a5xx_gpu->base;
	gpu = &adreno_gpu->base;

	a5xx_gpu->pdev = pdev;
	adreno_gpu->registers = a5xx_registers;
	adreno_gpu->reg_offsets = a5xx_register_offsets;

	a5xx_gpu->lm_leakage = 0x4E001A;

	ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs);
	if (ret) {
		a5xx_destroy(&(a5xx_gpu->base.base));
		return ERR_PTR(ret);
	}

	if (gpu->aspace)
		msm_mmu_set_fault_handler(gpu->aspace->mmu, gpu, a5xx_fault_handler);

	return gpu;
}