/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "adreno_gpu.h"
#include "msm_gem.h"

struct adreno_info {
	struct adreno_rev rev;
	uint32_t revn;
	const char *name;
	const char *pm4fw, *pfpfw;
	uint32_t gmem;
};

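/* ANY_ID is a wildcard: a gpulist entry using it in a given adreno_rev
 * field matches any value in that field (see _rev_match() below).
 */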
#define ANY_ID 0xff

static const struct adreno_info gpulist[] = {
	{
		.rev   = ADRENO_REV(3, 0, 5, ANY_ID),
		.revn  = 305,
		.name  = "A305",
		.pm4fw = "a300_pm4.fw",
		.pfpfw = "a300_pfp.fw",
		.gmem  = SZ_256K,
	}, {
		.rev   = ADRENO_REV(3, 2, ANY_ID, ANY_ID),
		.revn  = 320,
		.name  = "A320",
		.pm4fw = "a300_pm4.fw",
		.pfpfw = "a300_pfp.fw",
		.gmem  = SZ_512K,
	}, {
		.rev   = ADRENO_REV(3, 3, 0, 0),
		.revn  = 330,
		.name  = "A330",
		.pm4fw = "a330_pm4.fw",
		.pfpfw = "a330_pfp.fw",
		.gmem  = SZ_1M,
	},
};

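/* RB_SIZE is the ringbuffer size in bytes; RB_BLKSIZE is the value
 * written into the CP_RB_CNTL BLKSZ field in adreno_hw_init() below.
 */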
#define RB_SIZE    SZ_32K
#define RB_BLKSIZE 16

int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);

	switch (param) {
	case MSM_PARAM_GPU_ID:
		*value = adreno_gpu->info->revn;
		return 0;
	case MSM_PARAM_GMEM_SIZE:
		*value = adreno_gpu->info->gmem;
		return 0;
	default:
		DBG("%s: invalid param: %u", gpu->name, param);
		return -EINVAL;
	}
}

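/* rbmemptr() gives the GPU (iova) address of a field within the shared
 * adreno_rbmemptrs struct, so the CP can write values like rptr and the
 * fence seqno back to memory where the CPU can poll them.
 */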
#define rbmemptr(adreno_gpu, member) \
	((adreno_gpu)->memptrs_iova + offsetof(struct adreno_rbmemptrs, member))

int adreno_hw_init(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);

	DBG("%s", gpu->name);

	/* Setup REG_CP_RB_CNTL: */
	gpu_write(gpu, REG_AXXX_CP_RB_CNTL,
			/* size is log2(quad-words): */
			AXXX_CP_RB_CNTL_BUFSZ(ilog2(gpu->rb->size / 8)) |
			AXXX_CP_RB_CNTL_BLKSZ(RB_BLKSIZE));

	/* Setup ringbuffer address: */
	gpu_write(gpu, REG_AXXX_CP_RB_BASE, gpu->rb_iova);
	gpu_write(gpu, REG_AXXX_CP_RB_RPTR_ADDR, rbmemptr(adreno_gpu, rptr));

	/* Setup scratch/timestamp: */
	gpu_write(gpu, REG_AXXX_SCRATCH_ADDR, rbmemptr(adreno_gpu, fence));

	gpu_write(gpu, REG_AXXX_SCRATCH_UMSK, 0x1);

	return 0;
}

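/* The write pointer is kept in dwords: ring->cur and ring->start are
 * u32 pointers, so the subtraction yields the dword offset into the
 * ringbuffer that adreno_flush() writes to REG_AXXX_CP_RB_WPTR.
 */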
static uint32_t get_wptr(struct msm_ringbuffer *ring)
{
	return ring->cur - ring->start;
}

uint32_t adreno_last_fence(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	return adreno_gpu->memptrs->fence;
}

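/* Recovery after a hang: power the GPU down, rewind the ringbuffer,
 * fast-forward the completed-fence seqno past everything submitted so
 * far (so waiters are released rather than left stuck), then power
 * back up and re-run hw_init:
 */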
void adreno_recover(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct drm_device *dev = gpu->dev;
	int ret;

	gpu->funcs->pm_suspend(gpu);

	/* reset ringbuffer: */
	gpu->rb->cur = gpu->rb->start;

	/* reset completed fence seqno, just discard anything pending: */
	adreno_gpu->memptrs->fence = gpu->submitted_fence;

	gpu->funcs->pm_resume(gpu);
	ret = gpu->funcs->hw_init(gpu);
	if (ret) {
		dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
		/* hmm, oh well? */
	}
}

int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
		struct msm_file_private *ctx)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct msm_drm_private *priv = gpu->dev->dev_private;
	struct msm_ringbuffer *ring = gpu->rb;
	unsigned i, ibs = 0;

	for (i = 0; i < submit->nr_cmds; i++) {
		switch (submit->cmd[i].type) {
		case MSM_SUBMIT_CMD_IB_TARGET_BUF:
			/* ignore IB-targets */
			break;
		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
			/* ignore if there has not been a ctx switch: */
			if (priv->lastctx == ctx)
				break;
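			/* fall-thru */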
		case MSM_SUBMIT_CMD_BUF:
			OUT_PKT3(ring, CP_INDIRECT_BUFFER_PFD, 2);
			OUT_RING(ring, submit->cmd[i].iova);
			OUT_RING(ring, submit->cmd[i].size);
			ibs++;
			break;
		}
	}

	/* on a320, at least, we seem to need to pad things out to an
	 * even number of qwords to avoid issue w/ CP hanging on wrap-
	 * around:
	 */
	if (ibs % 2)
		OUT_PKT2(ring);

	OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1);
	OUT_RING(ring, submit->fence);

	if (adreno_is_a3xx(adreno_gpu)) {
		/* Flush HLSQ lazy updates to make sure there is nothing
		 * pending for indirect loads after the timestamp has
		 * passed:
		 */
		OUT_PKT3(ring, CP_EVENT_WRITE, 1);
		OUT_RING(ring, HLSQ_FLUSH);

		OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1);
		OUT_RING(ring, 0x00000000);
	}

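	/* Write the fence seqno to memptrs->fence once the preceding work
	 * has landed (CACHE_FLUSH_TS takes an address and a value to
	 * write there), then raise CP_INTERRUPT so the CPU notices:
	 */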
	OUT_PKT3(ring, CP_EVENT_WRITE, 3);
	OUT_RING(ring, CACHE_FLUSH_TS);
	OUT_RING(ring, rbmemptr(adreno_gpu, fence));
	OUT_RING(ring, submit->fence);

	/* we could maybe be clever and only CP_COND_EXEC the interrupt: */
	OUT_PKT3(ring, CP_INTERRUPT, 1);
	OUT_RING(ring, 0x80000000);

#if 0
	if (adreno_is_a3xx(adreno_gpu)) {
		/* Dummy set-constant to trigger context rollover */
		OUT_PKT3(ring, CP_SET_CONSTANT, 2);
		OUT_RING(ring, CP_REG(REG_A3XX_HLSQ_CL_KERNEL_GROUP_X_REG));
		OUT_RING(ring, 0x00000000);
	}
#endif

	gpu->funcs->flush(gpu);

	return 0;
}

void adreno_flush(struct msm_gpu *gpu)
{
	uint32_t wptr = get_wptr(gpu->rb);

	/* ensure writes to ringbuffer have hit system memory: */
	mb();

	gpu_write(gpu, REG_AXXX_CP_RB_WPTR, wptr);
}

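/* Busy-wait for the CP to drain the ringbuffer (rptr catching up with
 * wptr), giving up after ADRENO_IDLE_TIMEOUT:
 */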
void adreno_idle(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	uint32_t rptr, wptr = get_wptr(gpu->rb);
	unsigned long t;

	t = jiffies + ADRENO_IDLE_TIMEOUT;

	/* then wait for CP to drain ringbuffer: */
	do {
		rptr = adreno_gpu->memptrs->rptr;
		if (rptr == wptr)
			return;
	} while (time_before(jiffies, t));

	DRM_ERROR("timeout waiting for %s to drain ringbuffer!\n", gpu->name);

	/* TODO maybe we need to reset GPU here to recover from hang? */
}

#ifdef CONFIG_DEBUG_FS
void adreno_show(struct msm_gpu *gpu, struct seq_file *m)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);

	seq_printf(m, "revision: %d (%d.%d.%d.%d)\n",
			adreno_gpu->info->revn, adreno_gpu->rev.core,
			adreno_gpu->rev.major, adreno_gpu->rev.minor,
			adreno_gpu->rev.patchid);

	seq_printf(m, "fence: %d/%d\n", adreno_gpu->memptrs->fence,
			gpu->submitted_fence);
	seq_printf(m, "rptr: %d\n", adreno_gpu->memptrs->rptr);
	seq_printf(m, "wptr: %d\n", adreno_gpu->memptrs->wptr);
	seq_printf(m, "rb wptr: %d\n", get_wptr(gpu->rb));
}
#endif

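/* Spin until at least ndwords of ringbuffer space are free.  The free
 * count is computed modulo the ring size, with one dword held back so
 * that a full ring is never confused with an empty one (rptr == wptr
 * is the empty case):
 */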
void adreno_wait_ring(struct msm_gpu *gpu, uint32_t ndwords)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	uint32_t freedwords;
	do {
		uint32_t size = gpu->rb->size / 4;
		uint32_t wptr = get_wptr(gpu->rb);
		uint32_t rptr = adreno_gpu->memptrs->rptr;
		freedwords = (rptr + (size - 1) - wptr) % size;
	} while (freedwords < ndwords);
}

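/* IOMMU ports (context names) the GPU gets attached to: the user and
 * privileged contexts, apparently for each of the two 3d cores:
 */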
static const char *iommu_ports[] = {
		"gfx3d_user", "gfx3d_priv",
		"gfx3d1_user", "gfx3d1_priv",
};

static inline bool _rev_match(uint8_t entry, uint8_t id)
{
	return (entry == ANY_ID) || (entry == id);
}

int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
		struct adreno_gpu *gpu, const struct adreno_gpu_funcs *funcs,
		struct adreno_rev rev)
{
	int i, ret;

	/* identify gpu: */
	for (i = 0; i < ARRAY_SIZE(gpulist); i++) {
		const struct adreno_info *info = &gpulist[i];
		if (_rev_match(info->rev.core, rev.core) &&
				_rev_match(info->rev.major, rev.major) &&
				_rev_match(info->rev.minor, rev.minor) &&
				_rev_match(info->rev.patchid, rev.patchid)) {
			gpu->info = info;
			gpu->revn = info->revn;
			break;
		}
	}

	if (i == ARRAY_SIZE(gpulist)) {
		dev_err(drm->dev, "Unknown GPU revision: %u.%u.%u.%u\n",
				rev.core, rev.major, rev.minor, rev.patchid);
		return -ENXIO;
	}

	DBG("Found GPU: %s (%u.%u.%u.%u)", gpu->info->name,
			rev.core, rev.major, rev.minor, rev.patchid);

	gpu->funcs = funcs;
	gpu->rev = rev;

	ret = request_firmware(&gpu->pm4, gpu->info->pm4fw, drm->dev);
	if (ret) {
		dev_err(drm->dev, "failed to load %s PM4 firmware: %d\n",
				gpu->info->pm4fw, ret);
		return ret;
	}

	ret = request_firmware(&gpu->pfp, gpu->info->pfpfw, drm->dev);
	if (ret) {
		dev_err(drm->dev, "failed to load %s PFP firmware: %d\n",
				gpu->info->pfpfw, ret);
		return ret;
	}

	ret = msm_gpu_init(drm, pdev, &gpu->base, &funcs->base,
			gpu->info->name, "kgsl_3d0_reg_memory", "kgsl_3d0_irq",
			RB_SIZE);
	if (ret)
		return ret;

	ret = msm_iommu_attach(drm, gpu->base.iommu,
			iommu_ports, ARRAY_SIZE(iommu_ports));
	if (ret)
		return ret;

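	/* Allocate the small shared (uncached) buffer that the CP writes
	 * rptr and fence values into, and which the CPU reads back:
	 */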
	gpu->memptrs_bo = msm_gem_new(drm, sizeof(*gpu->memptrs),
			MSM_BO_UNCACHED);
	if (IS_ERR(gpu->memptrs_bo)) {
		ret = PTR_ERR(gpu->memptrs_bo);
		gpu->memptrs_bo = NULL;
		dev_err(drm->dev, "could not allocate memptrs: %d\n", ret);
		return ret;
	}

	gpu->memptrs = msm_gem_vaddr_locked(gpu->memptrs_bo);
	if (!gpu->memptrs) {
		dev_err(drm->dev, "could not vmap memptrs\n");
		return -ENOMEM;
	}

	ret = msm_gem_get_iova_locked(gpu->memptrs_bo, gpu->base.id,
			&gpu->memptrs_iova);
	if (ret) {
		dev_err(drm->dev, "could not map memptrs: %d\n", ret);
		return ret;
	}

	return 0;
}

void adreno_gpu_cleanup(struct adreno_gpu *gpu)
{
	if (gpu->memptrs_bo) {
		if (gpu->memptrs_iova)
			msm_gem_put_iova(gpu->memptrs_bo, gpu->base.id);
		drm_gem_object_unreference(gpu->memptrs_bo);
	}
	if (gpu->pm4)
		release_firmware(gpu->pm4);
	if (gpu->pfp)
		release_firmware(gpu->pfp);
	msm_gpu_cleanup(&gpu->base);
}