/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "adreno_gpu.h"
#include "msm_gem.h"
#include "msm_mmu.h"

struct adreno_info {
        struct adreno_rev rev;
        uint32_t revn;
        const char *name;
        const char *pm4fw, *pfpfw;
        uint32_t gmem;
};

#define ANY_ID 0xff

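/*
 * Table of known GPUs.  adreno_gpu_init() matches the revision handed in by
 * the chip-specific code against these entries; ANY_ID fields are wildcards
 * (see _rev_match() below), so e.g. ADRENO_REV(3, 2, ANY_ID, ANY_ID) matches
 * any a320 minor/patchid.
 */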
static const struct adreno_info gpulist[] = {
        {
                .rev   = ADRENO_REV(3, 0, 5, ANY_ID),
                .revn  = 305,
                .name  = "A305",
                .pm4fw = "a300_pm4.fw",
                .pfpfw = "a300_pfp.fw",
                .gmem  = SZ_256K,
        }, {
                .rev   = ADRENO_REV(3, 2, ANY_ID, ANY_ID),
                .revn  = 320,
                .name  = "A320",
                .pm4fw = "a300_pm4.fw",
                .pfpfw = "a300_pfp.fw",
                .gmem  = SZ_512K,
        }, {
                .rev   = ADRENO_REV(3, 3, 0, 0),
                .revn  = 330,
                .name  = "A330",
                .pm4fw = "a330_pm4.fw",
                .pfpfw = "a330_pfp.fw",
                .gmem  = SZ_1M,
        },
};

MODULE_FIRMWARE("a300_pm4.fw");
MODULE_FIRMWARE("a300_pfp.fw");
MODULE_FIRMWARE("a330_pm4.fw");
MODULE_FIRMWARE("a330_pfp.fw");

#define RB_SIZE    SZ_32K
#define RB_BLKSIZE 16

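/*
 * Answer device queries from userspace (MSM_PARAM_*): currently just the
 * GPU id (e.g. 320) and the GMEM size of the probed chip.
 */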
int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
{
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);

        switch (param) {
        case MSM_PARAM_GPU_ID:
                *value = adreno_gpu->info->revn;
                return 0;
        case MSM_PARAM_GMEM_SIZE:
                *value = adreno_gpu->info->gmem;
                return 0;
        default:
                DBG("%s: invalid param: %u", gpu->name, param);
                return -EINVAL;
        }
}

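/*
 * memptrs is a small uncached buffer that the CP writes back into (read
 * pointer, last completed fence, ...).  rbmemptr() gives the GPU address
 * of one member, e.g. rbmemptr(adreno_gpu, fence) expands to
 * memptrs_iova + offsetof(struct adreno_rbmemptrs, fence).
 */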
#define rbmemptr(adreno_gpu, member) \
        ((adreno_gpu)->memptrs_iova + offsetof(struct adreno_rbmemptrs, member))

int adreno_hw_init(struct msm_gpu *gpu)
{
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);

        DBG("%s", gpu->name);

        /* Setup REG_CP_RB_CNTL: */
        gpu_write(gpu, REG_AXXX_CP_RB_CNTL,
                        /* size is log2(quad-words): */
                        AXXX_CP_RB_CNTL_BUFSZ(ilog2(gpu->rb->size / 8)) |
                        AXXX_CP_RB_CNTL_BLKSZ(RB_BLKSIZE));

        /* Setup ringbuffer address: */
        gpu_write(gpu, REG_AXXX_CP_RB_BASE, gpu->rb_iova);
        gpu_write(gpu, REG_AXXX_CP_RB_RPTR_ADDR, rbmemptr(adreno_gpu, rptr));

        /* Setup scratch/timestamp: */
        gpu_write(gpu, REG_AXXX_SCRATCH_ADDR, rbmemptr(adreno_gpu, fence));

        gpu_write(gpu, REG_AXXX_SCRATCH_UMSK, 0x1);

        return 0;
}

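/* current sw write position, in dwords from the start of the ring (the same
 * units the CP expects in REG_AXXX_CP_RB_WPTR):
 */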
static uint32_t get_wptr(struct msm_ringbuffer *ring)
{
        return ring->cur - ring->start;
}

uint32_t adreno_last_fence(struct msm_gpu *gpu)
{
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        return adreno_gpu->memptrs->fence;
}

void adreno_recover(struct msm_gpu *gpu)
{
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        struct drm_device *dev = gpu->dev;
        int ret;

        gpu->funcs->pm_suspend(gpu);

        /* reset ringbuffer: */
        gpu->rb->cur = gpu->rb->start;

        /* reset completed fence seqno, just discard anything pending: */
        adreno_gpu->memptrs->fence = gpu->submitted_fence;
        adreno_gpu->memptrs->rptr = 0;
        adreno_gpu->memptrs->wptr = 0;

        gpu->funcs->pm_resume(gpu);
        ret = gpu->funcs->hw_init(gpu);
        if (ret) {
                dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
                /* hmm, oh well? */
        }
}

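/*
 * Write one submit into the ringbuffer: the userspace IBs, a scratch-reg
 * write of the fence seqno, a CACHE_FLUSH_TS event that lands the fence in
 * memptrs->fence once the preceding work has completed, and a CP_INTERRUPT
 * to raise an irq on the CPU.
 */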
int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
                struct msm_file_private *ctx)
{
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        struct msm_drm_private *priv = gpu->dev->dev_private;
        struct msm_ringbuffer *ring = gpu->rb;
        unsigned i, ibs = 0;

        for (i = 0; i < submit->nr_cmds; i++) {
                switch (submit->cmd[i].type) {
                case MSM_SUBMIT_CMD_IB_TARGET_BUF:
                        /* ignore IB-targets */
                        break;
                case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
                        /* ignore if there has not been a ctx switch: */
                        if (priv->lastctx == ctx)
                                break;
                        /* otherwise emit it like a normal cmd buffer */
                        /* fall-thru */
                case MSM_SUBMIT_CMD_BUF:
                        OUT_PKT3(ring, CP_INDIRECT_BUFFER_PFD, 2);
                        OUT_RING(ring, submit->cmd[i].iova);
                        OUT_RING(ring, submit->cmd[i].size);
                        ibs++;
                        break;
                }
        }

        /* on a320, at least, we seem to need to pad things out to an
         * even number of qwords to avoid issue w/ CP hanging on wrap-
         * around:
         */
        if (ibs % 2)
                OUT_PKT2(ring);

        OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1);
        OUT_RING(ring, submit->fence);

        if (adreno_is_a3xx(adreno_gpu)) {
                /* Flush HLSQ lazy updates to make sure there is nothing
                 * pending for indirect loads after the timestamp has
                 * passed:
                 */
                OUT_PKT3(ring, CP_EVENT_WRITE, 1);
                OUT_RING(ring, HLSQ_FLUSH);

                OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1);
                OUT_RING(ring, 0x00000000);
        }

        OUT_PKT3(ring, CP_EVENT_WRITE, 3);
        OUT_RING(ring, CACHE_FLUSH_TS);
        OUT_RING(ring, rbmemptr(adreno_gpu, fence));
        OUT_RING(ring, submit->fence);

        /* we could maybe be clever and only CP_COND_EXEC the interrupt: */
        OUT_PKT3(ring, CP_INTERRUPT, 1);
        OUT_RING(ring, 0x80000000);

#if 0
        if (adreno_is_a3xx(adreno_gpu)) {
                /* Dummy set-constant to trigger context rollover */
                OUT_PKT3(ring, CP_SET_CONSTANT, 2);
                OUT_RING(ring, CP_REG(REG_A3XX_HLSQ_CL_KERNEL_GROUP_X_REG));
                OUT_RING(ring, 0x00000000);
        }
#endif

        gpu->funcs->flush(gpu);

        return 0;
}

void adreno_flush(struct msm_gpu *gpu)
{
        uint32_t wptr = get_wptr(gpu->rb);

        /* ensure writes to ringbuffer have hit system memory: */
        mb();

        gpu_write(gpu, REG_AXXX_CP_RB_WPTR, wptr);
}

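/* Spin (up to ADRENO_IDLE_TIMEOUT) until the CP's rptr has caught up with
 * our wptr, ie. the ringbuffer has been fully consumed:
 */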
void adreno_idle(struct msm_gpu *gpu)
{
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        uint32_t rptr, wptr = get_wptr(gpu->rb);
        unsigned long t;

        t = jiffies + ADRENO_IDLE_TIMEOUT;

        /* then wait for CP to drain ringbuffer: */
        do {
                rptr = adreno_gpu->memptrs->rptr;
                if (rptr == wptr)
                        return;
        } while (time_before(jiffies, t));

        DRM_ERROR("%s: timeout waiting to drain ringbuffer!\n", gpu->name);

        /* TODO maybe we need to reset GPU here to recover from hang? */
}

#ifdef CONFIG_DEBUG_FS
void adreno_show(struct msm_gpu *gpu, struct seq_file *m)
{
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);

        seq_printf(m, "revision: %d (%d.%d.%d.%d)\n",
                        adreno_gpu->info->revn, adreno_gpu->rev.core,
                        adreno_gpu->rev.major, adreno_gpu->rev.minor,
                        adreno_gpu->rev.patchid);

        seq_printf(m, "fence: %d/%d\n", adreno_gpu->memptrs->fence,
                        gpu->submitted_fence);
        seq_printf(m, "rptr: %d\n", adreno_gpu->memptrs->rptr);
        seq_printf(m, "wptr: %d\n", adreno_gpu->memptrs->wptr);
        seq_printf(m, "rb wptr: %d\n", get_wptr(gpu->rb));
}
#endif

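/* Wait (up to ADRENO_IDLE_TIMEOUT) until at least ndwords of free space are
 * available in the ringbuffer, based on the rptr the CP writes back into
 * memptrs:
 */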
void adreno_wait_ring(struct msm_gpu *gpu, uint32_t ndwords)
{
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        uint32_t freedwords;
        unsigned long t = jiffies + ADRENO_IDLE_TIMEOUT;
        do {
                uint32_t size = gpu->rb->size / 4;
                uint32_t wptr = get_wptr(gpu->rb);
                uint32_t rptr = adreno_gpu->memptrs->rptr;
                freedwords = (rptr + (size - 1) - wptr) % size;

                if (time_after(jiffies, t)) {
                        DRM_ERROR("%s: timeout waiting for ringbuffer space\n", gpu->name);
                        break;
                }
        } while (freedwords < ndwords);
}

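/* IOMMU port names used for the GPU's memory transactions; attached to the
 * GPU's MMU below in adreno_gpu_init():
 */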
static const char *iommu_ports[] = {
        "gfx3d_user", "gfx3d_priv",
        "gfx3d1_user", "gfx3d1_priv",
};

static inline bool _rev_match(uint8_t entry, uint8_t id)
{
        return (entry == ANY_ID) || (entry == id);
}

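/*
 * Common init for the adreno "base class": identify the GPU from gpulist,
 * load the PM4/PFP firmware, initialize the base msm_gpu (ringbuffer, irq,
 * register memory), attach the IOMMU ports, and allocate/map the memptrs
 * buffer.  Expected to be called by the chip-specific backend (e.g. a3xx).
 */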
int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
                struct adreno_gpu *gpu, const struct adreno_gpu_funcs *funcs,
                struct adreno_rev rev)
{
        struct msm_mmu *mmu;
        int i, ret;

        /* identify gpu: */
        for (i = 0; i < ARRAY_SIZE(gpulist); i++) {
                const struct adreno_info *info = &gpulist[i];
                if (_rev_match(info->rev.core, rev.core) &&
                                _rev_match(info->rev.major, rev.major) &&
                                _rev_match(info->rev.minor, rev.minor) &&
                                _rev_match(info->rev.patchid, rev.patchid)) {
                        gpu->info = info;
                        gpu->revn = info->revn;
                        break;
                }
        }

        if (i == ARRAY_SIZE(gpulist)) {
                dev_err(drm->dev, "Unknown GPU revision: %u.%u.%u.%u\n",
                                rev.core, rev.major, rev.minor, rev.patchid);
                return -ENXIO;
        }

        DBG("Found GPU: %s (%u.%u.%u.%u)", gpu->info->name,
                        rev.core, rev.major, rev.minor, rev.patchid);

        gpu->funcs = funcs;
        gpu->rev = rev;

        ret = request_firmware(&gpu->pm4, gpu->info->pm4fw, drm->dev);
        if (ret) {
                dev_err(drm->dev, "failed to load %s PM4 firmware: %d\n",
                                gpu->info->pm4fw, ret);
                return ret;
        }

        ret = request_firmware(&gpu->pfp, gpu->info->pfpfw, drm->dev);
        if (ret) {
                dev_err(drm->dev, "failed to load %s PFP firmware: %d\n",
                                gpu->info->pfpfw, ret);
                return ret;
        }

        ret = msm_gpu_init(drm, pdev, &gpu->base, &funcs->base,
                        gpu->info->name, "kgsl_3d0_reg_memory", "kgsl_3d0_irq",
                        RB_SIZE);
        if (ret)
                return ret;

        mmu = gpu->base.mmu;
        if (mmu) {
                ret = mmu->funcs->attach(mmu, iommu_ports,
                                ARRAY_SIZE(iommu_ports));
                if (ret)
                        return ret;
        }

        gpu->memptrs_bo = msm_gem_new(drm, sizeof(*gpu->memptrs),
                        MSM_BO_UNCACHED);
        if (IS_ERR(gpu->memptrs_bo)) {
                ret = PTR_ERR(gpu->memptrs_bo);
                gpu->memptrs_bo = NULL;
                dev_err(drm->dev, "could not allocate memptrs: %d\n", ret);
                return ret;
        }

        gpu->memptrs = msm_gem_vaddr_locked(gpu->memptrs_bo);
        if (!gpu->memptrs) {
                dev_err(drm->dev, "could not vmap memptrs\n");
                return -ENOMEM;
        }

        ret = msm_gem_get_iova_locked(gpu->memptrs_bo, gpu->base.id,
                        &gpu->memptrs_iova);
        if (ret) {
                dev_err(drm->dev, "could not map memptrs: %d\n", ret);
                return ret;
        }

        return 0;
}

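/* Undo adreno_gpu_init(): drop the memptrs buffer, release the firmware,
 * and tear down the base msm_gpu state:
 */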
void adreno_gpu_cleanup(struct adreno_gpu *gpu)
{
        if (gpu->memptrs_bo) {
                if (gpu->memptrs_iova)
                        msm_gem_put_iova(gpu->memptrs_bo, gpu->base.id);
                drm_gem_object_unreference(gpu->memptrs_bo);
        }
        if (gpu->pm4)
                release_firmware(gpu->pm4);
        if (gpu->pfp)
                release_firmware(gpu->pfp);
        msm_gpu_cleanup(&gpu->base);
}