/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * Copyright (c) 2014 The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "adreno_gpu.h"
#include "msm_gem.h"
#include "msm_mmu.h"

#define RB_SIZE    SZ_32K
#define RB_BLKSIZE 32

int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);

	switch (param) {
	case MSM_PARAM_GPU_ID:
		*value = adreno_gpu->info->revn;
		return 0;
	case MSM_PARAM_GMEM_SIZE:
		*value = adreno_gpu->gmem;
		return 0;
	case MSM_PARAM_GMEM_BASE:
		*value = 0x100000;
		return 0;
	case MSM_PARAM_CHIP_ID:
		*value = adreno_gpu->rev.patchid |
				(adreno_gpu->rev.minor << 8) |
				(adreno_gpu->rev.major << 16) |
				(adreno_gpu->rev.core << 24);
		return 0;
	case MSM_PARAM_MAX_FREQ:
		*value = adreno_gpu->base.fast_rate;
		return 0;
	case MSM_PARAM_TIMESTAMP:
		if (adreno_gpu->funcs->get_timestamp)
			return adreno_gpu->funcs->get_timestamp(gpu, value);
		return -EINVAL;
	default:
		DBG("%s: invalid param: %u", gpu->name, param);
		return -EINVAL;
	}
}
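
/*
 * Illustrative note (not in the original source): MSM_PARAM_CHIP_ID above
 * packs the revision as core.major.minor.patchid, one byte each. For
 * example, an a330 with rev 3.3.0.2 (core=3, major=3, minor=0, patchid=2)
 * reports (3 << 24) | (3 << 16) | (0 << 8) | 2 = 0x03030002.
 */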

int adreno_hw_init(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	int ret;

	DBG("%s", gpu->name);

	ret = msm_gem_get_iova(gpu->rb->bo, gpu->aspace, &gpu->rb_iova);
	if (ret) {
		gpu->rb_iova = 0;
		dev_err(gpu->dev->dev, "could not map ringbuffer: %d\n", ret);
		return ret;
	}

	/* reset ringbuffer: */
	gpu->rb->cur = gpu->rb->start;

	/* reset completed fence seqno: */
	adreno_gpu->memptrs->fence = gpu->fctx->completed_fence;
	adreno_gpu->memptrs->rptr = 0;

	/* Setup REG_CP_RB_CNTL: */
	adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_CNTL,
			/* size is log2(quad-words): */
			AXXX_CP_RB_CNTL_BUFSZ(ilog2(gpu->rb->size / 8)) |
			AXXX_CP_RB_CNTL_BLKSZ(ilog2(RB_BLKSIZE / 8)) |
			(adreno_is_a430(adreno_gpu) ? AXXX_CP_RB_CNTL_NO_UPDATE : 0));

	/* Setup ringbuffer address: */
	adreno_gpu_write64(adreno_gpu, REG_ADRENO_CP_RB_BASE,
			REG_ADRENO_CP_RB_BASE_HI, gpu->rb_iova);

	if (!adreno_is_a430(adreno_gpu)) {
		adreno_gpu_write64(adreno_gpu, REG_ADRENO_CP_RB_RPTR_ADDR,
				REG_ADRENO_CP_RB_RPTR_ADDR_HI,
				rbmemptr(adreno_gpu, rptr));
	}

	return 0;
}
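
/*
 * Worked example (for clarity, not in the original): with RB_SIZE = SZ_32K
 * the BUFSZ field programmed above is ilog2(32768 / 8) = ilog2(4096) = 12,
 * i.e. the ring size expressed as log2 of the number of 8-byte quad-words.
 */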

/* Note: ring->start and ring->cur are u32 pointers, so this is the write
 * pointer expressed as a dword offset into the ring:
 */
static uint32_t get_wptr(struct msm_ringbuffer *ring)
{
	return ring->cur - ring->start;
}

/* Use this helper to read rptr, since a430 doesn't update rptr in memory */
static uint32_t get_rptr(struct adreno_gpu *adreno_gpu)
{
	if (adreno_is_a430(adreno_gpu))
		return adreno_gpu->memptrs->rptr = adreno_gpu_read(
			adreno_gpu, REG_ADRENO_CP_RB_RPTR);
	else
		return adreno_gpu->memptrs->rptr;
}

uint32_t adreno_last_fence(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	return adreno_gpu->memptrs->fence;
}

void adreno_recover(struct msm_gpu *gpu)
{
	struct drm_device *dev = gpu->dev;
	int ret;

	// XXX pm-runtime?? we *need* the device to be off after this
	// so maybe continuing to call ->pm_suspend/resume() is better?

	gpu->funcs->pm_suspend(gpu);
	gpu->funcs->pm_resume(gpu);

	ret = msm_gpu_hw_init(gpu);
	if (ret) {
		dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
		/* hmm, oh well? */
	}
}

void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
		struct msm_file_private *ctx)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct msm_drm_private *priv = gpu->dev->dev_private;
	struct msm_ringbuffer *ring = gpu->rb;
	unsigned i;

	for (i = 0; i < submit->nr_cmds; i++) {
		switch (submit->cmd[i].type) {
		case MSM_SUBMIT_CMD_IB_TARGET_BUF:
			/* ignore IB-targets */
			break;
		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
			/* ignore if there has not been a ctx switch: */
			if (priv->lastctx == ctx)
				break;
			/* fall-thru */
		case MSM_SUBMIT_CMD_BUF:
			OUT_PKT3(ring, adreno_is_a430(adreno_gpu) ?
				CP_INDIRECT_BUFFER_PFE : CP_INDIRECT_BUFFER_PFD, 2);
			OUT_RING(ring, submit->cmd[i].iova);
			OUT_RING(ring, submit->cmd[i].size);
			OUT_PKT2(ring);
			break;
		}
	}

	OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1);
	OUT_RING(ring, submit->fence->seqno);

	if (adreno_is_a3xx(adreno_gpu) || adreno_is_a4xx(adreno_gpu)) {
		/* Flush HLSQ lazy updates to make sure there is nothing
		 * pending for indirect loads after the timestamp has
		 * passed:
		 */
		OUT_PKT3(ring, CP_EVENT_WRITE, 1);
		OUT_RING(ring, HLSQ_FLUSH);

		OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1);
		OUT_RING(ring, 0x00000000);
	}

	OUT_PKT3(ring, CP_EVENT_WRITE, 3);
	OUT_RING(ring, CACHE_FLUSH_TS);
	OUT_RING(ring, rbmemptr(adreno_gpu, fence));
	OUT_RING(ring, submit->fence->seqno);

	/* we could maybe be clever and only CP_COND_EXEC the interrupt: */
	OUT_PKT3(ring, CP_INTERRUPT, 1);
	OUT_RING(ring, 0x80000000);

	/* Workaround for missing irq issue on 8x16/a306. Unsure if the
	 * root cause is a platform issue or some a306 quirk, but this
	 * keeps things humming along:
	 */
	if (adreno_is_a306(adreno_gpu)) {
		OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1);
		OUT_RING(ring, 0x00000000);
		OUT_PKT3(ring, CP_INTERRUPT, 1);
		OUT_RING(ring, 0x80000000);
	}

#if 0
	if (adreno_is_a3xx(adreno_gpu)) {
		/* Dummy set-constant to trigger context rollover */
		OUT_PKT3(ring, CP_SET_CONSTANT, 2);
		OUT_RING(ring, CP_REG(REG_A3XX_HLSQ_CL_KERNEL_GROUP_X_REG));
		OUT_RING(ring, 0x00000000);
	}
#endif

	gpu->funcs->flush(gpu);
}
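
/*
 * For reference (summarizing the helpers used above, not in the original):
 * OUT_PKT0(ring, reg, n) emits a type-0 packet that writes n payload dwords
 * to consecutive registers starting at reg; OUT_PKT3(ring, op, n) emits a
 * type-3 packet carrying CP opcode op with n payload dwords; OUT_PKT2(ring)
 * emits a type-2 no-op used for padding.
 */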

void adreno_flush(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	uint32_t wptr;

	/*
	 * Mask the wptr value that we calculate to fit in the HW range. This is
	 * to account for the possibility that the last command fit exactly into
	 * the ringbuffer and rb->next hasn't wrapped to zero yet.
	 */
	wptr = get_wptr(gpu->rb) & ((gpu->rb->size / 4) - 1);

	/* ensure writes to ringbuffer have hit system memory: */
	mb();

	adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_WPTR, wptr);
}
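
/*
 * Worked example (illustrative): with RB_SIZE = SZ_32K the ring holds
 * 32768 / 4 = 8192 dwords, so the mask is 8191. If the last command ends
 * exactly at the end of the ring, get_wptr() returns 8192, which the mask
 * wraps back to 0 as the hardware expects.
 */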

bool adreno_idle(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	uint32_t wptr = get_wptr(gpu->rb);

	/* wait for CP to drain ringbuffer: */
	if (!spin_until(get_rptr(adreno_gpu) == wptr))
		return true;

	/* TODO maybe we need to reset GPU here to recover from hang? */
	DRM_ERROR("%s: timeout waiting to drain ringbuffer!\n", gpu->name);
	return false;
}
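
/*
 * Note (assumption about this tree's helpers): spin_until() is a busy-wait
 * macro from msm_drv.h that evaluates to 0 if the condition became true
 * before the timeout, and non-zero otherwise; hence the early return true
 * above when the ring drains in time.
 */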

#ifdef CONFIG_DEBUG_FS
void adreno_show(struct msm_gpu *gpu, struct seq_file *m)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	int i;

	seq_printf(m, "revision: %d (%d.%d.%d.%d)\n",
			adreno_gpu->info->revn, adreno_gpu->rev.core,
			adreno_gpu->rev.major, adreno_gpu->rev.minor,
			adreno_gpu->rev.patchid);

	seq_printf(m, "fence: %d/%d\n", adreno_gpu->memptrs->fence,
			gpu->fctx->last_fence);
	seq_printf(m, "rptr: %d\n", get_rptr(adreno_gpu));
	seq_printf(m, "rb wptr: %d\n", get_wptr(gpu->rb));

	/* dump these out in a form that can be parsed by demsm: */
	seq_printf(m, "IO:region %s 00000000 00020000\n", gpu->name);
	for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) {
		uint32_t start = adreno_gpu->registers[i];
		uint32_t end = adreno_gpu->registers[i+1];
		uint32_t addr;

		for (addr = start; addr <= end; addr++) {
			uint32_t val = gpu_read(gpu, addr);
			seq_printf(m, "IO:R %08x %08x\n", addr<<2, val);
		}
	}
}
#endif

/* Dump common gpu status and scratch registers on any hang, to make
 * the hangcheck logs more useful. The scratch registers seem always
 * safe to read when GPU has hung (unlike some other regs, depending
 * on how the GPU hung), and they are useful to match up to cmdstream
 * dumps when debugging hangs:
 */
void adreno_dump_info(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);

	printk("revision: %d (%d.%d.%d.%d)\n",
			adreno_gpu->info->revn, adreno_gpu->rev.core,
			adreno_gpu->rev.major, adreno_gpu->rev.minor,
			adreno_gpu->rev.patchid);

	printk("fence: %d/%d\n", adreno_gpu->memptrs->fence,
			gpu->fctx->last_fence);
	printk("rptr: %d\n", get_rptr(adreno_gpu));
	printk("rb wptr: %d\n", get_wptr(gpu->rb));
}

/* would be nice to not have to duplicate the _show() stuff with printk(): */
void adreno_dump(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	int i;

	/* dump these out in a form that can be parsed by demsm: */
	printk("IO:region %s 00000000 00020000\n", gpu->name);
	for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) {
		uint32_t start = adreno_gpu->registers[i];
		uint32_t end = adreno_gpu->registers[i+1];
		uint32_t addr;

		for (addr = start; addr <= end; addr++) {
			uint32_t val = gpu_read(gpu, addr);
			printk("IO:R %08x %08x\n", addr<<2, val);
		}
	}
}
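
/*
 * Note (illustrative): the register tables hold dword offsets, so the dump
 * loops above shift addr left by 2 to print byte addresses for demsm; e.g.
 * the register at dword offset 0x123 is printed as byte address 0x48c.
 */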

static uint32_t ring_freewords(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	uint32_t size = gpu->rb->size / 4;
	uint32_t wptr = get_wptr(gpu->rb);
	uint32_t rptr = get_rptr(adreno_gpu);
	return (rptr + (size - 1) - wptr) % size;
}
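
/*
 * Worked example (illustrative): with an 8192-dword ring, rptr = 100 and
 * wptr = 200 gives (100 + 8191 - 200) % 8192 = 8091 free dwords. Using
 * size - 1 rather than size keeps one dword in reserve, so a completely
 * full ring is never confused with an empty one (wptr == rptr).
 */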

void adreno_wait_ring(struct msm_gpu *gpu, uint32_t ndwords)
{
	if (spin_until(ring_freewords(gpu) >= ndwords))
		DRM_ERROR("%s: timeout waiting for ringbuffer space\n", gpu->name);
}

static const char *iommu_ports[] = {
		"gfx3d_user", "gfx3d_priv",
		"gfx3d1_user", "gfx3d1_priv",
};

int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
		struct adreno_gpu *adreno_gpu, const struct adreno_gpu_funcs *funcs)
{
	struct adreno_platform_config *config = pdev->dev.platform_data;
	struct msm_gpu_config adreno_gpu_config = { 0 };
	struct msm_gpu *gpu = &adreno_gpu->base;
	int ret;

	adreno_gpu->funcs = funcs;
	adreno_gpu->info = adreno_info(config->rev);
	adreno_gpu->gmem = adreno_gpu->info->gmem;
	adreno_gpu->revn = adreno_gpu->info->revn;
	adreno_gpu->rev = config->rev;

	gpu->fast_rate = config->fast_rate;
	gpu->bus_freq = config->bus_freq;
#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
	gpu->bus_scale_table = config->bus_scale_table;
#endif

	DBG("fast_rate=%u, slow_rate=27000000, bus_freq=%u",
			gpu->fast_rate, gpu->bus_freq);

	adreno_gpu_config.ioname = "kgsl_3d0_reg_memory";
	adreno_gpu_config.irqname = "kgsl_3d0_irq";

	adreno_gpu_config.va_start = SZ_16M;
	adreno_gpu_config.va_end = 0xffffffff;

	adreno_gpu_config.ringsz = RB_SIZE;

	ret = msm_gpu_init(drm, pdev, &adreno_gpu->base, &funcs->base,
			adreno_gpu->info->name, &adreno_gpu_config);
	if (ret)
		return ret;

	pm_runtime_set_autosuspend_delay(&pdev->dev, DRM_MSM_INACTIVE_PERIOD);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	ret = request_firmware(&adreno_gpu->pm4, adreno_gpu->info->pm4fw, drm->dev);
	if (ret) {
		dev_err(drm->dev, "failed to load %s PM4 firmware: %d\n",
				adreno_gpu->info->pm4fw, ret);
		return ret;
	}

	ret = request_firmware(&adreno_gpu->pfp, adreno_gpu->info->pfpfw, drm->dev);
	if (ret) {
		dev_err(drm->dev, "failed to load %s PFP firmware: %d\n",
				adreno_gpu->info->pfpfw, ret);
		return ret;
	}

	if (gpu->aspace && gpu->aspace->mmu) {
		struct msm_mmu *mmu = gpu->aspace->mmu;
		ret = mmu->funcs->attach(mmu, iommu_ports,
				ARRAY_SIZE(iommu_ports));
		if (ret)
			return ret;
	}

	adreno_gpu->memptrs_bo = msm_gem_new(drm, sizeof(*adreno_gpu->memptrs),
			MSM_BO_UNCACHED);
	if (IS_ERR(adreno_gpu->memptrs_bo)) {
		ret = PTR_ERR(adreno_gpu->memptrs_bo);
		adreno_gpu->memptrs_bo = NULL;
		dev_err(drm->dev, "could not allocate memptrs: %d\n", ret);
		return ret;
	}

	adreno_gpu->memptrs = msm_gem_get_vaddr(adreno_gpu->memptrs_bo);
	if (IS_ERR(adreno_gpu->memptrs)) {
		dev_err(drm->dev, "could not vmap memptrs\n");
		return -ENOMEM;
	}

	ret = msm_gem_get_iova(adreno_gpu->memptrs_bo, gpu->aspace,
			&adreno_gpu->memptrs_iova);
	if (ret) {
		dev_err(drm->dev, "could not map memptrs: %d\n", ret);
		return ret;
	}

	return 0;
}
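
/*
 * Usage note (descriptive, based on this driver's structure): the
 * per-generation probe code (e.g. a3xx_gpu_init()) fills in an
 * adreno_gpu_funcs table and then calls adreno_gpu_init() to do the
 * common ringbuffer, firmware, and memptrs setup above.
 */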

void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu)
{
	struct msm_gpu *gpu = &adreno_gpu->base;

	if (adreno_gpu->memptrs_bo) {
		if (adreno_gpu->memptrs)
			msm_gem_put_vaddr(adreno_gpu->memptrs_bo);

		if (adreno_gpu->memptrs_iova)
			msm_gem_put_iova(adreno_gpu->memptrs_bo, gpu->aspace);

		drm_gem_object_unreference_unlocked(adreno_gpu->memptrs_bo);
	}
	release_firmware(adreno_gpu->pm4);
	release_firmware(adreno_gpu->pfp);

	msm_gpu_cleanup(gpu);

	if (gpu->aspace) {
		gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu,
				iommu_ports, ARRAY_SIZE(iommu_ports));
		msm_gem_address_space_put(gpu->aspace);
	}
}
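
/*
 * Note (descriptive): cleanup roughly unwinds adreno_gpu_init() in reverse:
 * drop the memptrs vaddr, iova, and BO, release both firmware images, tear
 * down the common msm_gpu state, and finally detach and put the address
 * space.
 */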