]>
Commit | Line | Data |
---|---|---|
7198e6b0 RC |
1 | /* |
2 | * Copyright (C) 2013 Red Hat | |
3 | * Author: Rob Clark <robdclark@gmail.com> | |
4 | * | |
5 | * This program is free software; you can redistribute it and/or modify it | |
6 | * under the terms of the GNU General Public License version 2 as published by | |
7 | * the Free Software Foundation. | |
8 | * | |
9 | * This program is distributed in the hope that it will be useful, but WITHOUT | |
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | |
12 | * more details. | |
13 | * | |
14 | * You should have received a copy of the GNU General Public License along with | |
15 | * this program. If not, see <http://www.gnu.org/licenses/>. | |
16 | */ | |
17 | ||
18 | #include "msm_gpu.h" | |
19 | #include "msm_gem.h" | |
871d812a | 20 | #include "msm_mmu.h" |
fde5de6c | 21 | #include "msm_fence.h" |
4241db42 | 22 | #include "msm_gpu_trace.h" |
7198e6b0 | 23 | |
c0fec7f5 | 24 | #include <generated/utsrelease.h> |
18bb8a6c | 25 | #include <linux/string_helpers.h> |
f91c14ab JC |
26 | #include <linux/pm_opp.h> |
27 | #include <linux/devfreq.h> | |
c0fec7f5 | 28 | #include <linux/devcoredump.h> |
7198e6b0 RC |
29 | |
30 | /* | |
31 | * Power Management: | |
32 | */ | |
33 | ||
f91c14ab JC |
34 | static int msm_devfreq_target(struct device *dev, unsigned long *freq, |
35 | u32 flags) | |
36 | { | |
37 | struct msm_gpu *gpu = platform_get_drvdata(to_platform_device(dev)); | |
38 | struct dev_pm_opp *opp; | |
39 | ||
40 | opp = devfreq_recommended_opp(dev, freq, flags); | |
41 | ||
42 | if (IS_ERR(opp)) | |
43 | return PTR_ERR(opp); | |
44 | ||
de0a3d09 SM |
45 | if (gpu->funcs->gpu_set_freq) |
46 | gpu->funcs->gpu_set_freq(gpu, (u64)*freq); | |
47 | else | |
48 | clk_set_rate(gpu->core_clk, *freq); | |
49 | ||
f91c14ab JC |
50 | dev_pm_opp_put(opp); |
51 | ||
52 | return 0; | |
53 | } | |
54 | ||
55 | static int msm_devfreq_get_dev_status(struct device *dev, | |
56 | struct devfreq_dev_status *status) | |
57 | { | |
58 | struct msm_gpu *gpu = platform_get_drvdata(to_platform_device(dev)); | |
f91c14ab JC |
59 | ktime_t time; |
60 | ||
de0a3d09 SM |
61 | if (gpu->funcs->gpu_get_freq) |
62 | status->current_frequency = gpu->funcs->gpu_get_freq(gpu); | |
63 | else | |
64 | status->current_frequency = clk_get_rate(gpu->core_clk); | |
f91c14ab | 65 | |
de0a3d09 | 66 | status->busy_time = gpu->funcs->gpu_busy(gpu); |
f91c14ab JC |
67 | |
68 | time = ktime_get(); | |
69 | status->total_time = ktime_us_delta(time, gpu->devfreq.time); | |
70 | gpu->devfreq.time = time; | |
71 | ||
72 | return 0; | |
73 | } | |
74 | ||
75 | static int msm_devfreq_get_cur_freq(struct device *dev, unsigned long *freq) | |
76 | { | |
77 | struct msm_gpu *gpu = platform_get_drvdata(to_platform_device(dev)); | |
78 | ||
de0a3d09 SM |
79 | if (gpu->funcs->gpu_get_freq) |
80 | *freq = gpu->funcs->gpu_get_freq(gpu); | |
81 | else | |
82 | *freq = clk_get_rate(gpu->core_clk); | |
f91c14ab JC |
83 | |
84 | return 0; | |
85 | } | |
86 | ||
87 | static struct devfreq_dev_profile msm_devfreq_profile = { | |
88 | .polling_ms = 10, | |
89 | .target = msm_devfreq_target, | |
90 | .get_dev_status = msm_devfreq_get_dev_status, | |
91 | .get_cur_freq = msm_devfreq_get_cur_freq, | |
92 | }; | |
93 | ||
94 | static void msm_devfreq_init(struct msm_gpu *gpu) | |
95 | { | |
96 | /* We need target support to do devfreq */ | |
de0a3d09 | 97 | if (!gpu->funcs->gpu_busy) |
f91c14ab JC |
98 | return; |
99 | ||
100 | msm_devfreq_profile.initial_freq = gpu->fast_rate; | |
101 | ||
102 | /* | |
103 | * Don't set the freq_table or max_state and let devfreq build the table | |
104 | * from OPP | |
105 | */ | |
106 | ||
107 | gpu->devfreq.devfreq = devm_devfreq_add_device(&gpu->pdev->dev, | |
108 | &msm_devfreq_profile, "simple_ondemand", NULL); | |
109 | ||
110 | if (IS_ERR(gpu->devfreq.devfreq)) { | |
6a41da17 | 111 | DRM_DEV_ERROR(&gpu->pdev->dev, "Couldn't initialize GPU devfreq\n"); |
f91c14ab JC |
112 | gpu->devfreq.devfreq = NULL; |
113 | } | |
d3fa91c9 SM |
114 | |
115 | devfreq_suspend_device(gpu->devfreq.devfreq); | |
f91c14ab JC |
116 | } |
117 | ||
7198e6b0 RC |
118 | static int enable_pwrrail(struct msm_gpu *gpu) |
119 | { | |
120 | struct drm_device *dev = gpu->dev; | |
121 | int ret = 0; | |
122 | ||
123 | if (gpu->gpu_reg) { | |
124 | ret = regulator_enable(gpu->gpu_reg); | |
125 | if (ret) { | |
6a41da17 | 126 | DRM_DEV_ERROR(dev->dev, "failed to enable 'gpu_reg': %d\n", ret); |
7198e6b0 RC |
127 | return ret; |
128 | } | |
129 | } | |
130 | ||
131 | if (gpu->gpu_cx) { | |
132 | ret = regulator_enable(gpu->gpu_cx); | |
133 | if (ret) { | |
6a41da17 | 134 | DRM_DEV_ERROR(dev->dev, "failed to enable 'gpu_cx': %d\n", ret); |
7198e6b0 RC |
135 | return ret; |
136 | } | |
137 | } | |
138 | ||
139 | return 0; | |
140 | } | |
141 | ||
142 | static int disable_pwrrail(struct msm_gpu *gpu) | |
143 | { | |
144 | if (gpu->gpu_cx) | |
145 | regulator_disable(gpu->gpu_cx); | |
146 | if (gpu->gpu_reg) | |
147 | regulator_disable(gpu->gpu_reg); | |
148 | return 0; | |
149 | } | |
150 | ||
151 | static int enable_clk(struct msm_gpu *gpu) | |
152 | { | |
98db803f JC |
153 | if (gpu->core_clk && gpu->fast_rate) |
154 | clk_set_rate(gpu->core_clk, gpu->fast_rate); | |
7198e6b0 | 155 | |
b5f103ab | 156 | /* Set the RBBM timer rate to 19.2Mhz */ |
98db803f JC |
157 | if (gpu->rbbmtimer_clk) |
158 | clk_set_rate(gpu->rbbmtimer_clk, 19200000); | |
b5f103ab | 159 | |
8e54eea5 | 160 | return clk_bulk_prepare_enable(gpu->nr_clocks, gpu->grp_clks); |
7198e6b0 RC |
161 | } |
162 | ||
163 | static int disable_clk(struct msm_gpu *gpu) | |
164 | { | |
8e54eea5 | 165 | clk_bulk_disable_unprepare(gpu->nr_clocks, gpu->grp_clks); |
7198e6b0 | 166 | |
bf5af4ae JC |
167 | /* |
168 | * Set the clock to a deliberately low rate. On older targets the clock | |
169 | * speed had to be non zero to avoid problems. On newer targets this | |
170 | * will be rounded down to zero anyway so it all works out. | |
171 | */ | |
98db803f JC |
172 | if (gpu->core_clk) |
173 | clk_set_rate(gpu->core_clk, 27000000); | |
89d777a5 | 174 | |
98db803f JC |
175 | if (gpu->rbbmtimer_clk) |
176 | clk_set_rate(gpu->rbbmtimer_clk, 0); | |
b5f103ab | 177 | |
7198e6b0 RC |
178 | return 0; |
179 | } | |
180 | ||
181 | static int enable_axi(struct msm_gpu *gpu) | |
182 | { | |
183 | if (gpu->ebi1_clk) | |
184 | clk_prepare_enable(gpu->ebi1_clk); | |
7198e6b0 RC |
185 | return 0; |
186 | } | |
187 | ||
188 | static int disable_axi(struct msm_gpu *gpu) | |
189 | { | |
190 | if (gpu->ebi1_clk) | |
191 | clk_disable_unprepare(gpu->ebi1_clk); | |
7198e6b0 RC |
192 | return 0; |
193 | } | |
194 | ||
de0a3d09 SM |
195 | void msm_gpu_resume_devfreq(struct msm_gpu *gpu) |
196 | { | |
197 | gpu->devfreq.busy_cycles = 0; | |
198 | gpu->devfreq.time = ktime_get(); | |
199 | ||
200 | devfreq_resume_device(gpu->devfreq.devfreq); | |
201 | } | |
202 | ||
7198e6b0 RC |
203 | int msm_gpu_pm_resume(struct msm_gpu *gpu) |
204 | { | |
205 | int ret; | |
206 | ||
eeb75474 | 207 | DBG("%s", gpu->name); |
7198e6b0 RC |
208 | |
209 | ret = enable_pwrrail(gpu); | |
210 | if (ret) | |
211 | return ret; | |
212 | ||
213 | ret = enable_clk(gpu); | |
214 | if (ret) | |
215 | return ret; | |
216 | ||
217 | ret = enable_axi(gpu); | |
218 | if (ret) | |
219 | return ret; | |
220 | ||
de0a3d09 | 221 | msm_gpu_resume_devfreq(gpu); |
f91c14ab | 222 | |
eeb75474 RC |
223 | gpu->needs_hw_init = true; |
224 | ||
7198e6b0 RC |
225 | return 0; |
226 | } | |
227 | ||
228 | int msm_gpu_pm_suspend(struct msm_gpu *gpu) | |
229 | { | |
230 | int ret; | |
231 | ||
eeb75474 | 232 | DBG("%s", gpu->name); |
7198e6b0 | 233 | |
de0a3d09 | 234 | devfreq_suspend_device(gpu->devfreq.devfreq); |
f91c14ab | 235 | |
7198e6b0 RC |
236 | ret = disable_axi(gpu); |
237 | if (ret) | |
238 | return ret; | |
239 | ||
240 | ret = disable_clk(gpu); | |
241 | if (ret) | |
242 | return ret; | |
243 | ||
244 | ret = disable_pwrrail(gpu); | |
245 | if (ret) | |
246 | return ret; | |
247 | ||
248 | return 0; | |
249 | } | |
250 | ||
eeb75474 | 251 | int msm_gpu_hw_init(struct msm_gpu *gpu) |
37d77c3a | 252 | { |
eeb75474 | 253 | int ret; |
37d77c3a | 254 | |
cb1e3818 RC |
255 | WARN_ON(!mutex_is_locked(&gpu->dev->struct_mutex)); |
256 | ||
eeb75474 RC |
257 | if (!gpu->needs_hw_init) |
258 | return 0; | |
37d77c3a | 259 | |
eeb75474 RC |
260 | disable_irq(gpu->irq); |
261 | ret = gpu->funcs->hw_init(gpu); | |
262 | if (!ret) | |
263 | gpu->needs_hw_init = false; | |
264 | enable_irq(gpu->irq); | |
37d77c3a | 265 | |
eeb75474 | 266 | return ret; |
37d77c3a RC |
267 | } |
268 | ||
c0fec7f5 JC |
269 | #ifdef CONFIG_DEV_COREDUMP |
270 | static ssize_t msm_gpu_devcoredump_read(char *buffer, loff_t offset, | |
271 | size_t count, void *data, size_t datalen) | |
272 | { | |
273 | struct msm_gpu *gpu = data; | |
274 | struct drm_print_iterator iter; | |
275 | struct drm_printer p; | |
276 | struct msm_gpu_state *state; | |
277 | ||
278 | state = msm_gpu_crashstate_get(gpu); | |
279 | if (!state) | |
280 | return 0; | |
281 | ||
282 | iter.data = buffer; | |
283 | iter.offset = 0; | |
284 | iter.start = offset; | |
285 | iter.remain = count; | |
286 | ||
287 | p = drm_coredump_printer(&iter); | |
288 | ||
289 | drm_printf(&p, "---\n"); | |
290 | drm_printf(&p, "kernel: " UTS_RELEASE "\n"); | |
291 | drm_printf(&p, "module: " KBUILD_MODNAME "\n"); | |
3530a17f AB |
292 | drm_printf(&p, "time: %lld.%09ld\n", |
293 | state->time.tv_sec, state->time.tv_nsec); | |
c0fec7f5 JC |
294 | if (state->comm) |
295 | drm_printf(&p, "comm: %s\n", state->comm); | |
296 | if (state->cmd) | |
297 | drm_printf(&p, "cmdline: %s\n", state->cmd); | |
298 | ||
299 | gpu->funcs->show(gpu, state, &p); | |
300 | ||
301 | msm_gpu_crashstate_put(gpu); | |
302 | ||
303 | return count - iter.remain; | |
304 | } | |
305 | ||
306 | static void msm_gpu_devcoredump_free(void *data) | |
307 | { | |
308 | struct msm_gpu *gpu = data; | |
309 | ||
310 | msm_gpu_crashstate_put(gpu); | |
311 | } | |
312 | ||
cdb95931 JC |
313 | static void msm_gpu_crashstate_get_bo(struct msm_gpu_state *state, |
314 | struct msm_gem_object *obj, u64 iova, u32 flags) | |
315 | { | |
316 | struct msm_gpu_state_bo *state_bo = &state->bos[state->nr_bos]; | |
317 | ||
318 | /* Don't record write only objects */ | |
cdb95931 JC |
319 | state_bo->size = obj->base.size; |
320 | state_bo->iova = iova; | |
321 | ||
896a248a JC |
322 | /* Only store data for non imported buffer objects marked for read */ |
323 | if ((flags & MSM_SUBMIT_BO_READ) && !obj->base.import_attach) { | |
cdb95931 JC |
324 | void *ptr; |
325 | ||
326 | state_bo->data = kvmalloc(obj->base.size, GFP_KERNEL); | |
327 | if (!state_bo->data) | |
896a248a | 328 | goto out; |
cdb95931 JC |
329 | |
330 | ptr = msm_gem_get_vaddr_active(&obj->base); | |
331 | if (IS_ERR(ptr)) { | |
332 | kvfree(state_bo->data); | |
896a248a JC |
333 | state_bo->data = NULL; |
334 | goto out; | |
cdb95931 JC |
335 | } |
336 | ||
337 | memcpy(state_bo->data, ptr, obj->base.size); | |
338 | msm_gem_put_vaddr(&obj->base); | |
339 | } | |
896a248a | 340 | out: |
cdb95931 JC |
341 | state->nr_bos++; |
342 | } | |
343 | ||
344 | static void msm_gpu_crashstate_capture(struct msm_gpu *gpu, | |
345 | struct msm_gem_submit *submit, char *comm, char *cmd) | |
c0fec7f5 JC |
346 | { |
347 | struct msm_gpu_state *state; | |
348 | ||
349 | /* Only save one crash state at a time */ | |
350 | if (gpu->crashstate) | |
351 | return; | |
352 | ||
353 | state = gpu->funcs->gpu_state_get(gpu); | |
354 | if (IS_ERR_OR_NULL(state)) | |
355 | return; | |
356 | ||
357 | /* Fill in the additional crash state information */ | |
358 | state->comm = kstrdup(comm, GFP_KERNEL); | |
359 | state->cmd = kstrdup(cmd, GFP_KERNEL); | |
360 | ||
cdb95931 JC |
361 | if (submit) { |
362 | int i; | |
363 | ||
896a248a | 364 | state->bos = kcalloc(submit->nr_cmds, |
cdb95931 JC |
365 | sizeof(struct msm_gpu_state_bo), GFP_KERNEL); |
366 | ||
896a248a JC |
367 | for (i = 0; state->bos && i < submit->nr_cmds; i++) { |
368 | int idx = submit->cmd[i].idx; | |
369 | ||
370 | msm_gpu_crashstate_get_bo(state, submit->bos[idx].obj, | |
371 | submit->bos[idx].iova, submit->bos[idx].flags); | |
372 | } | |
cdb95931 JC |
373 | } |
374 | ||
c0fec7f5 JC |
375 | /* Set the active crash state to be dumped on failure */ |
376 | gpu->crashstate = state; | |
377 | ||
378 | /* FIXME: Release the crashstate if this errors out? */ | |
379 | dev_coredumpm(gpu->dev->dev, THIS_MODULE, gpu, 0, GFP_KERNEL, | |
380 | msm_gpu_devcoredump_read, msm_gpu_devcoredump_free); | |
381 | } | |
382 | #else | |
6969019f AR |
383 | static void msm_gpu_crashstate_capture(struct msm_gpu *gpu, |
384 | struct msm_gem_submit *submit, char *comm, char *cmd) | |
c0fec7f5 JC |
385 | { |
386 | } | |
387 | #endif | |
388 | ||
bd6f82d8 RC |
389 | /* |
390 | * Hangcheck detection for locked gpu: | |
391 | */ | |
392 | ||
f97decac JC |
393 | static void update_fences(struct msm_gpu *gpu, struct msm_ringbuffer *ring, |
394 | uint32_t fence) | |
395 | { | |
396 | struct msm_gem_submit *submit; | |
397 | ||
398 | list_for_each_entry(submit, &ring->submits, node) { | |
399 | if (submit->seqno > fence) | |
400 | break; | |
401 | ||
402 | msm_update_fence(submit->ring->fctx, | |
403 | submit->fence->seqno); | |
404 | } | |
405 | } | |
406 | ||
18bb8a6c RC |
407 | static struct msm_gem_submit * |
408 | find_submit(struct msm_ringbuffer *ring, uint32_t fence) | |
409 | { | |
410 | struct msm_gem_submit *submit; | |
411 | ||
412 | WARN_ON(!mutex_is_locked(&ring->gpu->dev->struct_mutex)); | |
413 | ||
414 | list_for_each_entry(submit, &ring->submits, node) | |
415 | if (submit->seqno == fence) | |
416 | return submit; | |
417 | ||
418 | return NULL; | |
419 | } | |
420 | ||
b6295f9a | 421 | static void retire_submits(struct msm_gpu *gpu); |
1a370be9 | 422 | |
bd6f82d8 RC |
423 | static void recover_worker(struct work_struct *work) |
424 | { | |
425 | struct msm_gpu *gpu = container_of(work, struct msm_gpu, recover_work); | |
426 | struct drm_device *dev = gpu->dev; | |
96169f4e | 427 | struct msm_drm_private *priv = dev->dev_private; |
4816b626 | 428 | struct msm_gem_submit *submit; |
f97decac | 429 | struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu); |
65a3c274 | 430 | char *comm = NULL, *cmd = NULL; |
f97decac JC |
431 | int i; |
432 | ||
bd6f82d8 | 433 | mutex_lock(&dev->struct_mutex); |
1a370be9 | 434 | |
6a41da17 | 435 | DRM_DEV_ERROR(dev->dev, "%s: hangcheck recover!\n", gpu->name); |
f97decac | 436 | |
96169f4e | 437 | submit = find_submit(cur_ring, cur_ring->memptrs->fence + 1); |
18bb8a6c RC |
438 | if (submit) { |
439 | struct task_struct *task; | |
440 | ||
441 | rcu_read_lock(); | |
442 | task = pid_task(submit->pid, PIDTYPE_PID); | |
443 | if (task) { | |
65a3c274 | 444 | comm = kstrdup(task->comm, GFP_ATOMIC); |
18bb8a6c RC |
445 | |
446 | /* | |
447 | * So slightly annoying, in other paths like | |
448 | * mmap'ing gem buffers, mmap_sem is acquired | |
449 | * before struct_mutex, which means we can't | |
450 | * hold struct_mutex across the call to | |
451 | * get_cmdline(). But submits are retired | |
452 | * from the same in-order workqueue, so we can | |
453 | * safely drop the lock here without worrying | |
454 | * about the submit going away. | |
455 | */ | |
456 | mutex_unlock(&dev->struct_mutex); | |
65a3c274 | 457 | cmd = kstrdup_quotable_cmdline(task, GFP_ATOMIC); |
18bb8a6c | 458 | mutex_lock(&dev->struct_mutex); |
65a3c274 JC |
459 | } |
460 | rcu_read_unlock(); | |
18bb8a6c | 461 | |
65a3c274 | 462 | if (comm && cmd) { |
6a41da17 | 463 | DRM_DEV_ERROR(dev->dev, "%s: offending task: %s (%s)\n", |
65a3c274 | 464 | gpu->name, comm, cmd); |
96169f4e RC |
465 | |
466 | msm_rd_dump_submit(priv->hangrd, submit, | |
65a3c274 JC |
467 | "offending task: %s (%s)", comm, cmd); |
468 | } else | |
96169f4e | 469 | msm_rd_dump_submit(priv->hangrd, submit, NULL); |
96169f4e RC |
470 | } |
471 | ||
c0fec7f5 JC |
472 | /* Record the crash state */ |
473 | pm_runtime_get_sync(&gpu->pdev->dev); | |
cdb95931 | 474 | msm_gpu_crashstate_capture(gpu, submit, comm, cmd); |
c0fec7f5 JC |
475 | pm_runtime_put_sync(&gpu->pdev->dev); |
476 | ||
65a3c274 JC |
477 | kfree(cmd); |
478 | kfree(comm); | |
96169f4e RC |
479 | |
480 | /* | |
481 | * Update all the rings with the latest and greatest fence.. this | |
482 | * needs to happen after msm_rd_dump_submit() to ensure that the | |
483 | * bo's referenced by the offending submit are still around. | |
484 | */ | |
7ddae82e | 485 | for (i = 0; i < gpu->nr_rings; i++) { |
96169f4e RC |
486 | struct msm_ringbuffer *ring = gpu->rb[i]; |
487 | ||
488 | uint32_t fence = ring->memptrs->fence; | |
18bb8a6c | 489 | |
96169f4e RC |
490 | /* |
491 | * For the current (faulting?) ring/submit advance the fence by | |
492 | * one more to clear the faulting submit | |
493 | */ | |
494 | if (ring == cur_ring) | |
495 | fence++; | |
496 | ||
497 | update_fences(gpu, ring, fence); | |
4816b626 RC |
498 | } |
499 | ||
500 | if (msm_gpu_active(gpu)) { | |
1a370be9 | 501 | /* retire completed submits, plus the one that hung: */ |
b6295f9a | 502 | retire_submits(gpu); |
1a370be9 | 503 | |
eeb75474 | 504 | pm_runtime_get_sync(&gpu->pdev->dev); |
37d77c3a | 505 | gpu->funcs->recover(gpu); |
eeb75474 | 506 | pm_runtime_put_sync(&gpu->pdev->dev); |
1a370be9 | 507 | |
f97decac JC |
508 | /* |
509 | * Replay all remaining submits starting with highest priority | |
510 | * ring | |
511 | */ | |
b1fc2839 | 512 | for (i = 0; i < gpu->nr_rings; i++) { |
f97decac JC |
513 | struct msm_ringbuffer *ring = gpu->rb[i]; |
514 | ||
515 | list_for_each_entry(submit, &ring->submits, node) | |
516 | gpu->funcs->submit(gpu, submit, NULL); | |
1a370be9 | 517 | } |
37d77c3a | 518 | } |
4816b626 | 519 | |
bd6f82d8 RC |
520 | mutex_unlock(&dev->struct_mutex); |
521 | ||
522 | msm_gpu_retire(gpu); | |
523 | } | |
524 | ||
525 | static void hangcheck_timer_reset(struct msm_gpu *gpu) | |
526 | { | |
527 | DBG("%s", gpu->name); | |
528 | mod_timer(&gpu->hangcheck_timer, | |
529 | round_jiffies_up(jiffies + DRM_MSM_HANGCHECK_JIFFIES)); | |
530 | } | |
531 | ||
e99e88a9 | 532 | static void hangcheck_handler(struct timer_list *t) |
bd6f82d8 | 533 | { |
e99e88a9 | 534 | struct msm_gpu *gpu = from_timer(gpu, t, hangcheck_timer); |
6b8819c8 RC |
535 | struct drm_device *dev = gpu->dev; |
536 | struct msm_drm_private *priv = dev->dev_private; | |
f97decac JC |
537 | struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu); |
538 | uint32_t fence = ring->memptrs->fence; | |
bd6f82d8 | 539 | |
f97decac | 540 | if (fence != ring->hangcheck_fence) { |
bd6f82d8 | 541 | /* some progress has been made.. ya! */ |
f97decac JC |
542 | ring->hangcheck_fence = fence; |
543 | } else if (fence < ring->seqno) { | |
bd6f82d8 | 544 | /* no progress and not done.. hung! */ |
f97decac | 545 | ring->hangcheck_fence = fence; |
6a41da17 | 546 | DRM_DEV_ERROR(dev->dev, "%s: hangcheck detected gpu lockup rb %d!\n", |
f97decac | 547 | gpu->name, ring->id); |
6a41da17 | 548 | DRM_DEV_ERROR(dev->dev, "%s: completed fence: %u\n", |
26791c48 | 549 | gpu->name, fence); |
6a41da17 | 550 | DRM_DEV_ERROR(dev->dev, "%s: submitted fence: %u\n", |
f97decac JC |
551 | gpu->name, ring->seqno); |
552 | ||
bd6f82d8 RC |
553 | queue_work(priv->wq, &gpu->recover_work); |
554 | } | |
555 | ||
556 | /* if still more pending work, reset the hangcheck timer: */ | |
f97decac | 557 | if (ring->seqno > ring->hangcheck_fence) |
bd6f82d8 | 558 | hangcheck_timer_reset(gpu); |
6b8819c8 RC |
559 | |
560 | /* workaround for missing irq: */ | |
561 | queue_work(priv->wq, &gpu->retire_work); | |
bd6f82d8 RC |
562 | } |
563 | ||
70c70f09 RC |
564 | /* |
565 | * Performance Counters: | |
566 | */ | |
567 | ||
568 | /* called under perf_lock */ | |
569 | static int update_hw_cntrs(struct msm_gpu *gpu, uint32_t ncntrs, uint32_t *cntrs) | |
570 | { | |
571 | uint32_t current_cntrs[ARRAY_SIZE(gpu->last_cntrs)]; | |
572 | int i, n = min(ncntrs, gpu->num_perfcntrs); | |
573 | ||
574 | /* read current values: */ | |
575 | for (i = 0; i < gpu->num_perfcntrs; i++) | |
576 | current_cntrs[i] = gpu_read(gpu, gpu->perfcntrs[i].sample_reg); | |
577 | ||
578 | /* update cntrs: */ | |
579 | for (i = 0; i < n; i++) | |
580 | cntrs[i] = current_cntrs[i] - gpu->last_cntrs[i]; | |
581 | ||
582 | /* save current values: */ | |
583 | for (i = 0; i < gpu->num_perfcntrs; i++) | |
584 | gpu->last_cntrs[i] = current_cntrs[i]; | |
585 | ||
586 | return n; | |
587 | } | |
588 | ||
589 | static void update_sw_cntrs(struct msm_gpu *gpu) | |
590 | { | |
591 | ktime_t time; | |
592 | uint32_t elapsed; | |
593 | unsigned long flags; | |
594 | ||
595 | spin_lock_irqsave(&gpu->perf_lock, flags); | |
596 | if (!gpu->perfcntr_active) | |
597 | goto out; | |
598 | ||
599 | time = ktime_get(); | |
600 | elapsed = ktime_to_us(ktime_sub(time, gpu->last_sample.time)); | |
601 | ||
602 | gpu->totaltime += elapsed; | |
603 | if (gpu->last_sample.active) | |
604 | gpu->activetime += elapsed; | |
605 | ||
606 | gpu->last_sample.active = msm_gpu_active(gpu); | |
607 | gpu->last_sample.time = time; | |
608 | ||
609 | out: | |
610 | spin_unlock_irqrestore(&gpu->perf_lock, flags); | |
611 | } | |
612 | ||
613 | void msm_gpu_perfcntr_start(struct msm_gpu *gpu) | |
614 | { | |
615 | unsigned long flags; | |
616 | ||
eeb75474 RC |
617 | pm_runtime_get_sync(&gpu->pdev->dev); |
618 | ||
70c70f09 RC |
619 | spin_lock_irqsave(&gpu->perf_lock, flags); |
620 | /* we could dynamically enable/disable perfcntr registers too.. */ | |
621 | gpu->last_sample.active = msm_gpu_active(gpu); | |
622 | gpu->last_sample.time = ktime_get(); | |
623 | gpu->activetime = gpu->totaltime = 0; | |
624 | gpu->perfcntr_active = true; | |
625 | update_hw_cntrs(gpu, 0, NULL); | |
626 | spin_unlock_irqrestore(&gpu->perf_lock, flags); | |
627 | } | |
628 | ||
629 | void msm_gpu_perfcntr_stop(struct msm_gpu *gpu) | |
630 | { | |
631 | gpu->perfcntr_active = false; | |
eeb75474 | 632 | pm_runtime_put_sync(&gpu->pdev->dev); |
70c70f09 RC |
633 | } |
634 | ||
635 | /* returns -errno or # of cntrs sampled */ | |
636 | int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime, | |
637 | uint32_t *totaltime, uint32_t ncntrs, uint32_t *cntrs) | |
638 | { | |
639 | unsigned long flags; | |
640 | int ret; | |
641 | ||
642 | spin_lock_irqsave(&gpu->perf_lock, flags); | |
643 | ||
644 | if (!gpu->perfcntr_active) { | |
645 | ret = -EINVAL; | |
646 | goto out; | |
647 | } | |
648 | ||
649 | *activetime = gpu->activetime; | |
650 | *totaltime = gpu->totaltime; | |
651 | ||
652 | gpu->activetime = gpu->totaltime = 0; | |
653 | ||
654 | ret = update_hw_cntrs(gpu, ncntrs, cntrs); | |
655 | ||
656 | out: | |
657 | spin_unlock_irqrestore(&gpu->perf_lock, flags); | |
658 | ||
659 | return ret; | |
660 | } | |
661 | ||
7198e6b0 RC |
662 | /* |
663 | * Cmdstream submission/retirement: | |
664 | */ | |
665 | ||
4241db42 JC |
666 | static void retire_submit(struct msm_gpu *gpu, struct msm_ringbuffer *ring, |
667 | struct msm_gem_submit *submit) | |
7d12a279 | 668 | { |
4241db42 JC |
669 | int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT; |
670 | volatile struct msm_gpu_submit_stats *stats; | |
671 | u64 elapsed, clock = 0; | |
7d12a279 RC |
672 | int i; |
673 | ||
4241db42 JC |
674 | stats = &ring->memptrs->stats[index]; |
675 | /* Convert 19.2Mhz alwayson ticks to nanoseconds for elapsed time */ | |
676 | elapsed = (stats->alwayson_end - stats->alwayson_start) * 10000; | |
677 | do_div(elapsed, 192); | |
678 | ||
679 | /* Calculate the clock frequency from the number of CP cycles */ | |
680 | if (elapsed) { | |
681 | clock = (stats->cpcycles_end - stats->cpcycles_start) * 1000; | |
682 | do_div(clock, elapsed); | |
683 | } | |
684 | ||
685 | trace_msm_gpu_submit_retired(submit, elapsed, clock, | |
686 | stats->alwayson_start, stats->alwayson_end); | |
687 | ||
7d12a279 RC |
688 | for (i = 0; i < submit->nr_bos; i++) { |
689 | struct msm_gem_object *msm_obj = submit->bos[i].obj; | |
690 | /* move to inactive: */ | |
691 | msm_gem_move_to_inactive(&msm_obj->base); | |
7ad0e8cf | 692 | msm_gem_unpin_iova(&msm_obj->base, gpu->aspace); |
dc9a9b32 | 693 | drm_gem_object_put(&msm_obj->base); |
7d12a279 RC |
694 | } |
695 | ||
eeb75474 RC |
696 | pm_runtime_mark_last_busy(&gpu->pdev->dev); |
697 | pm_runtime_put_autosuspend(&gpu->pdev->dev); | |
40e6815b | 698 | msm_gem_submit_free(submit); |
7d12a279 RC |
699 | } |
700 | ||
b6295f9a | 701 | static void retire_submits(struct msm_gpu *gpu) |
1a370be9 RC |
702 | { |
703 | struct drm_device *dev = gpu->dev; | |
f97decac JC |
704 | struct msm_gem_submit *submit, *tmp; |
705 | int i; | |
1a370be9 RC |
706 | |
707 | WARN_ON(!mutex_is_locked(&dev->struct_mutex)); | |
708 | ||
f97decac | 709 | /* Retire the commits starting with highest priority */ |
b1fc2839 | 710 | for (i = 0; i < gpu->nr_rings; i++) { |
f97decac | 711 | struct msm_ringbuffer *ring = gpu->rb[i]; |
1a370be9 | 712 | |
f97decac JC |
713 | list_for_each_entry_safe(submit, tmp, &ring->submits, node) { |
714 | if (dma_fence_is_signaled(submit->fence)) | |
4241db42 | 715 | retire_submit(gpu, ring, submit); |
1a370be9 RC |
716 | } |
717 | } | |
718 | } | |
719 | ||
7198e6b0 RC |
720 | static void retire_worker(struct work_struct *work) |
721 | { | |
722 | struct msm_gpu *gpu = container_of(work, struct msm_gpu, retire_work); | |
723 | struct drm_device *dev = gpu->dev; | |
f97decac | 724 | int i; |
7198e6b0 | 725 | |
f97decac JC |
726 | for (i = 0; i < gpu->nr_rings; i++) |
727 | update_fences(gpu, gpu->rb[i], gpu->rb[i]->memptrs->fence); | |
edd4fc63 | 728 | |
7198e6b0 | 729 | mutex_lock(&dev->struct_mutex); |
b6295f9a | 730 | retire_submits(gpu); |
7198e6b0 RC |
731 | mutex_unlock(&dev->struct_mutex); |
732 | } | |
733 | ||
734 | /* call from irq handler to schedule work to retire bo's */ | |
735 | void msm_gpu_retire(struct msm_gpu *gpu) | |
736 | { | |
737 | struct msm_drm_private *priv = gpu->dev->dev_private; | |
738 | queue_work(priv->wq, &gpu->retire_work); | |
70c70f09 | 739 | update_sw_cntrs(gpu); |
7198e6b0 RC |
740 | } |
741 | ||
742 | /* add bo's to gpu's ring, and kick gpu: */ | |
f44d32c7 | 743 | void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, |
7198e6b0 RC |
744 | struct msm_file_private *ctx) |
745 | { | |
746 | struct drm_device *dev = gpu->dev; | |
747 | struct msm_drm_private *priv = dev->dev_private; | |
f97decac | 748 | struct msm_ringbuffer *ring = submit->ring; |
f44d32c7 | 749 | int i; |
7198e6b0 | 750 | |
1a370be9 RC |
751 | WARN_ON(!mutex_is_locked(&dev->struct_mutex)); |
752 | ||
eeb75474 RC |
753 | pm_runtime_get_sync(&gpu->pdev->dev); |
754 | ||
755 | msm_gpu_hw_init(gpu); | |
37d77c3a | 756 | |
f97decac JC |
757 | submit->seqno = ++ring->seqno; |
758 | ||
759 | list_add_tail(&submit->node, &ring->submits); | |
1a370be9 | 760 | |
998b9a58 | 761 | msm_rd_dump_submit(priv->rd, submit, NULL); |
a7d3c950 | 762 | |
70c70f09 RC |
763 | update_sw_cntrs(gpu); |
764 | ||
7198e6b0 RC |
765 | for (i = 0; i < submit->nr_bos; i++) { |
766 | struct msm_gem_object *msm_obj = submit->bos[i].obj; | |
78babc16 | 767 | uint64_t iova; |
7198e6b0 RC |
768 | |
769 | /* can't happen yet.. but when we add 2d support we'll have | |
770 | * to deal w/ cross-ring synchronization: | |
771 | */ | |
772 | WARN_ON(is_active(msm_obj) && (msm_obj->gpu != gpu)); | |
773 | ||
7d12a279 | 774 | /* submit takes a reference to the bo and iova until retired: */ |
dc9a9b32 | 775 | drm_gem_object_get(&msm_obj->base); |
9fe041f6 | 776 | msm_gem_get_and_pin_iova(&msm_obj->base, |
8bdcd949 | 777 | submit->gpu->aspace, &iova); |
7198e6b0 | 778 | |
bf6811f3 RC |
779 | if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE) |
780 | msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence); | |
b6295f9a RC |
781 | else if (submit->bos[i].flags & MSM_SUBMIT_BO_READ) |
782 | msm_gem_move_to_active(&msm_obj->base, gpu, false, submit->fence); | |
7198e6b0 | 783 | } |
1a370be9 | 784 | |
1193c3bc | 785 | gpu->funcs->submit(gpu, submit, ctx); |
1a370be9 RC |
786 | priv->lastctx = ctx; |
787 | ||
bd6f82d8 | 788 | hangcheck_timer_reset(gpu); |
7198e6b0 RC |
789 | } |
790 | ||
791 | /* | |
792 | * Init/Cleanup: | |
793 | */ | |
794 | ||
795 | static irqreturn_t irq_handler(int irq, void *data) | |
796 | { | |
797 | struct msm_gpu *gpu = data; | |
798 | return gpu->funcs->irq(gpu); | |
799 | } | |
800 | ||
98db803f JC |
801 | static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu) |
802 | { | |
8e54eea5 | 803 | int ret = msm_clk_bulk_get(&pdev->dev, &gpu->grp_clks); |
98db803f | 804 | |
8e54eea5 | 805 | if (ret < 1) { |
98db803f | 806 | gpu->nr_clocks = 0; |
8e54eea5 | 807 | return ret; |
9d20a0e6 | 808 | } |
98db803f | 809 | |
8e54eea5 | 810 | gpu->nr_clocks = ret; |
98db803f | 811 | |
8e54eea5 JC |
812 | gpu->core_clk = msm_clk_bulk_get_clock(gpu->grp_clks, |
813 | gpu->nr_clocks, "core"); | |
98db803f | 814 | |
8e54eea5 JC |
815 | gpu->rbbmtimer_clk = msm_clk_bulk_get_clock(gpu->grp_clks, |
816 | gpu->nr_clocks, "rbbmtimer"); | |
98db803f JC |
817 | |
818 | return 0; | |
819 | } | |
7198e6b0 | 820 | |
1267a4df JC |
821 | static struct msm_gem_address_space * |
822 | msm_gpu_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev, | |
823 | uint64_t va_start, uint64_t va_end) | |
824 | { | |
825 | struct iommu_domain *iommu; | |
826 | struct msm_gem_address_space *aspace; | |
827 | int ret; | |
828 | ||
829 | /* | |
830 | * Setup IOMMU.. eventually we will (I think) do this once per context | |
831 | * and have separate page tables per context. For now, to keep things | |
832 | * simple and to get something working, just use a single address space: | |
833 | */ | |
834 | iommu = iommu_domain_alloc(&platform_bus_type); | |
835 | if (!iommu) | |
836 | return NULL; | |
837 | ||
838 | iommu->geometry.aperture_start = va_start; | |
839 | iommu->geometry.aperture_end = va_end; | |
840 | ||
6a41da17 | 841 | DRM_DEV_INFO(gpu->dev->dev, "%s: using IOMMU\n", gpu->name); |
1267a4df JC |
842 | |
843 | aspace = msm_gem_address_space_create(&pdev->dev, iommu, "gpu"); | |
844 | if (IS_ERR(aspace)) { | |
6a41da17 | 845 | DRM_DEV_ERROR(gpu->dev->dev, "failed to init iommu: %ld\n", |
1267a4df JC |
846 | PTR_ERR(aspace)); |
847 | iommu_domain_free(iommu); | |
848 | return ERR_CAST(aspace); | |
849 | } | |
850 | ||
851 | ret = aspace->mmu->funcs->attach(aspace->mmu, NULL, 0); | |
852 | if (ret) { | |
853 | msm_gem_address_space_put(aspace); | |
854 | return ERR_PTR(ret); | |
855 | } | |
856 | ||
857 | return aspace; | |
858 | } | |
859 | ||
7198e6b0 RC |
/*
 * msm_gpu_init() - one-time setup of the common GPU state.
 * @drm:    the DRM device this GPU belongs to
 * @pdev:   the platform device backing the GPU
 * @gpu:    the (caller-allocated, zeroed) msm_gpu to initialize
 * @funcs:  per-generation function table
 * @name:   human-readable GPU name, used in logs and IRQ naming
 * @config: resource names and address-space range for this GPU
 *
 * Maps registers, wires up the IRQ, acquires clocks/regulators, creates
 * the GPU address space, and allocates the per-ring memptrs plus the
 * ringbuffers themselves.  Returns 0 on success or a negative errno;
 * on failure everything acquired so far is unwound via the fail: path
 * (devm-managed resources are released by the driver core).
 */
int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
		struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
		const char *name, struct msm_gpu_config *config)
{
	int i, ret, nr_rings = config->nr_rings;
	void *memptrs;
	uint64_t memptrs_iova;

	/* Clamp the perfcounter count so last_cntrs[] indexing stays in bounds */
	if (WARN_ON(gpu->num_perfcntrs > ARRAY_SIZE(gpu->last_cntrs)))
		gpu->num_perfcntrs = ARRAY_SIZE(gpu->last_cntrs);

	gpu->dev = drm;
	gpu->funcs = funcs;
	gpu->name = name;

	INIT_LIST_HEAD(&gpu->active_list);
	INIT_WORK(&gpu->retire_work, retire_worker);
	INIT_WORK(&gpu->recover_work, recover_worker);


	/* Watchdog timer for detecting a hung GPU; armed when jobs are queued */
	timer_setup(&gpu->hangcheck_timer, hangcheck_handler, 0);

	spin_lock_init(&gpu->perf_lock);


	/* Map registers: */
	gpu->mmio = msm_ioremap(pdev, config->ioname, name);
	if (IS_ERR(gpu->mmio)) {
		ret = PTR_ERR(gpu->mmio);
		goto fail;
	}

	/* Get Interrupt: */
	gpu->irq = platform_get_irq_byname(pdev, config->irqname);
	if (gpu->irq < 0) {
		ret = gpu->irq;
		DRM_DEV_ERROR(drm->dev, "failed to get irq: %d\n", ret);
		goto fail;
	}

	/* devm-managed: the IRQ is released automatically on driver detach */
	ret = devm_request_irq(&pdev->dev, gpu->irq, irq_handler,
			IRQF_TRIGGER_HIGH, gpu->name, gpu);
	if (ret) {
		DRM_DEV_ERROR(drm->dev, "failed to request IRQ%u: %d\n", gpu->irq, ret);
		goto fail;
	}

	ret = get_clocks(pdev, gpu);
	if (ret)
		goto fail;

	/* Bus clock is optional; NULL means "no bus scaling on this target" */
	gpu->ebi1_clk = msm_clk_get(pdev, "bus");
	DBG("ebi1_clk: %p", gpu->ebi1_clk);
	if (IS_ERR(gpu->ebi1_clk))
		gpu->ebi1_clk = NULL;

	/* Acquire regulators: (both optional; NULL-ed out when absent) */
	gpu->gpu_reg = devm_regulator_get(&pdev->dev, "vdd");
	DBG("gpu_reg: %p", gpu->gpu_reg);
	if (IS_ERR(gpu->gpu_reg))
		gpu->gpu_reg = NULL;

	gpu->gpu_cx = devm_regulator_get(&pdev->dev, "vddcx");
	DBG("gpu_cx: %p", gpu->gpu_cx);
	if (IS_ERR(gpu->gpu_cx))
		gpu->gpu_cx = NULL;

	/* drvdata must be set before devfreq init (the devfreq callbacks
	 * recover the gpu via platform_get_drvdata())
	 */
	gpu->pdev = pdev;
	platform_set_drvdata(pdev, gpu);

	msm_devfreq_init(gpu);

	gpu->aspace = msm_gpu_create_address_space(gpu, pdev,
		config->va_start, config->va_end);

	/* NULL aspace (no IOMMU) is a supported fallback, not an error */
	if (gpu->aspace == NULL)
		DRM_DEV_INFO(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
	else if (IS_ERR(gpu->aspace)) {
		ret = PTR_ERR(gpu->aspace);
		goto fail;
	}

	/* One shared BO holds the memptrs (fence/rptr bookkeeping) for all
	 * rings; each ring gets a slice of it below.
	 */
	memptrs = msm_gem_kernel_new(drm,
		sizeof(struct msm_rbmemptrs) * nr_rings,
		MSM_BO_UNCACHED, gpu->aspace, &gpu->memptrs_bo,
		&memptrs_iova);

	if (IS_ERR(memptrs)) {
		ret = PTR_ERR(memptrs);
		DRM_DEV_ERROR(drm->dev, "could not allocate memptrs: %d\n", ret);
		goto fail;
	}

	/* Silently cap the ring count to what gpu->rb[] can hold */
	if (nr_rings > ARRAY_SIZE(gpu->rb)) {
		DRM_DEV_INFO_ONCE(drm->dev, "Only creating %zu ringbuffers\n",
			ARRAY_SIZE(gpu->rb));
		nr_rings = ARRAY_SIZE(gpu->rb);
	}

	/* Create ringbuffer(s): */
	for (i = 0; i < nr_rings; i++) {
		gpu->rb[i] = msm_ringbuffer_new(gpu, i, memptrs, memptrs_iova);

		if (IS_ERR(gpu->rb[i])) {
			ret = PTR_ERR(gpu->rb[i]);
			DRM_DEV_ERROR(drm->dev,
				"could not create ringbuffer %d: %d\n", i, ret);
			goto fail;
		}

		/* Advance CPU pointer and GPU iova to the next ring's slice */
		memptrs += sizeof(struct msm_rbmemptrs);
		memptrs_iova += sizeof(struct msm_rbmemptrs);
	}

	gpu->nr_rings = nr_rings;

	return 0;

fail:
	/* msm_ringbuffer_destroy() tolerates NULL/ERR_PTR, so sweeping the
	 * whole array is safe even for slots never created.
	 */
	for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
		msm_ringbuffer_destroy(gpu->rb[i]);
		gpu->rb[i] = NULL;
	}

	msm_gem_kernel_put(gpu->memptrs_bo, gpu->aspace, false);

	platform_set_drvdata(pdev, NULL);
	return ret;
}
989 | ||
990 | void msm_gpu_cleanup(struct msm_gpu *gpu) | |
991 | { | |
f97decac JC |
992 | int i; |
993 | ||
7198e6b0 RC |
994 | DBG("%s", gpu->name); |
995 | ||
996 | WARN_ON(!list_empty(&gpu->active_list)); | |
997 | ||
f97decac JC |
998 | for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) { |
999 | msm_ringbuffer_destroy(gpu->rb[i]); | |
1000 | gpu->rb[i] = NULL; | |
7198e6b0 | 1001 | } |
cd414f3d | 1002 | |
1e29dff0 | 1003 | msm_gem_kernel_put(gpu->memptrs_bo, gpu->aspace, false); |
cd414f3d JC |
1004 | |
1005 | if (!IS_ERR_OR_NULL(gpu->aspace)) { | |
1267a4df JC |
1006 | gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu, |
1007 | NULL, 0); | |
1008 | msm_gem_address_space_put(gpu->aspace); | |
1009 | } | |
7198e6b0 | 1010 | } |