/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
21 #include "msm_fence.h"
23 #include <linux/string_helpers.h>
#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
#include <mach/board.h>
static void bs_init(struct msm_gpu *gpu)
{
	if (gpu->bus_scale_table) {
		gpu->bsc = msm_bus_scale_register_client(gpu->bus_scale_table);
		DBG("bus scale client: %08x", gpu->bsc);
	}
}

static void bs_fini(struct msm_gpu *gpu)
{
	if (gpu->bsc) {
		msm_bus_scale_unregister_client(gpu->bsc);
		gpu->bsc = 0;
	}
}

static void bs_set(struct msm_gpu *gpu, int idx)
{
	if (gpu->bsc) {
		DBG("set bus scaling: %d", idx);
		msm_bus_scale_client_update_request(gpu->bsc, idx);
	}
}
#else
static void bs_init(struct msm_gpu *gpu) {}
static void bs_fini(struct msm_gpu *gpu) {}
static void bs_set(struct msm_gpu *gpu, int idx) {}
#endif
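/*
 * Note on the helpers below: power-up order is rails (enable_pwrrail),
 * then core clocks (enable_clk), then the AXI bus (enable_axi);
 * power-down reverses it.  Each helper tolerates missing resources
 * (NULL clocks/regulators), so the same sequence can run on targets
 * that lack some of them.
 */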
static int enable_pwrrail(struct msm_gpu *gpu)
{
	struct drm_device *dev = gpu->dev;
	int ret = 0;

	if (gpu->gpu_reg) {
		ret = regulator_enable(gpu->gpu_reg);
		if (ret) {
			dev_err(dev->dev, "failed to enable 'gpu_reg': %d\n", ret);
			return ret;
		}
	}

	if (gpu->gpu_cx) {
		ret = regulator_enable(gpu->gpu_cx);
		if (ret) {
			dev_err(dev->dev, "failed to enable 'gpu_cx': %d\n", ret);
			return ret;
		}
	}

	return 0;
}
static int disable_pwrrail(struct msm_gpu *gpu)
{
	if (gpu->gpu_cx)
		regulator_disable(gpu->gpu_cx);
	if (gpu->gpu_reg)
		regulator_disable(gpu->gpu_reg);
	return 0;
}
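/*
 * The group clocks are walked in two passes: clk_prepare() for all of
 * them, then clk_enable() for all of them (clk_prepare() may sleep,
 * clk_enable() may not).  disable_clk() mirrors this with clk_disable()
 * followed by clk_unprepare().
 */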
static int enable_clk(struct msm_gpu *gpu)
{
	int i;

	if (gpu->core_clk && gpu->fast_rate)
		clk_set_rate(gpu->core_clk, gpu->fast_rate);

	/* Set the RBBM timer rate to 19.2Mhz */
	if (gpu->rbbmtimer_clk)
		clk_set_rate(gpu->rbbmtimer_clk, 19200000);

	for (i = gpu->nr_clocks - 1; i >= 0; i--)
		if (gpu->grp_clks[i])
			clk_prepare(gpu->grp_clks[i]);

	for (i = gpu->nr_clocks - 1; i >= 0; i--)
		if (gpu->grp_clks[i])
			clk_enable(gpu->grp_clks[i]);

	return 0;
}
static int disable_clk(struct msm_gpu *gpu)
{
	int i;

	for (i = gpu->nr_clocks - 1; i >= 0; i--)
		if (gpu->grp_clks[i])
			clk_disable(gpu->grp_clks[i]);

	for (i = gpu->nr_clocks - 1; i >= 0; i--)
		if (gpu->grp_clks[i])
			clk_unprepare(gpu->grp_clks[i]);

	/*
	 * Set the clock to a deliberately low rate. On older targets the clock
	 * speed had to be non zero to avoid problems. On newer targets this
	 * will be rounded down to zero anyway so it all works out.
	 */
	if (gpu->core_clk)
		clk_set_rate(gpu->core_clk, 27000000);

	if (gpu->rbbmtimer_clk)
		clk_set_rate(gpu->rbbmtimer_clk, 0);

	return 0;
}
static int enable_axi(struct msm_gpu *gpu)
{
	if (gpu->ebi1_clk)
		clk_prepare_enable(gpu->ebi1_clk);
	if (gpu->bus_freq)
		bs_set(gpu, gpu->bus_freq);
	return 0;
}
static int disable_axi(struct msm_gpu *gpu)
{
	if (gpu->ebi1_clk)
		clk_disable_unprepare(gpu->ebi1_clk);
	if (gpu->bus_freq)
		bs_set(gpu, 0);
	return 0;
}
int msm_gpu_pm_resume(struct msm_gpu *gpu)
{
	int ret;

	DBG("%s", gpu->name);

	ret = enable_pwrrail(gpu);
	if (ret)
		return ret;

	ret = enable_clk(gpu);
	if (ret)
		return ret;

	ret = enable_axi(gpu);
	if (ret)
		return ret;

	gpu->needs_hw_init = true;

	return 0;
}
int msm_gpu_pm_suspend(struct msm_gpu *gpu)
{
	int ret;

	DBG("%s", gpu->name);

	ret = disable_axi(gpu);
	if (ret)
		return ret;

	ret = disable_clk(gpu);
	if (ret)
		return ret;

	ret = disable_pwrrail(gpu);
	if (ret)
		return ret;

	return 0;
}
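/*
 * Hardware (re)init is deferred: msm_gpu_pm_resume() only sets
 * needs_hw_init, and the actual per-GPU init happens in
 * msm_gpu_hw_init() on the next submit, with the IRQ disabled so a
 * partially initialized GPU can't raise interrupts at us.
 */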
int msm_gpu_hw_init(struct msm_gpu *gpu)
{
	int ret = 0;

	WARN_ON(!mutex_is_locked(&gpu->dev->struct_mutex));

	if (!gpu->needs_hw_init)
		return 0;

	disable_irq(gpu->irq);
	ret = gpu->funcs->hw_init(gpu);
	if (!ret)
		gpu->needs_hw_init = false;
	enable_irq(gpu->irq);

	return ret;
}
/*
 * Hangcheck detection for locked gpu:
 */

static void update_fences(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
		uint32_t fence)
{
	struct msm_gem_submit *submit;

	list_for_each_entry(submit, &ring->submits, node) {
		if (submit->seqno > fence)
			break;

		msm_update_fence(submit->ring->fctx,
			submit->fence->seqno);
	}
}
static struct msm_gem_submit *
find_submit(struct msm_ringbuffer *ring, uint32_t fence)
{
	struct msm_gem_submit *submit;

	WARN_ON(!mutex_is_locked(&ring->gpu->dev->struct_mutex));

	list_for_each_entry(submit, &ring->submits, node)
		if (submit->seqno == fence)
			return submit;

	return NULL;
}
static void retire_submits(struct msm_gpu *gpu);
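/*
 * Recovery sequence, run from the workqueue with struct_mutex held:
 * find the offending submit (the first one past the last completed
 * fence), dump it for debugging, signal fences for work that did
 * complete, then reset the GPU and replay the surviving submits in
 * priority order.
 */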
static void recover_worker(struct work_struct *work)
{
	struct msm_gpu *gpu = container_of(work, struct msm_gpu, recover_work);
	struct drm_device *dev = gpu->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_submit *submit;
	struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu);
	int i;

	mutex_lock(&dev->struct_mutex);

	dev_err(dev->dev, "%s: hangcheck recover!\n", gpu->name);

	submit = find_submit(cur_ring, cur_ring->memptrs->fence + 1);
	if (submit) {
		struct task_struct *task;

		rcu_read_lock();
		task = pid_task(submit->pid, PIDTYPE_PID);
		if (task) {
			char *cmd;

			/*
			 * So slightly annoying, in other paths like
			 * mmap'ing gem buffers, mmap_sem is acquired
			 * before struct_mutex, which means we can't
			 * hold struct_mutex across the call to
			 * get_cmdline().  But submits are retired
			 * from the same in-order workqueue, so we can
			 * safely drop the lock here without worrying
			 * about the submit going away.
			 */
			mutex_unlock(&dev->struct_mutex);
			cmd = kstrdup_quotable_cmdline(task, GFP_KERNEL);
			mutex_lock(&dev->struct_mutex);

			dev_err(dev->dev, "%s: offending task: %s (%s)\n",
				gpu->name, task->comm, cmd);

			msm_rd_dump_submit(priv->hangrd, submit,
				"offending task: %s (%s)", task->comm, cmd);

			kfree(cmd);
		} else {
			msm_rd_dump_submit(priv->hangrd, submit, NULL);
		}
		rcu_read_unlock();
	}

	/*
	 * Update all the rings with the latest and greatest fence.. this
	 * needs to happen after msm_rd_dump_submit() to ensure that the
	 * bo's referenced by the offending submit are still around.
	 */
	for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
		struct msm_ringbuffer *ring = gpu->rb[i];
		uint32_t fence = ring->memptrs->fence;

		/*
		 * For the current (faulting?) ring/submit advance the fence by
		 * one more to clear the faulting submit
		 */
		if (ring == cur_ring)
			fence++;

		update_fences(gpu, ring, fence);
	}

	if (msm_gpu_active(gpu)) {
		/* retire completed submits, plus the one that hung: */
		retire_submits(gpu);

		pm_runtime_get_sync(&gpu->pdev->dev);
		gpu->funcs->recover(gpu);
		pm_runtime_put_sync(&gpu->pdev->dev);

		/*
		 * Replay all remaining submits starting with highest priority
		 * ring
		 */
		for (i = 0; i < gpu->nr_rings; i++) {
			struct msm_ringbuffer *ring = gpu->rb[i];

			list_for_each_entry(submit, &ring->submits, node)
				gpu->funcs->submit(gpu, submit, NULL);
		}
	}

	mutex_unlock(&dev->struct_mutex);

	msm_gpu_retire(gpu);
}
static void hangcheck_timer_reset(struct msm_gpu *gpu)
{
	DBG("%s", gpu->name);
	mod_timer(&gpu->hangcheck_timer,
			round_jiffies_up(jiffies + DRM_MSM_HANGCHECK_JIFFIES));
}
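/*
 * The handler fires DRM_MSM_HANGCHECK_JIFFIES after each (re)arm.  A
 * ring is considered hung when the completed fence hasn't moved since
 * the last check but submits are still outstanding (fence < seqno);
 * actual recovery is punted to the workqueue since we are in timer
 * context here.
 */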
static void hangcheck_handler(unsigned long data)
{
	struct msm_gpu *gpu = (struct msm_gpu *)data;
	struct drm_device *dev = gpu->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
	uint32_t fence = ring->memptrs->fence;

	if (fence != ring->hangcheck_fence) {
		/* some progress has been made.. ya! */
		ring->hangcheck_fence = fence;
	} else if (fence < ring->seqno) {
		/* no progress and not done.. hung! */
		ring->hangcheck_fence = fence;
		dev_err(dev->dev, "%s: hangcheck detected gpu lockup rb %d!\n",
				gpu->name, ring->id);
		dev_err(dev->dev, "%s:     completed fence: %u\n",
				gpu->name, fence);
		dev_err(dev->dev, "%s:     submitted fence: %u\n",
				gpu->name, ring->seqno);

		queue_work(priv->wq, &gpu->recover_work);
	}

	/* if still more pending work, reset the hangcheck timer: */
	if (ring->seqno > ring->hangcheck_fence)
		hangcheck_timer_reset(gpu);

	/* workaround for missing irq: */
	queue_work(priv->wq, &gpu->retire_work);
}
/*
 * Performance Counters:
 */

/* called under perf_lock */
static int update_hw_cntrs(struct msm_gpu *gpu, uint32_t ncntrs, uint32_t *cntrs)
{
	uint32_t current_cntrs[ARRAY_SIZE(gpu->last_cntrs)];
	int i, n = min(ncntrs, gpu->num_perfcntrs);

	/* read current values: */
	for (i = 0; i < gpu->num_perfcntrs; i++)
		current_cntrs[i] = gpu_read(gpu, gpu->perfcntrs[i].sample_reg);

	/* update cntrs: */
	for (i = 0; i < n; i++)
		cntrs[i] = current_cntrs[i] - gpu->last_cntrs[i];

	/* save current values: */
	for (i = 0; i < gpu->num_perfcntrs; i++)
		gpu->last_cntrs[i] = current_cntrs[i];

	return n;
}
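/*
 * The software counters track wall time vs. time the GPU was actually
 * busy: update_sw_cntrs() is called on state transitions (submit,
 * retire) and folds the elapsed interval into totaltime, and into
 * activetime too when the GPU was active over that interval.
 */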
static void update_sw_cntrs(struct msm_gpu *gpu)
{
	ktime_t time;
	uint32_t elapsed;
	unsigned long flags;

	spin_lock_irqsave(&gpu->perf_lock, flags);
	if (!gpu->perfcntr_active)
		goto out;

	time = ktime_get();
	elapsed = ktime_to_us(ktime_sub(time, gpu->last_sample.time));

	gpu->totaltime += elapsed;
	if (gpu->last_sample.active)
		gpu->activetime += elapsed;

	gpu->last_sample.active = msm_gpu_active(gpu);
	gpu->last_sample.time = time;

out:
	spin_unlock_irqrestore(&gpu->perf_lock, flags);
}
void msm_gpu_perfcntr_start(struct msm_gpu *gpu)
{
	unsigned long flags;

	pm_runtime_get_sync(&gpu->pdev->dev);

	spin_lock_irqsave(&gpu->perf_lock, flags);
	/* we could dynamically enable/disable perfcntr registers too.. */
	gpu->last_sample.active = msm_gpu_active(gpu);
	gpu->last_sample.time = ktime_get();
	gpu->activetime = gpu->totaltime = 0;
	gpu->perfcntr_active = true;
	update_hw_cntrs(gpu, 0, NULL);
	spin_unlock_irqrestore(&gpu->perf_lock, flags);
}
void msm_gpu_perfcntr_stop(struct msm_gpu *gpu)
{
	gpu->perfcntr_active = false;
	pm_runtime_put_sync(&gpu->pdev->dev);
}
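/*
 * Samples are deltas: each msm_gpu_perfcntr_sample() call returns the
 * active/total time and counter values accumulated since the previous
 * call, then resets the accumulators.
 */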
/* returns -errno or # of cntrs sampled */
int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,
		uint32_t *totaltime, uint32_t ncntrs, uint32_t *cntrs)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&gpu->perf_lock, flags);

	if (!gpu->perfcntr_active) {
		ret = -EINVAL;
		goto out;
	}

	*activetime = gpu->activetime;
	*totaltime = gpu->totaltime;

	gpu->activetime = gpu->totaltime = 0;

	ret = update_hw_cntrs(gpu, ncntrs, cntrs);

out:
	spin_unlock_irqrestore(&gpu->perf_lock, flags);

	return ret;
}
/*
 * Cmdstream submission/retirement:
 */

static void retire_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
	int i;

	for (i = 0; i < submit->nr_bos; i++) {
		struct msm_gem_object *msm_obj = submit->bos[i].obj;
		/* move to inactive: */
		msm_gem_move_to_inactive(&msm_obj->base);
		msm_gem_put_iova(&msm_obj->base, gpu->aspace);
		drm_gem_object_unreference(&msm_obj->base);
	}

	pm_runtime_mark_last_busy(&gpu->pdev->dev);
	pm_runtime_put_autosuspend(&gpu->pdev->dev);
	msm_gem_submit_free(submit);
}
static void retire_submits(struct msm_gpu *gpu)
{
	struct drm_device *dev = gpu->dev;
	struct msm_gem_submit *submit, *tmp;
	int i;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* Retire the commits starting with highest priority */
	for (i = 0; i < gpu->nr_rings; i++) {
		struct msm_ringbuffer *ring = gpu->rb[i];

		list_for_each_entry_safe(submit, tmp, &ring->submits, node) {
			if (dma_fence_is_signaled(submit->fence))
				retire_submit(gpu, submit);
		}
	}
}
static void retire_worker(struct work_struct *work)
{
	struct msm_gpu *gpu = container_of(work, struct msm_gpu, retire_work);
	struct drm_device *dev = gpu->dev;
	int i;

	for (i = 0; i < gpu->nr_rings; i++)
		update_fences(gpu, gpu->rb[i], gpu->rb[i]->memptrs->fence);

	mutex_lock(&dev->struct_mutex);
	retire_submits(gpu);
	mutex_unlock(&dev->struct_mutex);
}
/* call from irq handler to schedule work to retire bo's */
void msm_gpu_retire(struct msm_gpu *gpu)
{
	struct msm_drm_private *priv = gpu->dev->dev_private;
	queue_work(priv->wq, &gpu->retire_work);
	update_sw_cntrs(gpu);
}
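/*
 * Submission bookkeeping, all under struct_mutex: assign the next ring
 * seqno, hold a bo + iova reference per buffer until retirement, and
 * (re)arm the hangcheck timer.  retire_worker() drops the references
 * once the submit's fence has signaled.
 */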
/* add bo's to gpu's ring, and kick gpu: */
void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
		struct msm_file_private *ctx)
{
	struct drm_device *dev = gpu->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_ringbuffer *ring = submit->ring;
	int i;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	pm_runtime_get_sync(&gpu->pdev->dev);

	msm_gpu_hw_init(gpu);

	submit->seqno = ++ring->seqno;

	list_add_tail(&submit->node, &ring->submits);

	msm_rd_dump_submit(priv->rd, submit, NULL);

	update_sw_cntrs(gpu);

	for (i = 0; i < submit->nr_bos; i++) {
		struct msm_gem_object *msm_obj = submit->bos[i].obj;
		uint64_t iova;

		/* can't happen yet.. but when we add 2d support we'll have
		 * to deal w/ cross-ring synchronization:
		 */
		WARN_ON(is_active(msm_obj) && (msm_obj->gpu != gpu));

		/* submit takes a reference to the bo and iova until retired: */
		drm_gem_object_reference(&msm_obj->base);
		msm_gem_get_iova(&msm_obj->base,
				submit->gpu->aspace, &iova);

		if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
			msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence);
		else if (submit->bos[i].flags & MSM_SUBMIT_BO_READ)
			msm_gem_move_to_active(&msm_obj->base, gpu, false, submit->fence);
	}

	gpu->funcs->submit(gpu, submit, ctx);
	priv->lastctx = ctx;

	hangcheck_timer_reset(gpu);
}
/*
 * Init/Cleanup:
 */

static irqreturn_t irq_handler(int irq, void *data)
{
	struct msm_gpu *gpu = data;
	return gpu->funcs->irq(gpu);
}
static struct clk *get_clock(struct device *dev, const char *name)
{
	struct clk *clk = devm_clk_get(dev, name);

	return IS_ERR(clk) ? NULL : clk;
}
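/*
 * Every clock named in the "clock-names" DT property is bulk-managed
 * through grp_clks[]; "core" and "rbbmtimer" (including the legacy
 * "_clk"-suffixed spellings) are additionally cached so their rates can
 * be driven individually.
 */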
static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu)
{
	struct device *dev = &pdev->dev;
	struct property *prop;
	const char *name;
	int i = 0;

	gpu->nr_clocks = of_property_count_strings(dev->of_node, "clock-names");
	if (gpu->nr_clocks < 1) {
		gpu->nr_clocks = 0;
		return 0;
	}

	gpu->grp_clks = devm_kcalloc(dev, sizeof(struct clk *), gpu->nr_clocks,
		GFP_KERNEL);
	if (!gpu->grp_clks)
		return -ENOMEM;

	of_property_for_each_string(dev->of_node, "clock-names", prop, name) {
		gpu->grp_clks[i] = get_clock(dev, name);

		/* Remember the key clocks that we need to control later */
		if (!strcmp(name, "core") || !strcmp(name, "core_clk"))
			gpu->core_clk = gpu->grp_clks[i];
		else if (!strcmp(name, "rbbmtimer") || !strcmp(name, "rbbmtimer_clk"))
			gpu->rbbmtimer_clk = gpu->grp_clks[i];

		++i;
	}

	return 0;
}
static struct msm_gem_address_space *
msm_gpu_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev,
		uint64_t va_start, uint64_t va_end)
{
	struct iommu_domain *iommu;
	struct msm_gem_address_space *aspace;
	int ret;

	/*
	 * Setup IOMMU.. eventually we will (I think) do this once per context
	 * and have separate page tables per context.  For now, to keep things
	 * simple and to get something working, just use a single address space:
	 */
	iommu = iommu_domain_alloc(&platform_bus_type);
	if (!iommu)
		return NULL;

	iommu->geometry.aperture_start = va_start;
	iommu->geometry.aperture_end = va_end;

	dev_info(gpu->dev->dev, "%s: using IOMMU\n", gpu->name);

	aspace = msm_gem_address_space_create(&pdev->dev, iommu, "gpu");
	if (IS_ERR(aspace)) {
		dev_err(gpu->dev->dev, "failed to init iommu: %ld\n",
			PTR_ERR(aspace));
		iommu_domain_free(iommu);
		return ERR_CAST(aspace);
	}

	ret = aspace->mmu->funcs->attach(aspace->mmu, NULL, 0);
	if (ret) {
		msm_gem_address_space_put(aspace);
		return ERR_PTR(ret);
	}

	return aspace;
}
int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
		struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
		const char *name, struct msm_gpu_config *config)
{
	int i, ret, nr_rings = config->nr_rings;
	void *memptrs;
	uint64_t memptrs_iova;

	if (WARN_ON(gpu->num_perfcntrs > ARRAY_SIZE(gpu->last_cntrs)))
		gpu->num_perfcntrs = ARRAY_SIZE(gpu->last_cntrs);

	gpu->dev = drm;
	gpu->funcs = funcs;
	gpu->name = name;

	INIT_LIST_HEAD(&gpu->active_list);
	INIT_WORK(&gpu->retire_work, retire_worker);
	INIT_WORK(&gpu->recover_work, recover_worker);

	setup_timer(&gpu->hangcheck_timer, hangcheck_handler,
			(unsigned long)gpu);

	spin_lock_init(&gpu->perf_lock);

	/* Map registers: */
	gpu->mmio = msm_ioremap(pdev, config->ioname, name);
	if (IS_ERR(gpu->mmio)) {
		ret = PTR_ERR(gpu->mmio);
		goto fail;
	}

	/* Get Interrupt: */
	gpu->irq = platform_get_irq_byname(pdev, config->irqname);
	if (gpu->irq < 0) {
		ret = gpu->irq;
		dev_err(drm->dev, "failed to get irq: %d\n", ret);
		goto fail;
	}

	ret = devm_request_irq(&pdev->dev, gpu->irq, irq_handler,
			IRQF_TRIGGER_HIGH, gpu->name, gpu);
	if (ret) {
		dev_err(drm->dev, "failed to request IRQ%u: %d\n", gpu->irq, ret);
		goto fail;
	}

	ret = get_clocks(pdev, gpu);
	if (ret)
		goto fail;

	gpu->ebi1_clk = msm_clk_get(pdev, "bus");
	DBG("ebi1_clk: %p", gpu->ebi1_clk);
	if (IS_ERR(gpu->ebi1_clk))
		gpu->ebi1_clk = NULL;

	/* Acquire regulators: */
	gpu->gpu_reg = devm_regulator_get(&pdev->dev, "vdd");
	DBG("gpu_reg: %p", gpu->gpu_reg);
	if (IS_ERR(gpu->gpu_reg))
		gpu->gpu_reg = NULL;

	gpu->gpu_cx = devm_regulator_get(&pdev->dev, "vddcx");
	DBG("gpu_cx: %p", gpu->gpu_cx);
	if (IS_ERR(gpu->gpu_cx))
		gpu->gpu_cx = NULL;

	gpu->pdev = pdev;
	platform_set_drvdata(pdev, gpu);

	bs_init(gpu);

	gpu->aspace = msm_gpu_create_address_space(gpu, pdev,
		config->va_start, config->va_end);

	if (gpu->aspace == NULL)
		dev_info(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
	else if (IS_ERR(gpu->aspace)) {
		ret = PTR_ERR(gpu->aspace);
		goto fail;
	}

	memptrs = msm_gem_kernel_new(drm, sizeof(*gpu->memptrs_bo),
		MSM_BO_UNCACHED, gpu->aspace, &gpu->memptrs_bo,
		&memptrs_iova);

	if (IS_ERR(memptrs)) {
		ret = PTR_ERR(memptrs);
		dev_err(drm->dev, "could not allocate memptrs: %d\n", ret);
		goto fail;
	}

	if (nr_rings > ARRAY_SIZE(gpu->rb)) {
		DRM_DEV_INFO_ONCE(drm->dev, "Only creating %zu ringbuffers\n",
			ARRAY_SIZE(gpu->rb));
		nr_rings = ARRAY_SIZE(gpu->rb);
	}

	/* Create ringbuffer(s): */
	for (i = 0; i < nr_rings; i++) {
		gpu->rb[i] = msm_ringbuffer_new(gpu, i, memptrs, memptrs_iova);

		if (IS_ERR(gpu->rb[i])) {
			ret = PTR_ERR(gpu->rb[i]);
			dev_err(drm->dev,
				"could not create ringbuffer %d: %d\n", i, ret);
			goto fail;
		}

		memptrs += sizeof(struct msm_rbmemptrs);
		memptrs_iova += sizeof(struct msm_rbmemptrs);
	}

	gpu->nr_rings = nr_rings;

	return 0;

fail:
	for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
		msm_ringbuffer_destroy(gpu->rb[i]);
		gpu->rb[i] = NULL;
	}

	if (gpu->memptrs_bo) {
		msm_gem_put_vaddr(gpu->memptrs_bo);
		msm_gem_put_iova(gpu->memptrs_bo, gpu->aspace);
		drm_gem_object_unreference_unlocked(gpu->memptrs_bo);
	}

	platform_set_drvdata(pdev, NULL);
	return ret;
}
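/*
 * Cleanup mirrors msm_gpu_init().  Callers are expected to have idled
 * the GPU first: active_list must be empty by now.
 */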
void msm_gpu_cleanup(struct msm_gpu *gpu)
{
	int i;

	DBG("%s", gpu->name);

	WARN_ON(!list_empty(&gpu->active_list));

	bs_fini(gpu);

	for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
		msm_ringbuffer_destroy(gpu->rb[i]);
		gpu->rb[i] = NULL;
	}

	if (gpu->memptrs_bo) {
		msm_gem_put_vaddr(gpu->memptrs_bo);
		msm_gem_put_iova(gpu->memptrs_bo, gpu->aspace);
		drm_gem_object_unreference_unlocked(gpu->memptrs_bo);
	}

	if (!IS_ERR_OR_NULL(gpu->aspace)) {
		gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu,
				NULL, 0);
		msm_gem_address_space_put(gpu->aspace);
	}
}