/*
 * Copyright © 2015 Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/* DOC: VC4 GEM BO management support.
 *
 * The VC4 GPU architecture (both scanout and rendering) has direct
 * access to system memory with no MMU in between. To support it, we
 * use the GEM CMA helper functions to allocate contiguous ranges of
 * physical memory for our BOs.
 *
 * Since the CMA allocator is very slow, we keep a cache of recently
 * freed BOs around so that the kernel's allocation of objects for 3D
 * rendering can return quickly.
 */
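
/* Editorial overview (not in the original file; the struct definitions
 * are presumably in vc4_drv.h, included below): cached BOs sit on two
 * lists at once. A BO's size_head links it into a per-size bucket of
 * vc4->bo_cache.size_list, indexed by page count, for O(1) reuse
 * lookups; its unref_head links it into vc4->bo_cache.time_list, which
 * vc4_bo_cache_free_old() walks to destroy BOs that have sat in the
 * cache for more than a second.
 */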

#include "vc4_drv.h"
#include "uapi/drm/vc4_drm.h"

static void vc4_bo_stats_dump(struct vc4_dev *vc4)
{
        DRM_INFO("num bos allocated: %d\n",
                 vc4->bo_stats.num_allocated);
        DRM_INFO("size bos allocated: %dkb\n",
                 vc4->bo_stats.size_allocated / 1024);
        DRM_INFO("num bos used: %d\n",
                 vc4->bo_stats.num_allocated - vc4->bo_stats.num_cached);
        DRM_INFO("size bos used: %dkb\n",
                 (vc4->bo_stats.size_allocated -
                  vc4->bo_stats.size_cached) / 1024);
        DRM_INFO("num bos cached: %d\n",
                 vc4->bo_stats.num_cached);
        DRM_INFO("size bos cached: %dkb\n",
                 vc4->bo_stats.size_cached / 1024);
}

#ifdef CONFIG_DEBUG_FS
int vc4_bo_stats_debugfs(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *)m->private;
        struct drm_device *dev = node->minor->dev;
        struct vc4_dev *vc4 = to_vc4_dev(dev);
        struct vc4_bo_stats stats;

        /* Take a snapshot of the current stats with the lock held. */
        mutex_lock(&vc4->bo_lock);
        stats = vc4->bo_stats;
        mutex_unlock(&vc4->bo_lock);

        seq_printf(m, "num bos allocated: %d\n",
                   stats.num_allocated);
        seq_printf(m, "size bos allocated: %dkb\n",
                   stats.size_allocated / 1024);
        seq_printf(m, "num bos used: %d\n",
                   stats.num_allocated - stats.num_cached);
        seq_printf(m, "size bos used: %dkb\n",
                   (stats.size_allocated - stats.size_cached) / 1024);
        seq_printf(m, "num bos cached: %d\n",
                   stats.num_cached);
        seq_printf(m, "size bos cached: %dkb\n",
                   stats.size_cached / 1024);

        return 0;
}
#endif

static uint32_t bo_page_index(size_t size)
{
        return (size / PAGE_SIZE) - 1;
}
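
/* Illustrative note (not in the original source): size_list buckets
 * are indexed by page count minus one, so with 4KB pages:
 *
 *      bo_page_index(4096)  == 0   ->  size_list[0]
 *      bo_page_index(16384) == 3   ->  size_list[3]
 *
 * i.e. a 16KB BO is cached in, and reused from, size_list[3].
 */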

/* Must be called with bo_lock held. */
static void vc4_bo_destroy(struct vc4_bo *bo)
{
        struct drm_gem_object *obj = &bo->base.base;
        struct vc4_dev *vc4 = to_vc4_dev(obj->dev);

        if (bo->validated_shader) {
                kfree(bo->validated_shader->texture_samples);
                kfree(bo->validated_shader);
                bo->validated_shader = NULL;
        }

        vc4->bo_stats.num_allocated--;
        vc4->bo_stats.size_allocated -= obj->size;
        drm_gem_cma_free_object(obj);
}

/* Must be called with bo_lock held. */
static void vc4_bo_remove_from_cache(struct vc4_bo *bo)
{
        struct drm_gem_object *obj = &bo->base.base;
        struct vc4_dev *vc4 = to_vc4_dev(obj->dev);

        vc4->bo_stats.num_cached--;
        vc4->bo_stats.size_cached -= obj->size;

        list_del(&bo->unref_head);
        list_del(&bo->size_head);
}

static struct list_head *vc4_get_cache_list_for_size(struct drm_device *dev,
                                                     size_t size)
{
        struct vc4_dev *vc4 = to_vc4_dev(dev);
        uint32_t page_index = bo_page_index(size);

        if (vc4->bo_cache.size_list_size <= page_index) {
                uint32_t new_size = max(vc4->bo_cache.size_list_size * 2,
                                        page_index + 1);
                struct list_head *new_list;
                uint32_t i;

                new_list = kmalloc_array(new_size, sizeof(struct list_head),
                                         GFP_KERNEL);
                if (!new_list)
                        return NULL;

                /* Rebase the old cached BO lists to their new list
                 * head locations.
                 */
                for (i = 0; i < vc4->bo_cache.size_list_size; i++) {
                        struct list_head *old_list =
                                &vc4->bo_cache.size_list[i];

                        if (list_empty(old_list))
                                INIT_LIST_HEAD(&new_list[i]);
                        else
                                list_replace(old_list, &new_list[i]);
                }
                /* And initialize the brand new BO list heads. */
                for (i = vc4->bo_cache.size_list_size; i < new_size; i++)
                        INIT_LIST_HEAD(&new_list[i]);

                kfree(vc4->bo_cache.size_list);
                vc4->bo_cache.size_list = new_list;
                vc4->bo_cache.size_list_size = new_size;
        }

        return &vc4->bo_cache.size_list[page_index];
}
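
/* An editorial aside: new_size above at least doubles the bucket array
 * each time it grows, so the list-head rebasing loop runs an
 * amortized-constant number of times per bucket over the life of the
 * cache.
 */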

void vc4_bo_cache_purge(struct drm_device *dev)
{
        struct vc4_dev *vc4 = to_vc4_dev(dev);

        mutex_lock(&vc4->bo_lock);
        while (!list_empty(&vc4->bo_cache.time_list)) {
                struct vc4_bo *bo = list_last_entry(&vc4->bo_cache.time_list,
                                                    struct vc4_bo, unref_head);
                vc4_bo_remove_from_cache(bo);
                vc4_bo_destroy(bo);
        }
        mutex_unlock(&vc4->bo_lock);
}

static struct vc4_bo *vc4_bo_get_from_cache(struct drm_device *dev,
                                            uint32_t size)
{
        struct vc4_dev *vc4 = to_vc4_dev(dev);
        uint32_t page_index = bo_page_index(size);
        struct vc4_bo *bo = NULL;

        size = roundup(size, PAGE_SIZE);

        mutex_lock(&vc4->bo_lock);
        if (page_index >= vc4->bo_cache.size_list_size)
                goto out;

        if (list_empty(&vc4->bo_cache.size_list[page_index]))
                goto out;

        bo = list_first_entry(&vc4->bo_cache.size_list[page_index],
                              struct vc4_bo, size_head);
        vc4_bo_remove_from_cache(bo);
        /* A cached BO's refcount has dropped to zero (that is what put
         * it in the cache), so revive it with a fresh reference for
         * the new owner.
         */
        kref_init(&bo->base.base.refcount);

out:
        mutex_unlock(&vc4->bo_lock);
        return bo;
}

/**
 * vc4_create_object - Implementation of driver->gem_create_object.
 *
 * This lets the CMA helpers allocate object structs for us, and keep
 * our BO stats correct.
 */
struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size)
{
        struct vc4_dev *vc4 = to_vc4_dev(dev);
        struct vc4_bo *bo;

        bo = kzalloc(sizeof(*bo), GFP_KERNEL);
        if (!bo)
                return ERR_PTR(-ENOMEM);

        mutex_lock(&vc4->bo_lock);
        vc4->bo_stats.num_allocated++;
        vc4->bo_stats.size_allocated += size;
        mutex_unlock(&vc4->bo_lock);

        return &bo->base.base;
}

struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size,
                             bool from_cache)
{
        size_t size = roundup(unaligned_size, PAGE_SIZE);
        struct vc4_dev *vc4 = to_vc4_dev(dev);
        struct drm_gem_cma_object *cma_obj;

        if (size == 0)
                return ERR_PTR(-EINVAL);

        /* First, try to get a vc4_bo from the kernel BO cache. */
        if (from_cache) {
                struct vc4_bo *bo = vc4_bo_get_from_cache(dev, size);

                if (bo)
                        return bo;
        }

        cma_obj = drm_gem_cma_create(dev, size);
        if (IS_ERR(cma_obj)) {
                /*
                 * If we've run out of CMA memory, kill the cache of
                 * CMA allocations we've got lying around and try again.
                 */
                vc4_bo_cache_purge(dev);

                cma_obj = drm_gem_cma_create(dev, size);
                if (IS_ERR(cma_obj)) {
                        DRM_ERROR("Failed to allocate from CMA:\n");
                        vc4_bo_stats_dump(vc4);
                        return ERR_PTR(-ENOMEM);
                }
        }

        return to_vc4_bo(&cma_obj->base);
}

int vc4_dumb_create(struct drm_file *file_priv,
                    struct drm_device *dev,
                    struct drm_mode_create_dumb *args)
{
        int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
        struct vc4_bo *bo = NULL;
        int ret;

        if (args->pitch < min_pitch)
                args->pitch = min_pitch;

        if (args->size < args->pitch * args->height)
                args->size = args->pitch * args->height;

        bo = vc4_bo_create(dev, args->size, false);
        if (IS_ERR(bo))
                return PTR_ERR(bo);

        ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
        drm_gem_object_unreference_unlocked(&bo->base.base);

        return ret;
}
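
/* Worked example for the pitch math above (illustrative numbers, not
 * from the original source): a 1920x1080 buffer at bpp = 32 gives
 * min_pitch = DIV_ROUND_UP(1920 * 32, 8) = 7680 bytes per line, and a
 * minimum size of 7680 * 1080 = 8294400 bytes, which vc4_bo_create()
 * then rounds up to a page multiple.
 */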

/* Must be called with bo_lock held. */
static void vc4_bo_cache_free_old(struct drm_device *dev)
{
        struct vc4_dev *vc4 = to_vc4_dev(dev);
        unsigned long expire_time = jiffies - msecs_to_jiffies(1000);

        while (!list_empty(&vc4->bo_cache.time_list)) {
                struct vc4_bo *bo = list_last_entry(&vc4->bo_cache.time_list,
                                                    struct vc4_bo, unref_head);
                if (time_before(expire_time, bo->free_time)) {
                        mod_timer(&vc4->bo_cache.time_timer,
                                  round_jiffies_up(jiffies +
                                                   msecs_to_jiffies(1000)));
                        return;
                }

                vc4_bo_remove_from_cache(bo);
                vc4_bo_destroy(bo);
        }
}
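
/* Editorial note: vc4_free_object() below adds freed BOs at the head
 * of time_list, so list_last_entry() above is always the least
 * recently freed BO. Once that entry is too young to expire,
 * everything ahead of it is younger still, and the timer is re-armed
 * instead of walking further.
 */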

/* Called on the last userspace/kernel unreference of the BO. Returns
 * it to the BO cache if possible, otherwise frees it.
 */
void vc4_free_object(struct drm_gem_object *gem_bo)
{
        struct drm_device *dev = gem_bo->dev;
        struct vc4_dev *vc4 = to_vc4_dev(dev);
        struct vc4_bo *bo = to_vc4_bo(gem_bo);
        struct list_head *cache_list;

        mutex_lock(&vc4->bo_lock);
        /* If the object references someone else's memory, we can't
         * cache it.
         */
        if (gem_bo->import_attach) {
                vc4_bo_destroy(bo);
                goto out;
        }

        /* Don't cache if it was publicly named. */
        if (gem_bo->name) {
                vc4_bo_destroy(bo);
                goto out;
        }

        cache_list = vc4_get_cache_list_for_size(dev, gem_bo->size);
        if (!cache_list) {
                vc4_bo_destroy(bo);
                goto out;
        }

        if (bo->validated_shader) {
                kfree(bo->validated_shader->texture_samples);
                kfree(bo->validated_shader);
                bo->validated_shader = NULL;
        }

        bo->free_time = jiffies;
        list_add(&bo->size_head, cache_list);
        list_add(&bo->unref_head, &vc4->bo_cache.time_list);

        vc4->bo_stats.num_cached++;
        vc4->bo_stats.size_cached += gem_bo->size;

        vc4_bo_cache_free_old(dev);

out:
        mutex_unlock(&vc4->bo_lock);
}

static void vc4_bo_cache_time_work(struct work_struct *work)
{
        struct vc4_dev *vc4 =
                container_of(work, struct vc4_dev, bo_cache.time_work);
        struct drm_device *dev = vc4->dev;

        mutex_lock(&vc4->bo_lock);
        vc4_bo_cache_free_old(dev);
        mutex_unlock(&vc4->bo_lock);
}

static void vc4_bo_cache_time_timer(unsigned long data)
{
        struct drm_device *dev = (struct drm_device *)data;
        struct vc4_dev *vc4 = to_vc4_dev(dev);

        schedule_work(&vc4->bo_cache.time_work);
}

struct dma_buf *
vc4_prime_export(struct drm_device *dev, struct drm_gem_object *obj, int flags)
{
        struct vc4_bo *bo = to_vc4_bo(obj);

        /* Shader BOs must stay read-only after validation, and an
         * exported dma-buf could be written by its importer, so refuse
         * to export them.
         */
        if (bo->validated_shader) {
                DRM_ERROR("Attempting to export shader BO\n");
                return ERR_PTR(-EINVAL);
        }

        return drm_gem_prime_export(dev, obj, flags);
}

int vc4_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_gem_object *gem_obj;
        struct vc4_bo *bo;
        int ret;

        ret = drm_gem_mmap(filp, vma);
        if (ret)
                return ret;

        gem_obj = vma->vm_private_data;
        bo = to_vc4_bo(gem_obj);

        if (bo->validated_shader && (vma->vm_flags & VM_WRITE)) {
                DRM_ERROR("mmapping of shader BOs for writing not allowed.\n");
                return -EINVAL;
        }

        /*
         * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
         * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
         * the whole buffer.
         */
        vma->vm_flags &= ~VM_PFNMAP;
        vma->vm_pgoff = 0;

        /* Map the whole CMA buffer write-combined instead of the
         * default mapping drm_gem_mmap() would have set up.
         */
        ret = dma_mmap_wc(bo->base.base.dev->dev, vma, bo->base.vaddr,
                          bo->base.paddr, vma->vm_end - vma->vm_start);
        if (ret)
                drm_gem_vm_close(vma);

        return ret;
}

int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
        struct vc4_bo *bo = to_vc4_bo(obj);

        if (bo->validated_shader && (vma->vm_flags & VM_WRITE)) {
                DRM_ERROR("mmapping of shader BOs for writing not allowed.\n");
                return -EINVAL;
        }

        return drm_gem_cma_prime_mmap(obj, vma);
}

void *vc4_prime_vmap(struct drm_gem_object *obj)
{
        struct vc4_bo *bo = to_vc4_bo(obj);

        if (bo->validated_shader) {
                DRM_ERROR("mmapping of shader BOs not allowed.\n");
                return ERR_PTR(-EINVAL);
        }

        return drm_gem_cma_prime_vmap(obj);
}

int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file_priv)
{
        struct drm_vc4_create_bo *args = data;
        struct vc4_bo *bo = NULL;
        int ret;

        /*
         * We can't allocate from the BO cache, because the BOs don't
         * get zeroed, and that might leak data between users.
         */
        bo = vc4_bo_create(dev, args->size, false);
        if (IS_ERR(bo))
                return PTR_ERR(bo);

        ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
        drm_gem_object_unreference_unlocked(&bo->base.base);

        return ret;
}

int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file_priv)
{
        struct drm_vc4_mmap_bo *args = data;
        struct drm_gem_object *gem_obj;

        gem_obj = drm_gem_object_lookup(file_priv, args->handle);
        if (!gem_obj) {
                DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
                return -EINVAL;
        }

        /* The mmap offset was set up at BO allocation time. */
        args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);

        drm_gem_object_unreference_unlocked(gem_obj);
        return 0;
}

int
vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct drm_vc4_create_shader_bo *args = data;
        struct vc4_bo *bo = NULL;
        int ret;

        if (args->size == 0)
                return -EINVAL;

        if (args->size % sizeof(u64) != 0)
                return -EINVAL;

        if (args->flags != 0) {
                DRM_INFO("Unknown flags set: 0x%08x\n", args->flags);
                return -EINVAL;
        }

        if (args->pad != 0) {
                DRM_INFO("Pad set: 0x%08x\n", args->pad);
                return -EINVAL;
        }

        bo = vc4_bo_create(dev, args->size, true);
        if (IS_ERR(bo))
                return PTR_ERR(bo);

        if (copy_from_user(bo->base.vaddr,
                           (void __user *)(uintptr_t)args->data,
                           args->size)) {
                ret = -EFAULT;
                goto fail;
        }
        /* Clear the rest of the memory, since the BO may have come
         * from the cache and still hold its previous contents.
         */
        memset(bo->base.vaddr + args->size, 0,
               bo->base.base.size - args->size);

        bo->validated_shader = vc4_validate_shader(&bo->base);
        if (!bo->validated_shader) {
                ret = -EINVAL;
                goto fail;
        }

        /* We have to create the handle after validation, to avoid
         * races with users doing things like mmapping the shader BO.
         */
        ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);

fail:
        drm_gem_object_unreference_unlocked(&bo->base.base);

        return ret;
}

void vc4_bo_cache_init(struct drm_device *dev)
{
        struct vc4_dev *vc4 = to_vc4_dev(dev);

        mutex_init(&vc4->bo_lock);

        INIT_LIST_HEAD(&vc4->bo_cache.time_list);

        INIT_WORK(&vc4->bo_cache.time_work, vc4_bo_cache_time_work);
        setup_timer(&vc4->bo_cache.time_timer,
                    vc4_bo_cache_time_timer,
                    (unsigned long)dev);
}

void vc4_bo_cache_destroy(struct drm_device *dev)
{
        struct vc4_dev *vc4 = to_vc4_dev(dev);

        del_timer(&vc4->bo_cache.time_timer);
        cancel_work_sync(&vc4->bo_cache.time_work);

        vc4_bo_cache_purge(dev);

        if (vc4->bo_stats.num_allocated) {
                DRM_ERROR("Destroying BO cache while BOs still allocated:\n");
                vc4_bo_stats_dump(vc4);
        }
}