/*
 * Copyright © 2015 Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/**
 * DOC: VC4 GEM BO management support
 *
 * The VC4 GPU architecture (both scanout and rendering) has direct
 * access to system memory with no MMU in between. To support it, we
 * use the GEM CMA helper functions to allocate contiguous ranges of
 * physical memory for our BOs.
 *
 * Since the CMA allocator is very slow, we keep a cache of recently
 * freed BOs around so that the kernel's allocation of objects for 3D
 * rendering can return quickly.
 */

#include <linux/dma-buf.h>

#include "vc4_drv.h"
#include "uapi/drm/vc4_drm.h"

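/* Dump the BO allocation and cache statistics to the kernel log,
 * e.g. when a CMA allocation fails.
 */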
static void vc4_bo_stats_dump(struct vc4_dev *vc4)
{
	DRM_INFO("num bos allocated: %d\n",
		 vc4->bo_stats.num_allocated);
	DRM_INFO("size bos allocated: %dkb\n",
		 vc4->bo_stats.size_allocated / 1024);
	DRM_INFO("num bos used: %d\n",
		 vc4->bo_stats.num_allocated - vc4->bo_stats.num_cached);
	DRM_INFO("size bos used: %dkb\n",
		 (vc4->bo_stats.size_allocated -
		  vc4->bo_stats.size_cached) / 1024);
	DRM_INFO("num bos cached: %d\n",
		 vc4->bo_stats.num_cached);
	DRM_INFO("size bos cached: %dkb\n",
		 vc4->bo_stats.size_cached / 1024);
}

#ifdef CONFIG_DEBUG_FS
int vc4_bo_stats_debugfs(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_bo_stats stats;

	/* Take a snapshot of the current stats with the lock held. */
	mutex_lock(&vc4->bo_lock);
	stats = vc4->bo_stats;
	mutex_unlock(&vc4->bo_lock);

	seq_printf(m, "num bos allocated: %d\n",
		   stats.num_allocated);
	seq_printf(m, "size bos allocated: %dkb\n",
		   stats.size_allocated / 1024);
	seq_printf(m, "num bos used: %d\n",
		   stats.num_allocated - stats.num_cached);
	seq_printf(m, "size bos used: %dkb\n",
		   (stats.size_allocated - stats.size_cached) / 1024);
	seq_printf(m, "num bos cached: %d\n",
		   stats.num_cached);
	seq_printf(m, "size bos cached: %dkb\n",
		   stats.size_cached / 1024);

	return 0;
}
#endif

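/* The BO cache keeps one free list per BO size, in whole pages:
 * bucket N holds BOs of exactly N + 1 pages.
 */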
static uint32_t bo_page_index(size_t size)
{
	return (size / PAGE_SIZE) - 1;
}

/* Must be called with bo_lock held. */
static void vc4_bo_destroy(struct vc4_bo *bo)
{
	struct drm_gem_object *obj = &bo->base.base;
	struct vc4_dev *vc4 = to_vc4_dev(obj->dev);

	if (bo->validated_shader) {
		kfree(bo->validated_shader->texture_samples);
		kfree(bo->validated_shader);
		bo->validated_shader = NULL;
	}

	vc4->bo_stats.num_allocated--;
	vc4->bo_stats.size_allocated -= obj->size;

	reservation_object_fini(&bo->_resv);

	drm_gem_cma_free_object(obj);
}

/* Must be called with bo_lock held. */
static void vc4_bo_remove_from_cache(struct vc4_bo *bo)
{
	struct drm_gem_object *obj = &bo->base.base;
	struct vc4_dev *vc4 = to_vc4_dev(obj->dev);

	vc4->bo_stats.num_cached--;
	vc4->bo_stats.size_cached -= obj->size;

	list_del(&bo->unref_head);
	list_del(&bo->size_head);
}

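/* Look up the cache list head for BOs of the given size, growing the
 * bucket array on demand.  Must be called with bo_lock held.
 */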
static struct list_head *vc4_get_cache_list_for_size(struct drm_device *dev,
						     size_t size)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	uint32_t page_index = bo_page_index(size);

	if (vc4->bo_cache.size_list_size <= page_index) {
		uint32_t new_size = max(vc4->bo_cache.size_list_size * 2,
					page_index + 1);
		struct list_head *new_list;
		uint32_t i;

		new_list = kmalloc_array(new_size, sizeof(struct list_head),
					 GFP_KERNEL);
		if (!new_list)
			return NULL;

		/* Rebase the old cached BO lists to their new list
		 * head locations.
		 */
		for (i = 0; i < vc4->bo_cache.size_list_size; i++) {
			struct list_head *old_list =
				&vc4->bo_cache.size_list[i];

			if (list_empty(old_list))
				INIT_LIST_HEAD(&new_list[i]);
			else
				list_replace(old_list, &new_list[i]);
		}
		/* And initialize the brand new BO list heads. */
		for (i = vc4->bo_cache.size_list_size; i < new_size; i++)
			INIT_LIST_HEAD(&new_list[i]);

		kfree(vc4->bo_cache.size_list);
		vc4->bo_cache.size_list = new_list;
		vc4->bo_cache.size_list_size = new_size;
	}

	return &vc4->bo_cache.size_list[page_index];
}

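/* Free every BO sitting in the cache, either because CMA is exhausted
 * or because the driver is being torn down.
 */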
static void vc4_bo_cache_purge(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	mutex_lock(&vc4->bo_lock);
	while (!list_empty(&vc4->bo_cache.time_list)) {
		struct vc4_bo *bo = list_last_entry(&vc4->bo_cache.time_list,
						    struct vc4_bo, unref_head);
		vc4_bo_remove_from_cache(bo);
		vc4_bo_destroy(bo);
	}
	mutex_unlock(&vc4->bo_lock);
}

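/* Try to reuse a cached BO of exactly the requested page-aligned size,
 * reviving its refcount; returns NULL on a cache miss.
 */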
static struct vc4_bo *vc4_bo_get_from_cache(struct drm_device *dev,
					    uint32_t size)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	uint32_t page_index = bo_page_index(size);
	struct vc4_bo *bo = NULL;

	size = roundup(size, PAGE_SIZE);

	mutex_lock(&vc4->bo_lock);
	if (page_index >= vc4->bo_cache.size_list_size)
		goto out;

	if (list_empty(&vc4->bo_cache.size_list[page_index]))
		goto out;

	bo = list_first_entry(&vc4->bo_cache.size_list[page_index],
			      struct vc4_bo, size_head);
	vc4_bo_remove_from_cache(bo);
	kref_init(&bo->base.base.refcount);

out:
	mutex_unlock(&vc4->bo_lock);
	return bo;
}

/**
 * vc4_create_object - Implementation of driver->gem_create_object.
 * @dev: DRM device
 * @size: Size in bytes of the memory the object will reference
 *
 * This lets the CMA helpers allocate object structs for us, and keep
 * our BO stats correct.
 */
struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_bo *bo;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&vc4->bo_lock);
	vc4->bo_stats.num_allocated++;
	vc4->bo_stats.size_allocated += size;
	mutex_unlock(&vc4->bo_lock);

	bo->resv = &bo->_resv;
	reservation_object_init(bo->resv);

	return &bo->base.base;
}

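/**
 * vc4_bo_create - Allocate a BO, preferring the kernel BO cache.
 * @dev: DRM device
 * @unaligned_size: Size in bytes, rounded up to a page multiple
 * @allow_unzeroed: If false, a BO reused from the cache is cleared first
 *
 * Falls back to a fresh CMA allocation on a cache miss, purging the
 * cache and retrying once if CMA is exhausted.
 */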
struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size,
			     bool allow_unzeroed)
{
	size_t size = roundup(unaligned_size, PAGE_SIZE);
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_gem_cma_object *cma_obj;
	struct vc4_bo *bo;

	if (size == 0)
		return ERR_PTR(-EINVAL);

	/* First, try to get a vc4_bo from the kernel BO cache. */
	bo = vc4_bo_get_from_cache(dev, size);
	if (bo) {
		if (!allow_unzeroed)
			memset(bo->base.vaddr, 0, bo->base.base.size);
		return bo;
	}

	cma_obj = drm_gem_cma_create(dev, size);
	if (IS_ERR(cma_obj)) {
		/*
		 * If we've run out of CMA memory, kill the cache of
		 * CMA allocations we've got lying around and try again.
		 */
		vc4_bo_cache_purge(dev);

		cma_obj = drm_gem_cma_create(dev, size);
		if (IS_ERR(cma_obj)) {
			DRM_ERROR("Failed to allocate from CMA:\n");
			vc4_bo_stats_dump(vc4);
			return ERR_PTR(-ENOMEM);
		}
	}
	return to_vc4_bo(&cma_obj->base);
}

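/* Implementation of the dumb-buffer hook: derive a pitch and size from
 * the requested dimensions, then allocate a zeroed BO and return a
 * handle to it.
 */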
int vc4_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args)
{
	int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct vc4_bo *bo = NULL;
	int ret;

	if (args->pitch < min_pitch)
		args->pitch = min_pitch;

	if (args->size < args->pitch * args->height)
		args->size = args->pitch * args->height;

	bo = vc4_bo_create(dev, args->size, false);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
	drm_gem_object_unreference_unlocked(&bo->base.base);

	return ret;
}

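/* Age out cached BOs that have been unused for at least a second,
 * rearming the cache timer if younger entries remain.
 */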
/* Must be called with bo_lock held. */
static void vc4_bo_cache_free_old(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	unsigned long expire_time = jiffies - msecs_to_jiffies(1000);

	while (!list_empty(&vc4->bo_cache.time_list)) {
		struct vc4_bo *bo = list_last_entry(&vc4->bo_cache.time_list,
						    struct vc4_bo, unref_head);
		if (time_before(expire_time, bo->free_time)) {
			mod_timer(&vc4->bo_cache.time_timer,
				  round_jiffies_up(jiffies +
						   msecs_to_jiffies(1000)));
			return;
		}

		vc4_bo_remove_from_cache(bo);
		vc4_bo_destroy(bo);
	}
}

/* Called on the last userspace/kernel unreference of the BO. Returns
 * it to the BO cache if possible, otherwise frees it.
 */
void vc4_free_object(struct drm_gem_object *gem_bo)
{
	struct drm_device *dev = gem_bo->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_bo *bo = to_vc4_bo(gem_bo);
	struct list_head *cache_list;

	mutex_lock(&vc4->bo_lock);
	/* If the object references someone else's memory, we can't cache it.
	 */
	if (gem_bo->import_attach) {
		vc4_bo_destroy(bo);
		goto out;
	}

	/* Don't cache if it was publicly named. */
	if (gem_bo->name) {
		vc4_bo_destroy(bo);
		goto out;
	}

	/* If this object was partially constructed but CMA allocation
	 * had failed, just free it.
	 */
	if (!bo->base.vaddr) {
		vc4_bo_destroy(bo);
		goto out;
	}

	cache_list = vc4_get_cache_list_for_size(dev, gem_bo->size);
	if (!cache_list) {
		vc4_bo_destroy(bo);
		goto out;
	}

	if (bo->validated_shader) {
		kfree(bo->validated_shader->texture_samples);
		kfree(bo->validated_shader);
		bo->validated_shader = NULL;
	}

	bo->free_time = jiffies;
	list_add(&bo->size_head, cache_list);
	list_add(&bo->unref_head, &vc4->bo_cache.time_list);

	vc4->bo_stats.num_cached++;
	vc4->bo_stats.size_cached += gem_bo->size;

	vc4_bo_cache_free_old(dev);

out:
	mutex_unlock(&vc4->bo_lock);
}

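/* Once the cache timer fires, age old BOs out of the cache from
 * process context, under bo_lock.
 */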
static void vc4_bo_cache_time_work(struct work_struct *work)
{
	struct vc4_dev *vc4 =
		container_of(work, struct vc4_dev, bo_cache.time_work);
	struct drm_device *dev = vc4->dev;

	mutex_lock(&vc4->bo_lock);
	vc4_bo_cache_free_old(dev);
	mutex_unlock(&vc4->bo_lock);
}

static void vc4_bo_cache_time_timer(unsigned long data)
{
	struct drm_device *dev = (struct drm_device *)data;
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	schedule_work(&vc4->bo_cache.time_work);
}

struct reservation_object *vc4_prime_res_obj(struct drm_gem_object *obj)
{
	struct vc4_bo *bo = to_vc4_bo(obj);

	return bo->resv;
}

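/* A validated shader BO's contents are trusted by the kernel, so
 * refuse to export it where another device could rewrite it after
 * validation.
 */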
struct dma_buf *
vc4_prime_export(struct drm_device *dev, struct drm_gem_object *obj, int flags)
{
	struct vc4_bo *bo = to_vc4_bo(obj);

	if (bo->validated_shader) {
		DRM_ERROR("Attempting to export shader BO\n");
		return ERR_PTR(-EINVAL);
	}

	return drm_gem_prime_export(dev, obj, flags);
}

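/* mmap of a vc4 BO through the DRM file.  Validated shader BOs may
 * only be mapped read-only, and the CMA backing store is mapped
 * write-combined.
 */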
int vc4_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem_obj;
	struct vc4_bo *bo;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret)
		return ret;

	gem_obj = vma->vm_private_data;
	bo = to_vc4_bo(gem_obj);

	if (bo->validated_shader && (vma->vm_flags & VM_WRITE)) {
		DRM_ERROR("mmapping of shader BOs for writing not allowed.\n");
		return -EINVAL;
	}

	/*
	 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
	 * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
	 * the whole buffer.
	 */
	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_pgoff = 0;

	ret = dma_mmap_wc(bo->base.base.dev->dev, vma, bo->base.vaddr,
			  bo->base.paddr, vma->vm_end - vma->vm_start);
	if (ret)
		drm_gem_vm_close(vma);

	return ret;
}

int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct vc4_bo *bo = to_vc4_bo(obj);

	if (bo->validated_shader && (vma->vm_flags & VM_WRITE)) {
		DRM_ERROR("mmapping of shader BOs for writing not allowed.\n");
		return -EINVAL;
	}

	return drm_gem_cma_prime_mmap(obj, vma);
}

void *vc4_prime_vmap(struct drm_gem_object *obj)
{
	struct vc4_bo *bo = to_vc4_bo(obj);

	if (bo->validated_shader) {
		DRM_ERROR("mmapping of shader BOs not allowed.\n");
		return ERR_PTR(-EINVAL);
	}

	return drm_gem_cma_prime_vmap(obj);
}

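/* On import, adopt the exporter's reservation object so that fencing
 * is shared with the exporting driver.
 */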
struct drm_gem_object *
vc4_prime_import_sg_table(struct drm_device *dev,
			  struct dma_buf_attachment *attach,
			  struct sg_table *sgt)
{
	struct drm_gem_object *obj;
	struct vc4_bo *bo;

	obj = drm_gem_cma_prime_import_sg_table(dev, attach, sgt);
	if (IS_ERR(obj))
		return obj;

	bo = to_vc4_bo(obj);
	bo->resv = attach->dmabuf->resv;

	return obj;
}

int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_vc4_create_bo *args = data;
	struct vc4_bo *bo = NULL;
	int ret;

	/*
	 * Ask for a zeroed BO: cached BOs aren't cleared when freed, and
	 * handing one back with stale contents would leak data between
	 * users.
	 */
	bo = vc4_bo_create(dev, args->size, false);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
	drm_gem_object_unreference_unlocked(&bo->base.base);

	return ret;
}

int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_vc4_mmap_bo *args = data;
	struct drm_gem_object *gem_obj;

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
		return -EINVAL;
	}

	/* The mmap offset was set up at BO allocation time. */
	args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);

	drm_gem_object_unreference_unlocked(gem_obj);
	return 0;
}

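/* Create a shader BO: copy in the user's code, zero the remainder of
 * the buffer, and run it through the shader validator before a handle
 * is published.
 */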
int
vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_vc4_create_shader_bo *args = data;
	struct vc4_bo *bo = NULL;
	int ret;

	if (args->size == 0)
		return -EINVAL;

	if (args->size % sizeof(u64) != 0)
		return -EINVAL;

	if (args->flags != 0) {
		DRM_INFO("Unknown flags set: 0x%08x\n", args->flags);
		return -EINVAL;
	}

	if (args->pad != 0) {
		DRM_INFO("Pad set: 0x%08x\n", args->pad);
		return -EINVAL;
	}

	bo = vc4_bo_create(dev, args->size, true);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	if (copy_from_user(bo->base.vaddr,
			   (void __user *)(uintptr_t)args->data,
			   args->size)) {
		ret = -EFAULT;
		goto fail;
	}
	/* Clear the rest of the memory, which may contain stale data if
	 * the BO came from the BO cache.
	 */
	memset(bo->base.vaddr + args->size, 0,
	       bo->base.base.size - args->size);

	bo->validated_shader = vc4_validate_shader(&bo->base);
	if (!bo->validated_shader) {
		ret = -EINVAL;
		goto fail;
	}

	/* We have to create the handle after validation, to avoid
	 * races where users could do things like mmap the shader BO.
	 */
	ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);

fail:
	drm_gem_object_unreference_unlocked(&bo->base.base);

	return ret;
}

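/* Set up the BO cache at driver load: the BO lock, the time-ordered
 * list of cached BOs, and the work/timer pair that ages them out.
 */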
void vc4_bo_cache_init(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	mutex_init(&vc4->bo_lock);

	INIT_LIST_HEAD(&vc4->bo_cache.time_list);

	INIT_WORK(&vc4->bo_cache.time_work, vc4_bo_cache_time_work);
	setup_timer(&vc4->bo_cache.time_timer,
		    vc4_bo_cache_time_timer,
		    (unsigned long)dev);
}

void vc4_bo_cache_destroy(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	del_timer(&vc4->bo_cache.time_timer);
	cancel_work_sync(&vc4->bo_cache.time_work);

	vc4_bo_cache_purge(dev);

	if (vc4->bo_stats.num_allocated) {
		DRM_ERROR("Destroying BO cache while BOs still allocated:\n");
		vc4_bo_stats_dump(vc4);
	}
}