/*
 * Copyright © 2015 Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/**
 * DOC: VC4 GEM BO management support
 *
 * The VC4 GPU architecture (both scanout and rendering) has direct
 * access to system memory with no MMU in between. To support it, we
 * use the GEM CMA helper functions to allocate contiguous ranges of
 * physical memory for our BOs.
 *
 * Since the CMA allocator is very slow, we keep a cache of recently
 * freed BOs around so that the kernel's allocation of objects for 3D
 * rendering can return quickly.
 */

#include <linux/dma-buf.h>

#include "vc4_drv.h"
#include "uapi/drm/vc4_drm.h"

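/*
 * Allocation flow in this file: vc4_bo_create() first tries to reuse a
 * cached BO of the right size via vc4_bo_get_from_cache(), then falls
 * back to drm_gem_cma_create(), purging the kernel and userspace BO
 * caches if CMA is exhausted. Freed BOs come back through
 * vc4_free_object(), which parks reusable ones in the cache instead of
 * freeing them immediately.
 */
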
static const char * const bo_type_names[] = {
	"kernel",
	"V3D",
	"V3D shader",
	"dumb",
	"binner",
	"RCL",
	"BCL",
	"kernel BO cache",
};

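/* Labels at or beyond VC4_BO_TYPE_COUNT are dynamically allocated user
 * labels (created through vc4_label_bo_ioctl()).
 */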
static bool is_user_label(int label)
{
	return label >= VC4_BO_TYPE_COUNT;
}

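/* Dumps the per-label BO allocation stats to the kernel log, e.g. when
 * a CMA allocation fails and we want to know who is holding memory.
 */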
static void vc4_bo_stats_dump(struct vc4_dev *vc4)
{
	int i;

	for (i = 0; i < vc4->num_labels; i++) {
		if (!vc4->bo_labels[i].num_allocated)
			continue;

		DRM_INFO("%30s: %6dkb BOs (%d)\n",
			 vc4->bo_labels[i].name,
			 vc4->bo_labels[i].size_allocated / 1024,
			 vc4->bo_labels[i].num_allocated);
	}

	mutex_lock(&vc4->purgeable.lock);
	if (vc4->purgeable.num)
		DRM_INFO("%30s: %6zdkb BOs (%d)\n", "userspace BO cache",
			 vc4->purgeable.size / 1024, vc4->purgeable.num);

	if (vc4->purgeable.purged_num)
		DRM_INFO("%30s: %6zdkb BOs (%d)\n", "total purged BO",
			 vc4->purgeable.purged_size / 1024,
			 vc4->purgeable.purged_num);
	mutex_unlock(&vc4->purgeable.lock);
}

#ifdef CONFIG_DEBUG_FS
int vc4_bo_stats_debugfs(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int i;

	mutex_lock(&vc4->bo_lock);
	for (i = 0; i < vc4->num_labels; i++) {
		if (!vc4->bo_labels[i].num_allocated)
			continue;

		seq_printf(m, "%30s: %6dkb BOs (%d)\n",
			   vc4->bo_labels[i].name,
			   vc4->bo_labels[i].size_allocated / 1024,
			   vc4->bo_labels[i].num_allocated);
	}
	mutex_unlock(&vc4->bo_lock);

	mutex_lock(&vc4->purgeable.lock);
	if (vc4->purgeable.num)
		seq_printf(m, "%30s: %6zdkb BOs (%d)\n", "userspace BO cache",
			   vc4->purgeable.size / 1024, vc4->purgeable.num);

	if (vc4->purgeable.purged_num)
		seq_printf(m, "%30s: %6zdkb BOs (%d)\n", "total purged BO",
			   vc4->purgeable.purged_size / 1024,
			   vc4->purgeable.purged_num);
	mutex_unlock(&vc4->purgeable.lock);

	return 0;
}
#endif

/* Takes ownership of *name and returns the appropriate slot for it in
 * the bo_labels[] array, extending it as necessary.
 *
 * This is inefficient and could use a hash table instead of walking
 * an array and strcmp()ing. However, the assumption is that user
 * labeling will be infrequent (scanout buffers and other long-lived
 * objects, or debug driver builds), so we can live with it for now.
 */
static int vc4_get_user_label(struct vc4_dev *vc4, const char *name)
{
	int i;
	int free_slot = -1;

	for (i = 0; i < vc4->num_labels; i++) {
		if (!vc4->bo_labels[i].name) {
			free_slot = i;
		} else if (strcmp(vc4->bo_labels[i].name, name) == 0) {
			kfree(name);
			return i;
		}
	}

	if (free_slot != -1) {
		WARN_ON(vc4->bo_labels[free_slot].num_allocated != 0);
		vc4->bo_labels[free_slot].name = name;
		return free_slot;
	} else {
		u32 new_label_count = vc4->num_labels + 1;
		struct vc4_label *new_labels =
			krealloc(vc4->bo_labels,
				 new_label_count * sizeof(*new_labels),
				 GFP_KERNEL);

		if (!new_labels) {
			kfree(name);
			return -1;
		}

		free_slot = vc4->num_labels;
		vc4->bo_labels = new_labels;
		vc4->num_labels = new_label_count;

		vc4->bo_labels[free_slot].name = name;
		vc4->bo_labels[free_slot].num_allocated = 0;
		vc4->bo_labels[free_slot].size_allocated = 0;

		return free_slot;
	}
}

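/* Transfers the BO's stats accounting from its current label to @label
 * (-1 drops the accounting entirely, as done at destroy time), and
 * releases the old slot if this was the last BO carrying a user label.
 */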
static void vc4_bo_set_label(struct drm_gem_object *gem_obj, int label)
{
	struct vc4_bo *bo = to_vc4_bo(gem_obj);
	struct vc4_dev *vc4 = to_vc4_dev(gem_obj->dev);

	lockdep_assert_held(&vc4->bo_lock);

	if (label != -1) {
		vc4->bo_labels[label].num_allocated++;
		vc4->bo_labels[label].size_allocated += gem_obj->size;
	}

	vc4->bo_labels[bo->label].num_allocated--;
	vc4->bo_labels[bo->label].size_allocated -= gem_obj->size;

	if (vc4->bo_labels[bo->label].num_allocated == 0 &&
	    is_user_label(bo->label)) {
		/* Free user BO label slots on last unreference.
		 * Slots are just where we track the stats for a given
		 * name, and once a name is unused we can reuse that
		 * slot.
		 */
		kfree(vc4->bo_labels[bo->label].name);
		vc4->bo_labels[bo->label].name = NULL;
	}

	bo->label = label;
}

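/* BO sizes are page-aligned, so the cache buckets BOs by page count:
 * a BO of N pages lives in size_list[N - 1].
 */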
static uint32_t bo_page_index(size_t size)
{
	return (size / PAGE_SIZE) - 1;
}

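/* Actually frees the BO's backing storage, as opposed to returning it
 * to the BO cache. Must be called with bo_lock held.
 */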
static void vc4_bo_destroy(struct vc4_bo *bo)
{
	struct drm_gem_object *obj = &bo->base.base;
	struct vc4_dev *vc4 = to_vc4_dev(obj->dev);

	lockdep_assert_held(&vc4->bo_lock);

	vc4_bo_set_label(obj, -1);

	if (bo->validated_shader) {
		kfree(bo->validated_shader->texture_samples);
		kfree(bo->validated_shader);
		bo->validated_shader = NULL;
	}

	reservation_object_fini(&bo->_resv);

	drm_gem_cma_free_object(obj);
}

static void vc4_bo_remove_from_cache(struct vc4_bo *bo)
{
	struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);

	lockdep_assert_held(&vc4->bo_lock);
	list_del(&bo->unref_head);
	list_del(&bo->size_head);
}

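/* Returns the cache list for BOs of @size, growing the size_list array
 * (at least doubling it) when @size falls beyond the current array.
 */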
static struct list_head *vc4_get_cache_list_for_size(struct drm_device *dev,
						     size_t size)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	uint32_t page_index = bo_page_index(size);

	if (vc4->bo_cache.size_list_size <= page_index) {
		uint32_t new_size = max(vc4->bo_cache.size_list_size * 2,
					page_index + 1);
		struct list_head *new_list;
		uint32_t i;

		new_list = kmalloc_array(new_size, sizeof(struct list_head),
					 GFP_KERNEL);
		if (!new_list)
			return NULL;

		/* Rebase the old cached BO lists to their new list
		 * head locations.
		 */
		for (i = 0; i < vc4->bo_cache.size_list_size; i++) {
			struct list_head *old_list =
				&vc4->bo_cache.size_list[i];

			if (list_empty(old_list))
				INIT_LIST_HEAD(&new_list[i]);
			else
				list_replace(old_list, &new_list[i]);
		}
		/* And initialize the brand new BO list heads. */
		for (i = vc4->bo_cache.size_list_size; i < new_size; i++)
			INIT_LIST_HEAD(&new_list[i]);

		kfree(vc4->bo_cache.size_list);
		vc4->bo_cache.size_list = new_list;
		vc4->bo_cache.size_list_size = new_size;
	}

	return &vc4->bo_cache.size_list[page_index];
}

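/* Frees every BO sitting in the kernel BO cache, used when CMA has run
 * out of space and we need the memory back.
 */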
static void vc4_bo_cache_purge(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	mutex_lock(&vc4->bo_lock);
	while (!list_empty(&vc4->bo_cache.time_list)) {
		struct vc4_bo *bo = list_last_entry(&vc4->bo_cache.time_list,
						    struct vc4_bo, unref_head);
		vc4_bo_remove_from_cache(bo);
		vc4_bo_destroy(bo);
	}
	mutex_unlock(&vc4->bo_lock);
}

void vc4_bo_add_to_purgeable_pool(struct vc4_bo *bo)
{
	struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);

	mutex_lock(&vc4->purgeable.lock);
	list_add_tail(&bo->size_head, &vc4->purgeable.list);
	vc4->purgeable.num++;
	vc4->purgeable.size += bo->base.base.size;
	mutex_unlock(&vc4->purgeable.lock);
}

static void vc4_bo_remove_from_purgeable_pool_locked(struct vc4_bo *bo)
{
	struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);

	/* list_del_init() is used here because the caller might release
	 * the purgeable lock in order to acquire the madv one and update the
	 * madv status.
	 * During this short period of time a user might decide to mark
	 * the BO as unpurgeable, and if bo->madv is set to
	 * VC4_MADV_DONTNEED it will try to remove the BO from the
	 * purgeable list which will fail if the ->next/prev fields
	 * are set to LIST_POISON1/LIST_POISON2 (which is what
	 * list_del() does).
	 * Re-initializing the list element guarantees that list_del()
	 * will work correctly even if it's a NOP.
	 */
	list_del_init(&bo->size_head);
	vc4->purgeable.num--;
	vc4->purgeable.size -= bo->base.base.size;
}

void vc4_bo_remove_from_purgeable_pool(struct vc4_bo *bo)
{
	struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);

	mutex_lock(&vc4->purgeable.lock);
	vc4_bo_remove_from_purgeable_pool_locked(bo);
	mutex_unlock(&vc4->purgeable.lock);
}

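/* Releases the BO's backing memory and invalidates any existing
 * userspace mappings. The GEM object itself stays around in the
 * __VC4_MADV_PURGED state until its last reference is dropped.
 */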
static void vc4_bo_purge(struct drm_gem_object *obj)
{
	struct vc4_bo *bo = to_vc4_bo(obj);
	struct drm_device *dev = obj->dev;

	WARN_ON(!mutex_is_locked(&bo->madv_lock));
	WARN_ON(bo->madv != VC4_MADV_DONTNEED);

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);

	dma_free_wc(dev->dev, obj->size, bo->base.vaddr, bo->base.paddr);
	bo->base.vaddr = NULL;
	bo->madv = __VC4_MADV_PURGED;
}

static void vc4_bo_userspace_cache_purge(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	mutex_lock(&vc4->purgeable.lock);
	while (!list_empty(&vc4->purgeable.list)) {
		struct vc4_bo *bo = list_first_entry(&vc4->purgeable.list,
						     struct vc4_bo, size_head);
		struct drm_gem_object *obj = &bo->base.base;
		size_t purged_size = 0;

		vc4_bo_remove_from_purgeable_pool_locked(bo);

		/* Release the purgeable lock while we're purging the BO so
		 * that other people can continue inserting things in the
		 * purgeable pool without having to wait for all BOs to be
		 * purged.
		 */
		mutex_unlock(&vc4->purgeable.lock);
		mutex_lock(&bo->madv_lock);

		/* Since we released the purgeable pool lock before acquiring
		 * the BO madv one, the user may have marked the BO as WILLNEED
		 * and re-used it in the meantime.
		 * Before purging the BO we need to make sure
		 * - it is still marked as DONTNEED
		 * - it has not been re-inserted in the purgeable list
		 * - it is not used by HW blocks
		 * If one of these conditions is not met, just skip the entry.
		 */
		if (bo->madv == VC4_MADV_DONTNEED &&
		    list_empty(&bo->size_head) &&
		    !refcount_read(&bo->usecnt)) {
			purged_size = bo->base.base.size;
			vc4_bo_purge(obj);
		}
		mutex_unlock(&bo->madv_lock);
		mutex_lock(&vc4->purgeable.lock);

		if (purged_size) {
			vc4->purgeable.purged_size += purged_size;
			vc4->purgeable.purged_num++;
		}
	}
	mutex_unlock(&vc4->purgeable.lock);
}

static struct vc4_bo *vc4_bo_get_from_cache(struct drm_device *dev,
					    uint32_t size,
					    enum vc4_kernel_bo_type type)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	uint32_t page_index = bo_page_index(size);
	struct vc4_bo *bo = NULL;

	size = roundup(size, PAGE_SIZE);

	mutex_lock(&vc4->bo_lock);
	if (page_index >= vc4->bo_cache.size_list_size)
		goto out;

	if (list_empty(&vc4->bo_cache.size_list[page_index]))
		goto out;

	bo = list_first_entry(&vc4->bo_cache.size_list[page_index],
			      struct vc4_bo, size_head);
	vc4_bo_remove_from_cache(bo);
	kref_init(&bo->base.base.refcount);

out:
	if (bo)
		vc4_bo_set_label(&bo->base.base, type);
	mutex_unlock(&vc4->bo_lock);
	return bo;
}

/**
 * vc4_create_object - Implementation of driver->gem_create_object.
 * @dev: DRM device
 * @size: Size in bytes of the memory the object will reference
 *
 * This lets the CMA helpers allocate object structs for us, and keep
 * our BO stats correct.
 */
struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_bo *bo;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	bo->madv = VC4_MADV_WILLNEED;
	refcount_set(&bo->usecnt, 0);
	mutex_init(&bo->madv_lock);
	mutex_lock(&vc4->bo_lock);
	bo->label = VC4_BO_TYPE_KERNEL;
	vc4->bo_labels[VC4_BO_TYPE_KERNEL].num_allocated++;
	vc4->bo_labels[VC4_BO_TYPE_KERNEL].size_allocated += size;
	mutex_unlock(&vc4->bo_lock);
	bo->resv = &bo->_resv;
	reservation_object_init(bo->resv);

	return &bo->base.base;
}

struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size,
			     bool allow_unzeroed, enum vc4_kernel_bo_type type)
{
	size_t size = roundup(unaligned_size, PAGE_SIZE);
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_gem_cma_object *cma_obj;
	struct vc4_bo *bo;

	if (size == 0)
		return ERR_PTR(-EINVAL);

	/* First, try to get a vc4_bo from the kernel BO cache. */
	bo = vc4_bo_get_from_cache(dev, size, type);
	if (bo) {
		if (!allow_unzeroed)
			memset(bo->base.vaddr, 0, bo->base.base.size);
		return bo;
	}

	cma_obj = drm_gem_cma_create(dev, size);
	if (IS_ERR(cma_obj)) {
		/*
		 * If we've run out of CMA memory, kill the cache of
		 * CMA allocations we've got laying around and try again.
		 */
		vc4_bo_cache_purge(dev);
		cma_obj = drm_gem_cma_create(dev, size);
	}

	if (IS_ERR(cma_obj)) {
		/*
		 * Still not enough CMA memory, purge the userspace BO
		 * cache and retry.
		 * This is sub-optimal since we purge the whole userspace
		 * BO cache, which forces users that want to re-use the BO
		 * to restore its initial content.
		 * Ideally, we should purge entries one by one and retry
		 * after each to see if CMA allocation succeeds. Or even
		 * better, try to find an entry with at least the same
		 * size.
		 */
		vc4_bo_userspace_cache_purge(dev);
		cma_obj = drm_gem_cma_create(dev, size);
	}

	if (IS_ERR(cma_obj)) {
		DRM_ERROR("Failed to allocate from CMA:\n");
		vc4_bo_stats_dump(vc4);
		return ERR_PTR(-ENOMEM);
	}
	bo = to_vc4_bo(&cma_obj->base);

	/* By default, BOs do not support the MADV ioctl. This will be enabled
	 * only on BOs that are exposed to userspace (V3D, V3D_SHADER and DUMB
	 * BOs).
	 */
	bo->madv = __VC4_MADV_NOTSUPP;

	mutex_lock(&vc4->bo_lock);
	vc4_bo_set_label(&cma_obj->base, type);
	mutex_unlock(&vc4->bo_lock);

	return bo;
}

int vc4_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args)
{
	int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct vc4_bo *bo = NULL;
	int ret;

	if (args->pitch < min_pitch)
		args->pitch = min_pitch;

	if (args->size < args->pitch * args->height)
		args->size = args->pitch * args->height;

	bo = vc4_bo_create(dev, args->size, false, VC4_BO_TYPE_DUMB);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	bo->madv = VC4_MADV_WILLNEED;

	ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
	drm_gem_object_put_unlocked(&bo->base.base);

	return ret;
}

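/* Frees cached BOs that have been unused for more than a second,
 * re-arming the cache timer when newer entries remain.
 */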
526static void vc4_bo_cache_free_old(struct drm_device *dev)
527{
528 struct vc4_dev *vc4 = to_vc4_dev(dev);
529 unsigned long expire_time = jiffies - msecs_to_jiffies(1000);
530
4e6b1e91
EA
531 lockdep_assert_held(&vc4->bo_lock);
532
c826a6e1
EA
533 while (!list_empty(&vc4->bo_cache.time_list)) {
534 struct vc4_bo *bo = list_last_entry(&vc4->bo_cache.time_list,
535 struct vc4_bo, unref_head);
536 if (time_before(expire_time, bo->free_time)) {
537 mod_timer(&vc4->bo_cache.time_timer,
538 round_jiffies_up(jiffies +
539 msecs_to_jiffies(1000)));
540 return;
541 }
542
543 vc4_bo_remove_from_cache(bo);
544 vc4_bo_destroy(bo);
545 }
546}
547
/* Called on the last userspace/kernel unreference of the BO. Returns
 * it to the BO cache if possible, otherwise frees it.
 */
void vc4_free_object(struct drm_gem_object *gem_bo)
{
	struct drm_device *dev = gem_bo->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_bo *bo = to_vc4_bo(gem_bo);
	struct list_head *cache_list;

	/* Remove the BO from the purgeable list. */
	mutex_lock(&bo->madv_lock);
	if (bo->madv == VC4_MADV_DONTNEED && !refcount_read(&bo->usecnt))
		vc4_bo_remove_from_purgeable_pool(bo);
	mutex_unlock(&bo->madv_lock);

	mutex_lock(&vc4->bo_lock);
	/* If the object references someone else's memory, we can't cache it.
	 */
	if (gem_bo->import_attach) {
		vc4_bo_destroy(bo);
		goto out;
	}

	/* Don't cache if it was publicly named. */
	if (gem_bo->name) {
		vc4_bo_destroy(bo);
		goto out;
	}

	/* If this object was partially constructed but CMA allocation
	 * had failed, just free it. Can also happen when the BO has been
	 * purged.
	 */
	if (!bo->base.vaddr) {
		vc4_bo_destroy(bo);
		goto out;
	}

	cache_list = vc4_get_cache_list_for_size(dev, gem_bo->size);
	if (!cache_list) {
		vc4_bo_destroy(bo);
		goto out;
	}

	if (bo->validated_shader) {
		kfree(bo->validated_shader->texture_samples);
		kfree(bo->validated_shader);
		bo->validated_shader = NULL;
	}

	/* Reset madv and usecnt before adding the BO to the cache. */
	bo->madv = __VC4_MADV_NOTSUPP;
	refcount_set(&bo->usecnt, 0);

	bo->t_format = false;
	bo->free_time = jiffies;
	list_add(&bo->size_head, cache_list);
	list_add(&bo->unref_head, &vc4->bo_cache.time_list);

	vc4_bo_set_label(&bo->base.base, VC4_BO_TYPE_KERNEL_CACHE);

	vc4_bo_cache_free_old(dev);

out:
	mutex_unlock(&vc4->bo_lock);
}

static void vc4_bo_cache_time_work(struct work_struct *work)
{
	struct vc4_dev *vc4 =
		container_of(work, struct vc4_dev, bo_cache.time_work);
	struct drm_device *dev = vc4->dev;

	mutex_lock(&vc4->bo_lock);
	vc4_bo_cache_free_old(dev);
	mutex_unlock(&vc4->bo_lock);
}

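/* A BO may only be purged while its usecnt is zero: taking a usecnt
 * reference pins the BO's backing storage in memory.
 */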
int vc4_bo_inc_usecnt(struct vc4_bo *bo)
{
	int ret;

	/* Fast path: if the BO is already retained by someone, no need to
	 * check the madv status.
	 */
	if (refcount_inc_not_zero(&bo->usecnt))
		return 0;

	mutex_lock(&bo->madv_lock);
	switch (bo->madv) {
	case VC4_MADV_WILLNEED:
		if (!refcount_inc_not_zero(&bo->usecnt))
			refcount_set(&bo->usecnt, 1);
		ret = 0;
		break;
	case VC4_MADV_DONTNEED:
		/* We shouldn't use a BO marked as purgeable if at least
		 * someone else retained its content by incrementing usecnt.
		 * Luckily the BO hasn't been purged yet, but something wrong
		 * is happening here. Just throw an error instead of
		 * authorizing this use case.
		 */
	case __VC4_MADV_PURGED:
		/* We can't use a purged BO. */
	default:
		/* Invalid madv value. */
		ret = -EINVAL;
		break;
	}
	mutex_unlock(&bo->madv_lock);

	return ret;
}

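/* Drops a usecnt reference; on the last one, a DONTNEED BO goes back
 * onto the purgeable pool.
 */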
void vc4_bo_dec_usecnt(struct vc4_bo *bo)
{
	/* Fast path: if the BO is still retained by someone, no need to test
	 * the madv value.
	 */
	if (refcount_dec_not_one(&bo->usecnt))
		return;

	mutex_lock(&bo->madv_lock);
	if (refcount_dec_and_test(&bo->usecnt) &&
	    bo->madv == VC4_MADV_DONTNEED)
		vc4_bo_add_to_purgeable_pool(bo);
	mutex_unlock(&bo->madv_lock);
}

static void vc4_bo_cache_time_timer(struct timer_list *t)
{
	struct vc4_dev *vc4 = from_timer(vc4, t, bo_cache.time_timer);

	schedule_work(&vc4->bo_cache.time_work);
}

struct reservation_object *vc4_prime_res_obj(struct drm_gem_object *obj)
{
	struct vc4_bo *bo = to_vc4_bo(obj);

	return bo->resv;
}

struct dma_buf *
vc4_prime_export(struct drm_device *dev, struct drm_gem_object *obj, int flags)
{
	struct vc4_bo *bo = to_vc4_bo(obj);
	struct dma_buf *dmabuf;
	int ret;

	if (bo->validated_shader) {
		DRM_DEBUG("Attempting to export shader BO\n");
		return ERR_PTR(-EINVAL);
	}

	/* Note: as soon as the BO is exported it becomes unpurgeable, because
	 * no one ever decrements the usecnt even if the reference held by the
	 * exported BO is released. This shouldn't be a problem since we don't
	 * expect exported BOs to be marked as purgeable.
	 */
	ret = vc4_bo_inc_usecnt(bo);
	if (ret) {
		DRM_ERROR("Failed to increment BO usecnt\n");
		return ERR_PTR(ret);
	}

	dmabuf = drm_gem_prime_export(dev, obj, flags);
	if (IS_ERR(dmabuf))
		vc4_bo_dec_usecnt(bo);

	return dmabuf;
}

int vc4_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct vc4_bo *bo = to_vc4_bo(obj);

	/* The only reason we would end up here is when user-space accesses
	 * BO's memory after it's been purged.
	 */
	mutex_lock(&bo->madv_lock);
	WARN_ON(bo->madv != __VC4_MADV_PURGED);
	mutex_unlock(&bo->madv_lock);

	return VM_FAULT_SIGBUS;
}

int vc4_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem_obj;
	unsigned long vm_pgoff;
	struct vc4_bo *bo;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret)
		return ret;

	gem_obj = vma->vm_private_data;
	bo = to_vc4_bo(gem_obj);

	if (bo->validated_shader && (vma->vm_flags & VM_WRITE)) {
		DRM_DEBUG("mmapping of shader BOs for writing not allowed.\n");
		return -EINVAL;
	}

	if (bo->madv != VC4_MADV_WILLNEED) {
		DRM_DEBUG("mmapping of %s BO not allowed\n",
			  bo->madv == VC4_MADV_DONTNEED ?
			  "purgeable" : "purged");
		return -EINVAL;
	}

	/*
	 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
	 * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
	 * the whole buffer.
	 */
	vma->vm_flags &= ~VM_PFNMAP;

	/* This ->vm_pgoff dance is needed to make all parties happy:
	 * - dma_mmap_wc() uses ->vm_pgoff as an offset within the allocated
	 *   mem-region, hence the need to set it to zero (the value set by
	 *   the DRM core is a virtual offset encoding the GEM object-id)
	 * - the mmap() core logic needs ->vm_pgoff to be restored to its
	 *   initial value before returning from this function because it
	 *   encodes the offset of this GEM in the dev->anon_inode pseudo-file
	 *   and this information will be used when we invalidate userspace
	 *   mappings with drm_vma_node_unmap() (called from vc4_gem_purge()).
	 */
	vm_pgoff = vma->vm_pgoff;
	vma->vm_pgoff = 0;
	ret = dma_mmap_wc(bo->base.base.dev->dev, vma, bo->base.vaddr,
			  bo->base.paddr, vma->vm_end - vma->vm_start);
	vma->vm_pgoff = vm_pgoff;

	if (ret)
		drm_gem_vm_close(vma);

	return ret;
}

int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct vc4_bo *bo = to_vc4_bo(obj);

	if (bo->validated_shader && (vma->vm_flags & VM_WRITE)) {
		DRM_DEBUG("mmapping of shader BOs for writing not allowed.\n");
		return -EINVAL;
	}

	return drm_gem_cma_prime_mmap(obj, vma);
}

void *vc4_prime_vmap(struct drm_gem_object *obj)
{
	struct vc4_bo *bo = to_vc4_bo(obj);

	if (bo->validated_shader) {
		DRM_DEBUG("mmapping of shader BOs not allowed.\n");
		return ERR_PTR(-EINVAL);
	}

	return drm_gem_cma_prime_vmap(obj);
}

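/* Imported dma-bufs share the exporter's reservation object: bo->resv
 * is pointed at the dma-buf's resv rather than our embedded one.
 */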
struct drm_gem_object *
vc4_prime_import_sg_table(struct drm_device *dev,
			  struct dma_buf_attachment *attach,
			  struct sg_table *sgt)
{
	struct drm_gem_object *obj;
	struct vc4_bo *bo;

	obj = drm_gem_cma_prime_import_sg_table(dev, attach, sgt);
	if (IS_ERR(obj))
		return obj;

	bo = to_vc4_bo(obj);
	bo->resv = attach->dmabuf->resv;

	return obj;
}

int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_vc4_create_bo *args = data;
	struct vc4_bo *bo = NULL;
	int ret;

	/*
	 * We can't allocate from the BO cache, because the BOs don't
	 * get zeroed, and that might leak data between users.
	 */
	bo = vc4_bo_create(dev, args->size, false, VC4_BO_TYPE_V3D);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	bo->madv = VC4_MADV_WILLNEED;

	ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
	drm_gem_object_put_unlocked(&bo->base.base);

	return ret;
}

int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_vc4_mmap_bo *args = data;
	struct drm_gem_object *gem_obj;

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -EINVAL;
	}

	/* The mmap offset was set up at BO allocation time. */
	args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);

	drm_gem_object_put_unlocked(gem_obj);
	return 0;
}

int
vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_vc4_create_shader_bo *args = data;
	struct vc4_bo *bo = NULL;
	int ret;

	if (args->size == 0)
		return -EINVAL;

	if (args->size % sizeof(u64) != 0)
		return -EINVAL;

	if (args->flags != 0) {
		DRM_INFO("Unknown flags set: 0x%08x\n", args->flags);
		return -EINVAL;
	}

	if (args->pad != 0) {
		DRM_INFO("Pad set: 0x%08x\n", args->pad);
		return -EINVAL;
	}

	bo = vc4_bo_create(dev, args->size, true, VC4_BO_TYPE_V3D_SHADER);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	bo->madv = VC4_MADV_WILLNEED;

	if (copy_from_user(bo->base.vaddr,
			   (void __user *)(uintptr_t)args->data,
			   args->size)) {
		ret = -EFAULT;
		goto fail;
	}
	/* Clear the rest of the memory from allocating from the BO
	 * cache.
	 */
	memset(bo->base.vaddr + args->size, 0,
	       bo->base.base.size - args->size);

	bo->validated_shader = vc4_validate_shader(&bo->base);
	if (!bo->validated_shader) {
		ret = -EINVAL;
		goto fail;
	}

	/* We have to create the handle after validation, to avoid
	 * races with users doing things like mmapping the shader BO.
	 */
	ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);

fail:
	drm_gem_object_put_unlocked(&bo->base.base);

	return ret;
}

/**
 * vc4_set_tiling_ioctl() - Sets the tiling modifier for a BO.
 * @dev: DRM device
 * @data: ioctl argument
 * @file_priv: DRM file for this fd
 *
 * The tiling state of the BO decides the default modifier of an fb if
 * no specific modifier was set by userspace, and the return value of
 * vc4_get_tiling_ioctl() (so that userspace can treat a BO it
 * received from dmabuf as the same tiling format as the producer
 * used).
 */
int vc4_set_tiling_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_vc4_set_tiling *args = data;
	struct drm_gem_object *gem_obj;
	struct vc4_bo *bo;
	bool t_format;

	if (args->flags != 0)
		return -EINVAL;

	switch (args->modifier) {
	case DRM_FORMAT_MOD_NONE:
		t_format = false;
		break;
	case DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED:
		t_format = true;
		break;
	default:
		return -EINVAL;
	}

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -ENOENT;
	}
	bo = to_vc4_bo(gem_obj);
	bo->t_format = t_format;

	drm_gem_object_put_unlocked(gem_obj);

	return 0;
}

/**
 * vc4_get_tiling_ioctl() - Gets the tiling modifier for a BO.
 * @dev: DRM device
 * @data: ioctl argument
 * @file_priv: DRM file for this fd
 *
 * Returns the tiling modifier for a BO as set by vc4_set_tiling_ioctl().
 */
int vc4_get_tiling_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_vc4_get_tiling *args = data;
	struct drm_gem_object *gem_obj;
	struct vc4_bo *bo;

	if (args->flags != 0 || args->modifier != 0)
		return -EINVAL;

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -ENOENT;
	}
	bo = to_vc4_bo(gem_obj);

	if (bo->t_format)
		args->modifier = DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED;
	else
		args->modifier = DRM_FORMAT_MOD_NONE;

	drm_gem_object_put_unlocked(gem_obj);

	return 0;
}

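/* Sets up the initial fixed set of BO labels, the BO cache lists, and
 * the worker/timer pair that ages cached BOs out after a second.
 */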
int vc4_bo_cache_init(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int i;

	/* Create the initial set of BO labels that the kernel will
	 * use. This lets us avoid a bunch of string reallocation in
	 * the kernel's draw and BO allocation paths.
	 */
	vc4->bo_labels = kcalloc(VC4_BO_TYPE_COUNT, sizeof(*vc4->bo_labels),
				 GFP_KERNEL);
	if (!vc4->bo_labels)
		return -ENOMEM;
	vc4->num_labels = VC4_BO_TYPE_COUNT;

	BUILD_BUG_ON(ARRAY_SIZE(bo_type_names) != VC4_BO_TYPE_COUNT);
	for (i = 0; i < VC4_BO_TYPE_COUNT; i++)
		vc4->bo_labels[i].name = bo_type_names[i];

	mutex_init(&vc4->bo_lock);

	INIT_LIST_HEAD(&vc4->bo_cache.time_list);

	INIT_WORK(&vc4->bo_cache.time_work, vc4_bo_cache_time_work);
	timer_setup(&vc4->bo_cache.time_timer, vc4_bo_cache_time_timer, 0);

	return 0;
}

void vc4_bo_cache_destroy(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int i;

	del_timer(&vc4->bo_cache.time_timer);
	cancel_work_sync(&vc4->bo_cache.time_work);

	vc4_bo_cache_purge(dev);

	for (i = 0; i < vc4->num_labels; i++) {
		if (vc4->bo_labels[i].num_allocated) {
			DRM_ERROR("Destroying BO cache with %d %s "
				  "BOs still allocated\n",
				  vc4->bo_labels[i].num_allocated,
				  vc4->bo_labels[i].name);
		}

		if (is_user_label(i))
			kfree(vc4->bo_labels[i].name);
	}
	kfree(vc4->bo_labels);
}

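/* Attaches a user-supplied name to a BO for the allocation stats,
 * copying the string in from userspace via strndup_user().
 */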
int vc4_label_bo_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_vc4_label_bo *args = data;
	char *name;
	struct drm_gem_object *gem_obj;
	int ret = 0, label;

	if (!args->len)
		return -EINVAL;

	name = strndup_user(u64_to_user_ptr(args->name), args->len + 1);
	if (IS_ERR(name))
		return PTR_ERR(name);

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
		kfree(name);
		return -ENOENT;
	}

	mutex_lock(&vc4->bo_lock);
	label = vc4_get_user_label(vc4, name);
	if (label != -1)
		vc4_bo_set_label(gem_obj, label);
	else
		ret = -ENOMEM;
	mutex_unlock(&vc4->bo_lock);

	drm_gem_object_put_unlocked(gem_obj);

	return ret;
}