/*
 * Copyright © 2015 Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/**
 * DOC: VC4 GEM BO management support
 *
 * The VC4 GPU architecture (both scanout and rendering) has direct
 * access to system memory with no MMU in between. To support it, we
 * use the GEM CMA helper functions to allocate contiguous ranges of
 * physical memory for our BOs.
 *
 * Since the CMA allocator is very slow, we keep a cache of recently
 * freed BOs around so that the kernel's allocation of objects for 3D
 * rendering can return quickly.
 */
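
/*
 * Allocation flow, summarized from the code below:
 *
 *	vc4_bo_create()
 *	  -> vc4_bo_get_from_cache()		fast path: reuse a cached BO
 *	  -> drm_gem_cma_create()		slow path: fresh CMA allocation
 *	  -> vc4_bo_cache_purge() + retry	when CMA is exhausted
 *	  -> vc4_bo_userspace_cache_purge() + retry as a last resort
 */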

#include <linux/dma-buf.h>

#include "vc4_drv.h"
#include "uapi/drm/vc4_drm.h"

static const char * const bo_type_names[] = {
	"kernel",
	"V3D",
	"V3D shader",
	"dumb",
	"binner",
	"RCL",
	"BCL",
	"kernel BO cache",
};

static bool is_user_label(int label)
{
	return label >= VC4_BO_TYPE_COUNT;
}

static void vc4_bo_stats_dump(struct vc4_dev *vc4)
{
	int i;

	for (i = 0; i < vc4->num_labels; i++) {
		if (!vc4->bo_labels[i].num_allocated)
			continue;

		DRM_INFO("%30s: %6dkb BOs (%d)\n",
			 vc4->bo_labels[i].name,
			 vc4->bo_labels[i].size_allocated / 1024,
			 vc4->bo_labels[i].num_allocated);
	}

	mutex_lock(&vc4->purgeable.lock);
	if (vc4->purgeable.num)
		DRM_INFO("%30s: %6zdkb BOs (%d)\n", "userspace BO cache",
			 vc4->purgeable.size / 1024, vc4->purgeable.num);

	if (vc4->purgeable.purged_num)
		DRM_INFO("%30s: %6zdkb BOs (%d)\n", "total purged BO",
			 vc4->purgeable.purged_size / 1024,
			 vc4->purgeable.purged_num);
	mutex_unlock(&vc4->purgeable.lock);
}

#ifdef CONFIG_DEBUG_FS
int vc4_bo_stats_debugfs(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int i;

	mutex_lock(&vc4->bo_lock);
	for (i = 0; i < vc4->num_labels; i++) {
		if (!vc4->bo_labels[i].num_allocated)
			continue;

		seq_printf(m, "%30s: %6dkb BOs (%d)\n",
			   vc4->bo_labels[i].name,
			   vc4->bo_labels[i].size_allocated / 1024,
			   vc4->bo_labels[i].num_allocated);
	}
	mutex_unlock(&vc4->bo_lock);

	mutex_lock(&vc4->purgeable.lock);
	if (vc4->purgeable.num)
		seq_printf(m, "%30s: %6zdkb BOs (%d)\n", "userspace BO cache",
			   vc4->purgeable.size / 1024, vc4->purgeable.num);

	if (vc4->purgeable.purged_num)
		seq_printf(m, "%30s: %6zdkb BOs (%d)\n", "total purged BO",
			   vc4->purgeable.purged_size / 1024,
			   vc4->purgeable.purged_num);
	mutex_unlock(&vc4->purgeable.lock);

	return 0;
}
#endif

/* Takes ownership of *name and returns the appropriate slot for it in
 * the bo_labels[] array, extending it as necessary.
 *
 * This is inefficient and could use a hash table instead of walking
 * an array and strcmp()ing. However, the assumption is that user
 * labeling will be infrequent (scanout buffers and other long-lived
 * objects, or debug driver builds), so we can live with it for now.
 */
static int vc4_get_user_label(struct vc4_dev *vc4, const char *name)
{
	int i;
	int free_slot = -1;

	for (i = 0; i < vc4->num_labels; i++) {
		if (!vc4->bo_labels[i].name) {
			free_slot = i;
		} else if (strcmp(vc4->bo_labels[i].name, name) == 0) {
			kfree(name);
			return i;
		}
	}

	if (free_slot != -1) {
		WARN_ON(vc4->bo_labels[free_slot].num_allocated != 0);
		vc4->bo_labels[free_slot].name = name;
		return free_slot;
	} else {
		u32 new_label_count = vc4->num_labels + 1;
		struct vc4_label *new_labels =
			krealloc(vc4->bo_labels,
				 new_label_count * sizeof(*new_labels),
				 GFP_KERNEL);

		if (!new_labels) {
			kfree(name);
			return -1;
		}

		free_slot = vc4->num_labels;
		vc4->bo_labels = new_labels;
		vc4->num_labels = new_label_count;

		vc4->bo_labels[free_slot].name = name;
		vc4->bo_labels[free_slot].num_allocated = 0;
		vc4->bo_labels[free_slot].size_allocated = 0;

		return free_slot;
	}
}

static void vc4_bo_set_label(struct drm_gem_object *gem_obj, int label)
{
	struct vc4_bo *bo = to_vc4_bo(gem_obj);
	struct vc4_dev *vc4 = to_vc4_dev(gem_obj->dev);

	lockdep_assert_held(&vc4->bo_lock);

	if (label != -1) {
		vc4->bo_labels[label].num_allocated++;
		vc4->bo_labels[label].size_allocated += gem_obj->size;
	}

	vc4->bo_labels[bo->label].num_allocated--;
	vc4->bo_labels[bo->label].size_allocated -= gem_obj->size;

	if (vc4->bo_labels[bo->label].num_allocated == 0 &&
	    is_user_label(bo->label)) {
		/* Free user BO label slots on last unreference.
		 * Slots are just where we track the stats for a given
		 * name, and once a name is unused we can reuse that
		 * slot.
		 */
		kfree(vc4->bo_labels[bo->label].name);
		vc4->bo_labels[bo->label].name = NULL;
	}

	bo->label = label;
}

static uint32_t bo_page_index(size_t size)
{
	return (size / PAGE_SIZE) - 1;
}

static void vc4_bo_destroy(struct vc4_bo *bo)
{
	struct drm_gem_object *obj = &bo->base.base;
	struct vc4_dev *vc4 = to_vc4_dev(obj->dev);

	lockdep_assert_held(&vc4->bo_lock);

	vc4_bo_set_label(obj, -1);

	if (bo->validated_shader) {
		kfree(bo->validated_shader->uniform_addr_offsets);
		kfree(bo->validated_shader->texture_samples);
		kfree(bo->validated_shader);
		bo->validated_shader = NULL;
	}

	reservation_object_fini(&bo->_resv);

	drm_gem_cma_free_object(obj);
}

static void vc4_bo_remove_from_cache(struct vc4_bo *bo)
{
	struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);

	lockdep_assert_held(&vc4->bo_lock);
	list_del(&bo->unref_head);
	list_del(&bo->size_head);
}

static struct list_head *vc4_get_cache_list_for_size(struct drm_device *dev,
						     size_t size)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	uint32_t page_index = bo_page_index(size);

	if (vc4->bo_cache.size_list_size <= page_index) {
		uint32_t new_size = max(vc4->bo_cache.size_list_size * 2,
					page_index + 1);
		struct list_head *new_list;
		uint32_t i;

		new_list = kmalloc_array(new_size, sizeof(struct list_head),
					 GFP_KERNEL);
		if (!new_list)
			return NULL;

		/* Rebase the old cached BO lists to their new list
		 * head locations.
		 */
		for (i = 0; i < vc4->bo_cache.size_list_size; i++) {
			struct list_head *old_list =
				&vc4->bo_cache.size_list[i];

			if (list_empty(old_list))
				INIT_LIST_HEAD(&new_list[i]);
			else
				list_replace(old_list, &new_list[i]);
		}
		/* And initialize the brand new BO list heads. */
		for (i = vc4->bo_cache.size_list_size; i < new_size; i++)
			INIT_LIST_HEAD(&new_list[i]);

		kfree(vc4->bo_cache.size_list);
		vc4->bo_cache.size_list = new_list;
		vc4->bo_cache.size_list_size = new_size;
	}

	return &vc4->bo_cache.size_list[page_index];
}

static void vc4_bo_cache_purge(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	mutex_lock(&vc4->bo_lock);
	while (!list_empty(&vc4->bo_cache.time_list)) {
		struct vc4_bo *bo = list_last_entry(&vc4->bo_cache.time_list,
						    struct vc4_bo, unref_head);
		vc4_bo_remove_from_cache(bo);
		vc4_bo_destroy(bo);
	}
	mutex_unlock(&vc4->bo_lock);
}

void vc4_bo_add_to_purgeable_pool(struct vc4_bo *bo)
{
	struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);

	mutex_lock(&vc4->purgeable.lock);
	list_add_tail(&bo->size_head, &vc4->purgeable.list);
	vc4->purgeable.num++;
	vc4->purgeable.size += bo->base.base.size;
	mutex_unlock(&vc4->purgeable.lock);
}

static void vc4_bo_remove_from_purgeable_pool_locked(struct vc4_bo *bo)
{
	struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);

	/* list_del_init() is used here because the caller might release
	 * the purgeable lock in order to acquire the madv one and update the
	 * madv status.
	 * During this short period of time a user might decide to mark
	 * the BO as unpurgeable, and if bo->madv is set to
	 * VC4_MADV_DONTNEED it will try to remove the BO from the
	 * purgeable list which will fail if the ->next/prev fields
	 * are set to LIST_POISON1/LIST_POISON2 (which is what
	 * list_del() does).
	 * Re-initializing the list element guarantees that list_del()
	 * will work correctly even if it's a NOP.
	 */
	list_del_init(&bo->size_head);
	vc4->purgeable.num--;
	vc4->purgeable.size -= bo->base.base.size;
}

void vc4_bo_remove_from_purgeable_pool(struct vc4_bo *bo)
{
	struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);

	mutex_lock(&vc4->purgeable.lock);
	vc4_bo_remove_from_purgeable_pool_locked(bo);
	mutex_unlock(&vc4->purgeable.lock);
}

static void vc4_bo_purge(struct drm_gem_object *obj)
{
	struct vc4_bo *bo = to_vc4_bo(obj);
	struct drm_device *dev = obj->dev;

	WARN_ON(!mutex_is_locked(&bo->madv_lock));
	WARN_ON(bo->madv != VC4_MADV_DONTNEED);

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);

	dma_free_wc(dev->dev, obj->size, bo->base.vaddr, bo->base.paddr);
	bo->base.vaddr = NULL;
	bo->madv = __VC4_MADV_PURGED;
}

static void vc4_bo_userspace_cache_purge(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	mutex_lock(&vc4->purgeable.lock);
	while (!list_empty(&vc4->purgeable.list)) {
		struct vc4_bo *bo = list_first_entry(&vc4->purgeable.list,
						     struct vc4_bo, size_head);
		struct drm_gem_object *obj = &bo->base.base;
		size_t purged_size = 0;

		vc4_bo_remove_from_purgeable_pool_locked(bo);

		/* Release the purgeable lock while we're purging the BO so
		 * that other people can continue inserting things in the
		 * purgeable pool without having to wait for all BOs to be
		 * purged.
		 */
		mutex_unlock(&vc4->purgeable.lock);
		mutex_lock(&bo->madv_lock);

		/* Since we released the purgeable pool lock before acquiring
		 * the BO madv one, the user may have marked the BO as WILLNEED
		 * and re-used it in the meantime.
		 * Before purging the BO we need to make sure
		 * - it is still marked as DONTNEED
		 * - it has not been re-inserted in the purgeable list
		 * - it is not used by HW blocks
		 * If one of these conditions is not met, just skip the entry.
		 */
		if (bo->madv == VC4_MADV_DONTNEED &&
		    list_empty(&bo->size_head) &&
		    !refcount_read(&bo->usecnt)) {
			purged_size = bo->base.base.size;
			vc4_bo_purge(obj);
		}
		mutex_unlock(&bo->madv_lock);
		mutex_lock(&vc4->purgeable.lock);

		if (purged_size) {
			vc4->purgeable.purged_size += purged_size;
			vc4->purgeable.purged_num++;
		}
	}
	mutex_unlock(&vc4->purgeable.lock);
}
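
/* A summary of the BO madv lifecycle driven by the helpers above and
 * below (the WILLNEED/DONTNEED transitions themselves are requested by
 * userspace through the VC4 madvise ioctl, which lives outside this
 * file):
 *
 *	__VC4_MADV_NOTSUPP  ->  VC4_MADV_WILLNEED	when a BO is exposed
 *							to userspace
 *	VC4_MADV_WILLNEED  <->  VC4_MADV_DONTNEED	madvise ioctl
 *	VC4_MADV_DONTNEED   ->  __VC4_MADV_PURGED	vc4_bo_purge(), once
 *							usecnt is 0 and CMA
 *							runs dry
 */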

static struct vc4_bo *vc4_bo_get_from_cache(struct drm_device *dev,
					    uint32_t size,
					    enum vc4_kernel_bo_type type)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	uint32_t page_index = bo_page_index(size);
	struct vc4_bo *bo = NULL;

	size = roundup(size, PAGE_SIZE);

	mutex_lock(&vc4->bo_lock);
	if (page_index >= vc4->bo_cache.size_list_size)
		goto out;

	if (list_empty(&vc4->bo_cache.size_list[page_index]))
		goto out;

	bo = list_first_entry(&vc4->bo_cache.size_list[page_index],
			      struct vc4_bo, size_head);
	vc4_bo_remove_from_cache(bo);
	kref_init(&bo->base.base.refcount);

out:
	if (bo)
		vc4_bo_set_label(&bo->base.base, type);
	mutex_unlock(&vc4->bo_lock);
	return bo;
}

/**
 * vc4_create_object - Implementation of driver->gem_create_object.
 * @dev: DRM device
 * @size: Size in bytes of the memory the object will reference
 *
 * This lets the CMA helpers allocate object structs for us, and keep
 * our BO stats correct.
 */
struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_bo *bo;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	bo->madv = VC4_MADV_WILLNEED;
	refcount_set(&bo->usecnt, 0);
	mutex_init(&bo->madv_lock);
	mutex_lock(&vc4->bo_lock);
	bo->label = VC4_BO_TYPE_KERNEL;
	vc4->bo_labels[VC4_BO_TYPE_KERNEL].num_allocated++;
	vc4->bo_labels[VC4_BO_TYPE_KERNEL].size_allocated += size;
	mutex_unlock(&vc4->bo_lock);
	bo->resv = &bo->_resv;
	reservation_object_init(bo->resv);

	return &bo->base.base;
}

struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size,
			     bool allow_unzeroed, enum vc4_kernel_bo_type type)
{
	size_t size = roundup(unaligned_size, PAGE_SIZE);
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_gem_cma_object *cma_obj;
	struct vc4_bo *bo;

	if (size == 0)
		return ERR_PTR(-EINVAL);

	/* First, try to get a vc4_bo from the kernel BO cache. */
	bo = vc4_bo_get_from_cache(dev, size, type);
	if (bo) {
		if (!allow_unzeroed)
			memset(bo->base.vaddr, 0, bo->base.base.size);
		return bo;
	}

	cma_obj = drm_gem_cma_create(dev, size);
	if (IS_ERR(cma_obj)) {
		/*
		 * If we've run out of CMA memory, kill the cache of
		 * CMA allocations we've got lying around and try again.
		 */
		vc4_bo_cache_purge(dev);
		cma_obj = drm_gem_cma_create(dev, size);
	}

	if (IS_ERR(cma_obj)) {
		/*
		 * Still not enough CMA memory, purge the userspace BO
		 * cache and retry.
		 * This is sub-optimal, since we purge the whole userspace
		 * BO cache, which forces users that want to re-use the BO
		 * to restore its initial content.
		 * Ideally, we should purge entries one by one and retry
		 * after each to see if CMA allocation succeeds. Or even
		 * better, try to find an entry with at least the same
		 * size.
		 */
		vc4_bo_userspace_cache_purge(dev);
		cma_obj = drm_gem_cma_create(dev, size);
	}

	if (IS_ERR(cma_obj)) {
		DRM_ERROR("Failed to allocate from CMA:\n");
		vc4_bo_stats_dump(vc4);
		return ERR_PTR(-ENOMEM);
	}
	bo = to_vc4_bo(&cma_obj->base);

	/* By default, BOs do not support the MADV ioctl. This will be enabled
	 * only on BOs that are exposed to userspace (V3D, V3D_SHADER and DUMB
	 * BOs).
	 */
	bo->madv = __VC4_MADV_NOTSUPP;

	mutex_lock(&vc4->bo_lock);
	vc4_bo_set_label(&cma_obj->base, type);
	mutex_unlock(&vc4->bo_lock);

	return bo;
}

int vc4_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args)
{
	int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct vc4_bo *bo = NULL;
	int ret;

	if (args->pitch < min_pitch)
		args->pitch = min_pitch;

	if (args->size < args->pitch * args->height)
		args->size = args->pitch * args->height;

	bo = vc4_bo_create(dev, args->size, false, VC4_BO_TYPE_DUMB);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	bo->madv = VC4_MADV_WILLNEED;

	ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
	drm_gem_object_put_unlocked(&bo->base.base);

	return ret;
}

static void vc4_bo_cache_free_old(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	unsigned long expire_time = jiffies - msecs_to_jiffies(1000);

	lockdep_assert_held(&vc4->bo_lock);

	while (!list_empty(&vc4->bo_cache.time_list)) {
		struct vc4_bo *bo = list_last_entry(&vc4->bo_cache.time_list,
						    struct vc4_bo, unref_head);
		if (time_before(expire_time, bo->free_time)) {
			mod_timer(&vc4->bo_cache.time_timer,
				  round_jiffies_up(jiffies +
						   msecs_to_jiffies(1000)));
			return;
		}

		vc4_bo_remove_from_cache(bo);
		vc4_bo_destroy(bo);
	}
}

/* Called on the last userspace/kernel unreference of the BO. Returns
 * it to the BO cache if possible, otherwise frees it.
 */
void vc4_free_object(struct drm_gem_object *gem_bo)
{
	struct drm_device *dev = gem_bo->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_bo *bo = to_vc4_bo(gem_bo);
	struct list_head *cache_list;

	/* Remove the BO from the purgeable list. */
	mutex_lock(&bo->madv_lock);
	if (bo->madv == VC4_MADV_DONTNEED && !refcount_read(&bo->usecnt))
		vc4_bo_remove_from_purgeable_pool(bo);
	mutex_unlock(&bo->madv_lock);

	mutex_lock(&vc4->bo_lock);
	/* If the object references someone else's memory, we can't cache it.
	 */
	if (gem_bo->import_attach) {
		vc4_bo_destroy(bo);
		goto out;
	}

	/* Don't cache if it was publicly named. */
	if (gem_bo->name) {
		vc4_bo_destroy(bo);
		goto out;
	}

	/* If this object was partially constructed but CMA allocation
	 * had failed, just free it. Can also happen when the BO has been
	 * purged.
	 */
	if (!bo->base.vaddr) {
		vc4_bo_destroy(bo);
		goto out;
	}

	cache_list = vc4_get_cache_list_for_size(dev, gem_bo->size);
	if (!cache_list) {
		vc4_bo_destroy(bo);
		goto out;
	}

	if (bo->validated_shader) {
		kfree(bo->validated_shader->uniform_addr_offsets);
		kfree(bo->validated_shader->texture_samples);
		kfree(bo->validated_shader);
		bo->validated_shader = NULL;
	}

	/* Reset madv and usecnt before adding the BO to the cache. */
	bo->madv = __VC4_MADV_NOTSUPP;
	refcount_set(&bo->usecnt, 0);

	bo->t_format = false;
	bo->free_time = jiffies;
	list_add(&bo->size_head, cache_list);
	list_add(&bo->unref_head, &vc4->bo_cache.time_list);

	vc4_bo_set_label(&bo->base.base, VC4_BO_TYPE_KERNEL_CACHE);

	vc4_bo_cache_free_old(dev);

out:
	mutex_unlock(&vc4->bo_lock);
}

static void vc4_bo_cache_time_work(struct work_struct *work)
{
	struct vc4_dev *vc4 =
		container_of(work, struct vc4_dev, bo_cache.time_work);
	struct drm_device *dev = vc4->dev;

	mutex_lock(&vc4->bo_lock);
	vc4_bo_cache_free_old(dev);
	mutex_unlock(&vc4->bo_lock);
}

int vc4_bo_inc_usecnt(struct vc4_bo *bo)
{
	int ret;

	/* Fast path: if the BO is already retained by someone, no need to
	 * check the madv status.
	 */
	if (refcount_inc_not_zero(&bo->usecnt))
		return 0;

	mutex_lock(&bo->madv_lock);
	switch (bo->madv) {
	case VC4_MADV_WILLNEED:
		if (!refcount_inc_not_zero(&bo->usecnt))
			refcount_set(&bo->usecnt, 1);
		ret = 0;
		break;
	case VC4_MADV_DONTNEED:
		/* We shouldn't use a BO marked as purgeable if at least
		 * someone else retained its content by incrementing usecnt.
		 * Luckily the BO hasn't been purged yet, but something wrong
		 * is happening here. Just throw an error instead of
		 * authorizing this use case.
		 */
	case __VC4_MADV_PURGED:
		/* We can't use a purged BO. */
	default:
		/* Invalid madv value. */
		ret = -EINVAL;
		break;
	}
	mutex_unlock(&bo->madv_lock);

	return ret;
}

void vc4_bo_dec_usecnt(struct vc4_bo *bo)
{
	/* Fast path: if the BO is still retained by someone, no need to test
	 * the madv value.
	 */
	if (refcount_dec_not_one(&bo->usecnt))
		return;

	mutex_lock(&bo->madv_lock);
	if (refcount_dec_and_test(&bo->usecnt) &&
	    bo->madv == VC4_MADV_DONTNEED)
		vc4_bo_add_to_purgeable_pool(bo);
	mutex_unlock(&bo->madv_lock);
}

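/* Illustrative usage sketch (not taken from the kernel sources): callers
 * pin a BO's backing storage across any access so the purging logic above
 * leaves it alone:
 *
 *	ret = vc4_bo_inc_usecnt(bo);
 *	if (ret)
 *		return ret;	// BO purged or in an invalid madv state
 *	// ... access bo->base.vaddr ...
 *	vc4_bo_dec_usecnt(bo);
 */
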
static void vc4_bo_cache_time_timer(struct timer_list *t)
{
	struct vc4_dev *vc4 = from_timer(vc4, t, bo_cache.time_timer);

	schedule_work(&vc4->bo_cache.time_work);
}

struct reservation_object *vc4_prime_res_obj(struct drm_gem_object *obj)
{
	struct vc4_bo *bo = to_vc4_bo(obj);

	return bo->resv;
}

struct dma_buf *
vc4_prime_export(struct drm_device *dev, struct drm_gem_object *obj, int flags)
{
	struct vc4_bo *bo = to_vc4_bo(obj);
	struct dma_buf *dmabuf;
	int ret;

	if (bo->validated_shader) {
		DRM_DEBUG("Attempting to export shader BO\n");
		return ERR_PTR(-EINVAL);
	}

	/* Note: as soon as the BO is exported it becomes unpurgeable, because
	 * no one ever decrements the usecnt even if the reference held by the
	 * exported BO is released. This shouldn't be a problem since we don't
	 * expect exported BOs to be marked as purgeable.
	 */
	ret = vc4_bo_inc_usecnt(bo);
	if (ret) {
		DRM_ERROR("Failed to increment BO usecnt\n");
		return ERR_PTR(ret);
	}

	dmabuf = drm_gem_prime_export(dev, obj, flags);
	if (IS_ERR(dmabuf))
		vc4_bo_dec_usecnt(bo);

	return dmabuf;
}

vm_fault_t vc4_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct vc4_bo *bo = to_vc4_bo(obj);

	/* The only reason we would end up here is when user-space accesses
	 * the BO's memory after it's been purged.
	 */
	mutex_lock(&bo->madv_lock);
	WARN_ON(bo->madv != __VC4_MADV_PURGED);
	mutex_unlock(&bo->madv_lock);

	return VM_FAULT_SIGBUS;
}

int vc4_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem_obj;
	unsigned long vm_pgoff;
	struct vc4_bo *bo;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret)
		return ret;

	gem_obj = vma->vm_private_data;
	bo = to_vc4_bo(gem_obj);

	if (bo->validated_shader && (vma->vm_flags & VM_WRITE)) {
		DRM_DEBUG("mmapping of shader BOs for writing not allowed.\n");
		return -EINVAL;
	}

	if (bo->madv != VC4_MADV_WILLNEED) {
		DRM_DEBUG("mmapping of %s BO not allowed\n",
			  bo->madv == VC4_MADV_DONTNEED ?
			  "purgeable" : "purged");
		return -EINVAL;
	}

	/*
	 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
	 * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
	 * the whole buffer.
	 */
	vma->vm_flags &= ~VM_PFNMAP;

	/* This ->vm_pgoff dance is needed to make all parties happy:
	 * - dma_mmap_wc() uses ->vm_pgoff as an offset within the allocated
	 *   mem-region, hence the need to set it to zero (the value set by
	 *   the DRM core is a virtual offset encoding the GEM object-id)
	 * - the mmap() core logic needs ->vm_pgoff to be restored to its
	 *   initial value before returning from this function because it
	 *   encodes the offset of this GEM in the dev->anon_inode pseudo-file
	 *   and this information will be used when we invalidate userspace
	 *   mappings with drm_vma_node_unmap() (called from vc4_gem_purge()).
	 */
	vm_pgoff = vma->vm_pgoff;
	vma->vm_pgoff = 0;
	ret = dma_mmap_wc(bo->base.base.dev->dev, vma, bo->base.vaddr,
			  bo->base.paddr, vma->vm_end - vma->vm_start);
	vma->vm_pgoff = vm_pgoff;

	if (ret)
		drm_gem_vm_close(vma);

	return ret;
}

int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct vc4_bo *bo = to_vc4_bo(obj);

	if (bo->validated_shader && (vma->vm_flags & VM_WRITE)) {
		DRM_DEBUG("mmapping of shader BOs for writing not allowed.\n");
		return -EINVAL;
	}

	return drm_gem_cma_prime_mmap(obj, vma);
}

void *vc4_prime_vmap(struct drm_gem_object *obj)
{
	struct vc4_bo *bo = to_vc4_bo(obj);

	if (bo->validated_shader) {
		DRM_DEBUG("mmapping of shader BOs not allowed.\n");
		return ERR_PTR(-EINVAL);
	}

	return drm_gem_cma_prime_vmap(obj);
}

struct drm_gem_object *
vc4_prime_import_sg_table(struct drm_device *dev,
			  struct dma_buf_attachment *attach,
			  struct sg_table *sgt)
{
	struct drm_gem_object *obj;
	struct vc4_bo *bo;

	obj = drm_gem_cma_prime_import_sg_table(dev, attach, sgt);
	if (IS_ERR(obj))
		return obj;

	bo = to_vc4_bo(obj);
	bo->resv = attach->dmabuf->resv;

	return obj;
}

int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_vc4_create_bo *args = data;
	struct vc4_bo *bo = NULL;
	int ret;

	/*
	 * We can't allocate from the BO cache, because the BOs don't
	 * get zeroed, and that might leak data between users.
	 */
	bo = vc4_bo_create(dev, args->size, false, VC4_BO_TYPE_V3D);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	bo->madv = VC4_MADV_WILLNEED;

	ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
	drm_gem_object_put_unlocked(&bo->base.base);

	return ret;
}

int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_vc4_mmap_bo *args = data;
	struct drm_gem_object *gem_obj;

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -EINVAL;
	}

	/* The mmap offset was set up at BO allocation time. */
	args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);

	drm_gem_object_put_unlocked(gem_obj);
	return 0;
}

int
vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_vc4_create_shader_bo *args = data;
	struct vc4_bo *bo = NULL;
	int ret;

	if (args->size == 0)
		return -EINVAL;

	if (args->size % sizeof(u64) != 0)
		return -EINVAL;

	if (args->flags != 0) {
		DRM_INFO("Unknown flags set: 0x%08x\n", args->flags);
		return -EINVAL;
	}

	if (args->pad != 0) {
		DRM_INFO("Pad set: 0x%08x\n", args->pad);
		return -EINVAL;
	}

	bo = vc4_bo_create(dev, args->size, true, VC4_BO_TYPE_V3D_SHADER);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	bo->madv = VC4_MADV_WILLNEED;

	if (copy_from_user(bo->base.vaddr,
			   (void __user *)(uintptr_t)args->data,
			   args->size)) {
		ret = -EFAULT;
		goto fail;
	}
	/* Clear the rest of the memory left over from allocating out of
	 * the BO cache.
	 */
	memset(bo->base.vaddr + args->size, 0,
	       bo->base.base.size - args->size);

	bo->validated_shader = vc4_validate_shader(&bo->base);
	if (!bo->validated_shader) {
		ret = -EINVAL;
		goto fail;
	}

	/* We have to create the handle after validation, to avoid
	 * races for users doing things like mmap the shader BO.
	 */
	ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);

fail:
	drm_gem_object_put_unlocked(&bo->base.base);

	return ret;
}

/**
 * vc4_set_tiling_ioctl() - Sets the tiling modifier for a BO.
 * @dev: DRM device
 * @data: ioctl argument
 * @file_priv: DRM file for this fd
 *
 * The tiling state of the BO decides the default modifier of an fb if
 * no specific modifier was set by userspace, and the return value of
 * vc4_get_tiling_ioctl() (so that userspace can treat a BO it
 * received from dmabuf as the same tiling format as the producer
 * used).
 */
int vc4_set_tiling_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_vc4_set_tiling *args = data;
	struct drm_gem_object *gem_obj;
	struct vc4_bo *bo;
	bool t_format;

	if (args->flags != 0)
		return -EINVAL;

	switch (args->modifier) {
	case DRM_FORMAT_MOD_NONE:
		t_format = false;
		break;
	case DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED:
		t_format = true;
		break;
	default:
		return -EINVAL;
	}

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -ENOENT;
	}
	bo = to_vc4_bo(gem_obj);
	bo->t_format = t_format;

	drm_gem_object_put_unlocked(gem_obj);

	return 0;
}

/**
 * vc4_get_tiling_ioctl() - Gets the tiling modifier for a BO.
 * @dev: DRM device
 * @data: ioctl argument
 * @file_priv: DRM file for this fd
 *
 * Returns the tiling modifier for a BO as set by vc4_set_tiling_ioctl().
 */
int vc4_get_tiling_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_vc4_get_tiling *args = data;
	struct drm_gem_object *gem_obj;
	struct vc4_bo *bo;

	if (args->flags != 0 || args->modifier != 0)
		return -EINVAL;

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -ENOENT;
	}
	bo = to_vc4_bo(gem_obj);

	if (bo->t_format)
		args->modifier = DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED;
	else
		args->modifier = DRM_FORMAT_MOD_NONE;

	drm_gem_object_put_unlocked(gem_obj);

	return 0;
}

int vc4_bo_cache_init(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int i;

	/* Create the initial set of BO labels that the kernel will
	 * use. This lets us avoid a bunch of string reallocation in
	 * the kernel's draw and BO allocation paths.
	 */
	vc4->bo_labels = kcalloc(VC4_BO_TYPE_COUNT, sizeof(*vc4->bo_labels),
				 GFP_KERNEL);
	if (!vc4->bo_labels)
		return -ENOMEM;
	vc4->num_labels = VC4_BO_TYPE_COUNT;

	BUILD_BUG_ON(ARRAY_SIZE(bo_type_names) != VC4_BO_TYPE_COUNT);
	for (i = 0; i < VC4_BO_TYPE_COUNT; i++)
		vc4->bo_labels[i].name = bo_type_names[i];

	mutex_init(&vc4->bo_lock);

	INIT_LIST_HEAD(&vc4->bo_cache.time_list);

	INIT_WORK(&vc4->bo_cache.time_work, vc4_bo_cache_time_work);
	timer_setup(&vc4->bo_cache.time_timer, vc4_bo_cache_time_timer, 0);

	return 0;
}

void vc4_bo_cache_destroy(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int i;

	del_timer(&vc4->bo_cache.time_timer);
	cancel_work_sync(&vc4->bo_cache.time_work);

	vc4_bo_cache_purge(dev);

	for (i = 0; i < vc4->num_labels; i++) {
		if (vc4->bo_labels[i].num_allocated) {
			DRM_ERROR("Destroying BO cache with %d %s "
				  "BOs still allocated\n",
				  vc4->bo_labels[i].num_allocated,
				  vc4->bo_labels[i].name);
		}

		if (is_user_label(i))
			kfree(vc4->bo_labels[i].name);
	}
	kfree(vc4->bo_labels);
}

int vc4_label_bo_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_vc4_label_bo *args = data;
	char *name;
	struct drm_gem_object *gem_obj;
	int ret = 0, label;

	if (!args->len)
		return -EINVAL;

	name = strndup_user(u64_to_user_ptr(args->name), args->len + 1);
	if (IS_ERR(name))
		return PTR_ERR(name);

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
		kfree(name);
		return -ENOENT;
	}

	mutex_lock(&vc4->bo_lock);
	label = vc4_get_user_label(vc4, name);
	if (label != -1)
		vc4_bo_set_label(gem_obj, label);
	else
		ret = -ENOMEM;
	mutex_unlock(&vc4->bo_lock);

	drm_gem_object_put_unlocked(gem_obj);

	return ret;
}