/*
 * Copyright © 2015 Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
/**
 * DOC: VC4 GEM BO management support
 *
 * The VC4 GPU architecture (both scanout and rendering) has direct
 * access to system memory with no MMU in between. To support it, we
 * use the GEM CMA helper functions to allocate contiguous ranges of
 * physical memory for our BOs.
 *
 * Since the CMA allocator is very slow, we keep a cache of recently
 * freed BOs around so that the kernel's allocation of objects for 3D
 * rendering can return quickly.
 */
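/*
 * Illustrative sketch (not driver code; size and type are hypothetical):
 * the cache turns the common allocate/free/allocate-again pattern into a
 * cheap list operation instead of a fresh CMA allocation:
 *
 *	bo = vc4_bo_create(dev, size, true, VC4_BO_TYPE_BIN);
 *	drm_gem_object_put_unlocked(&bo->base.base);	<- BO parked in the cache
 *	bo = vc4_bo_create(dev, size, true, VC4_BO_TYPE_BIN);	<- likely a cache hit
 */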
#include <linux/dma-buf.h>

#include "vc4_drv.h"
#include "uapi/drm/vc4_drm.h"
static const char * const bo_type_names[] = {
	"kernel",
	"V3D",
	"V3D shader",
	"dumb",
	"binner",
	"RCL",
	"BCL",
	"kernel BO cache",
};
static bool is_user_label(int label)
{
	return label >= VC4_BO_TYPE_COUNT;
}
static void vc4_bo_stats_print(struct drm_printer *p, struct vc4_dev *vc4)
{
	int i;

	for (i = 0; i < vc4->num_labels; i++) {
		if (!vc4->bo_labels[i].num_allocated)
			continue;

		drm_printf(p, "%30s: %6dkb BOs (%d)\n",
			   vc4->bo_labels[i].name,
			   vc4->bo_labels[i].size_allocated / 1024,
			   vc4->bo_labels[i].num_allocated);
	}

	mutex_lock(&vc4->purgeable.lock);
	if (vc4->purgeable.num)
		drm_printf(p, "%30s: %6zdkb BOs (%d)\n", "userspace BO cache",
			   vc4->purgeable.size / 1024, vc4->purgeable.num);

	if (vc4->purgeable.purged_num)
		drm_printf(p, "%30s: %6zdkb BOs (%d)\n", "total purged BO",
			   vc4->purgeable.purged_size / 1024,
			   vc4->purgeable.purged_num);
	mutex_unlock(&vc4->purgeable.lock);
}
static int vc4_bo_stats_debugfs(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_printer p = drm_seq_file_printer(m);

	vc4_bo_stats_print(&p, vc4);

	return 0;
}
/* Takes ownership of *name and returns the appropriate slot for it in
 * the bo_labels[] array, extending it as necessary.
 *
 * This is inefficient and could use a hash table instead of walking
 * an array and strcmp()ing.  However, the assumption is that user
 * labeling will be infrequent (scanout buffers and other long-lived
 * objects, or debug driver builds), so we can live with it for now.
 */
static int vc4_get_user_label(struct vc4_dev *vc4, const char *name)
{
	int i;
	int free_slot = -1;

	for (i = 0; i < vc4->num_labels; i++) {
		if (!vc4->bo_labels[i].name) {
			free_slot = i;
		} else if (strcmp(vc4->bo_labels[i].name, name) == 0) {
			kfree(name);
			return i;
		}
	}

	if (free_slot != -1) {
		WARN_ON(vc4->bo_labels[free_slot].num_allocated != 0);
		vc4->bo_labels[free_slot].name = name;
	} else {
		u32 new_label_count = vc4->num_labels + 1;
		struct vc4_label *new_labels =
			krealloc(vc4->bo_labels,
				 new_label_count * sizeof(*new_labels),
				 GFP_KERNEL);

		if (!new_labels) {
			kfree(name);
			return -1;
		}

		free_slot = vc4->num_labels;
		vc4->bo_labels = new_labels;
		vc4->num_labels = new_label_count;

		vc4->bo_labels[free_slot].name = name;
		vc4->bo_labels[free_slot].num_allocated = 0;
		vc4->bo_labels[free_slot].size_allocated = 0;
	}

	return free_slot;
}
static void vc4_bo_set_label(struct drm_gem_object *gem_obj, int label)
{
	struct vc4_bo *bo = to_vc4_bo(gem_obj);
	struct vc4_dev *vc4 = to_vc4_dev(gem_obj->dev);

	lockdep_assert_held(&vc4->bo_lock);

	if (label != -1) {
		vc4->bo_labels[label].num_allocated++;
		vc4->bo_labels[label].size_allocated += gem_obj->size;
	}

	vc4->bo_labels[bo->label].num_allocated--;
	vc4->bo_labels[bo->label].size_allocated -= gem_obj->size;

	if (vc4->bo_labels[bo->label].num_allocated == 0 &&
	    is_user_label(bo->label)) {
		/* Free user BO label slots on last unreference.
		 * Slots are just where we track the stats for a given
		 * name, and once a name is unused we can reuse that
		 * slot.
		 */
		kfree(vc4->bo_labels[bo->label].name);
		vc4->bo_labels[bo->label].name = NULL;
	}

	bo->label = label;
}
static uint32_t bo_page_index(size_t size)
{
	return (size / PAGE_SIZE) - 1;
}
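/*
 * Worked example (assuming PAGE_SIZE == 4096): a 4096-byte BO maps to
 * size_list[0] and an 8192-byte BO to size_list[1]. Callers round sizes
 * up to a whole number of pages before indexing, so the -1 underflow
 * case cannot occur.
 */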
static void vc4_bo_destroy(struct vc4_bo *bo)
{
	struct drm_gem_object *obj = &bo->base.base;
	struct vc4_dev *vc4 = to_vc4_dev(obj->dev);

	lockdep_assert_held(&vc4->bo_lock);

	vc4_bo_set_label(obj, -1);

	if (bo->validated_shader) {
		kfree(bo->validated_shader->uniform_addr_offsets);
		kfree(bo->validated_shader->texture_samples);
		kfree(bo->validated_shader);
		bo->validated_shader = NULL;
	}

	drm_gem_cma_free_object(obj);
}
static void vc4_bo_remove_from_cache(struct vc4_bo *bo)
{
	struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);

	lockdep_assert_held(&vc4->bo_lock);
	list_del(&bo->unref_head);
	list_del(&bo->size_head);
}
static struct list_head *vc4_get_cache_list_for_size(struct drm_device *dev,
						     size_t size)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	uint32_t page_index = bo_page_index(size);

	if (vc4->bo_cache.size_list_size <= page_index) {
		uint32_t new_size = max(vc4->bo_cache.size_list_size * 2,
					page_index + 1);
		struct list_head *new_list;
		uint32_t i;

		new_list = kmalloc_array(new_size, sizeof(struct list_head),
					 GFP_KERNEL);
		if (!new_list)
			return NULL;

		/* Rebase the old cached BO lists to their new list
		 * heads.
		 */
		for (i = 0; i < vc4->bo_cache.size_list_size; i++) {
			struct list_head *old_list =
				&vc4->bo_cache.size_list[i];

			if (list_empty(old_list))
				INIT_LIST_HEAD(&new_list[i]);
			else
				list_replace(old_list, &new_list[i]);
		}
		/* And initialize the brand new BO list heads. */
		for (i = vc4->bo_cache.size_list_size; i < new_size; i++)
			INIT_LIST_HEAD(&new_list[i]);

		kfree(vc4->bo_cache.size_list);
		vc4->bo_cache.size_list = new_list;
		vc4->bo_cache.size_list_size = new_size;
	}

	return &vc4->bo_cache.size_list[page_index];
}
static void vc4_bo_cache_purge(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	mutex_lock(&vc4->bo_lock);
	while (!list_empty(&vc4->bo_cache.time_list)) {
		struct vc4_bo *bo = list_last_entry(&vc4->bo_cache.time_list,
						    struct vc4_bo, unref_head);
		vc4_bo_remove_from_cache(bo);
		vc4_bo_destroy(bo);
	}
	mutex_unlock(&vc4->bo_lock);
}
void vc4_bo_add_to_purgeable_pool(struct vc4_bo *bo)
{
	struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);

	mutex_lock(&vc4->purgeable.lock);
	list_add_tail(&bo->size_head, &vc4->purgeable.list);
	vc4->purgeable.num++;
	vc4->purgeable.size += bo->base.base.size;
	mutex_unlock(&vc4->purgeable.lock);
}
static void vc4_bo_remove_from_purgeable_pool_locked(struct vc4_bo *bo)
{
	struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);

	/* list_del_init() is used here because the caller might release
	 * the purgeable lock in order to acquire the madv one and update the
	 * madv status.
	 * During this short period of time a user might decide to mark
	 * the BO as unpurgeable, and if bo->madv is set to
	 * VC4_MADV_DONTNEED it will try to remove the BO from the
	 * purgeable list which will fail if the ->next/prev fields
	 * are set to LIST_POISON1/LIST_POISON2 (which is what
	 * list_del() does).
	 * Re-initializing the list element guarantees that list_del()
	 * will work correctly even if it's a NOP.
	 */
	list_del_init(&bo->size_head);
	vc4->purgeable.num--;
	vc4->purgeable.size -= bo->base.base.size;
}
void vc4_bo_remove_from_purgeable_pool(struct vc4_bo *bo)
{
	struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);

	mutex_lock(&vc4->purgeable.lock);
	vc4_bo_remove_from_purgeable_pool_locked(bo);
	mutex_unlock(&vc4->purgeable.lock);
}
static void vc4_bo_purge(struct drm_gem_object *obj)
{
	struct vc4_bo *bo = to_vc4_bo(obj);
	struct drm_device *dev = obj->dev;

	WARN_ON(!mutex_is_locked(&bo->madv_lock));
	WARN_ON(bo->madv != VC4_MADV_DONTNEED);

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);

	dma_free_wc(dev->dev, obj->size, bo->base.vaddr, bo->base.paddr);
	bo->base.vaddr = NULL;
	bo->madv = __VC4_MADV_PURGED;
}
static void vc4_bo_userspace_cache_purge(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	mutex_lock(&vc4->purgeable.lock);
	while (!list_empty(&vc4->purgeable.list)) {
		struct vc4_bo *bo = list_first_entry(&vc4->purgeable.list,
						     struct vc4_bo, size_head);
		struct drm_gem_object *obj = &bo->base.base;
		size_t purged_size = 0;

		vc4_bo_remove_from_purgeable_pool_locked(bo);

		/* Release the purgeable lock while we're purging the BO so
		 * that other people can continue inserting things in the
		 * purgeable pool without having to wait for all BOs to be
		 * purged.
		 */
		mutex_unlock(&vc4->purgeable.lock);
		mutex_lock(&bo->madv_lock);

		/* Since we released the purgeable pool lock before acquiring
		 * the BO madv one, the user may have marked the BO as WILLNEED
		 * and re-used it in the meantime.
		 * Before purging the BO we need to make sure
		 * - it is still marked as DONTNEED
		 * - it has not been re-inserted in the purgeable list
		 * - it is not used by HW blocks
		 * If one of these conditions is not met, just skip the entry.
		 */
		if (bo->madv == VC4_MADV_DONTNEED &&
		    list_empty(&bo->size_head) &&
		    !refcount_read(&bo->usecnt)) {
			purged_size = bo->base.base.size;
			vc4_bo_purge(obj);
		}
		mutex_unlock(&bo->madv_lock);
		mutex_lock(&vc4->purgeable.lock);

		if (purged_size) {
			vc4->purgeable.purged_size += purged_size;
			vc4->purgeable.purged_num++;
		}
	}
	mutex_unlock(&vc4->purgeable.lock);
}
static struct vc4_bo *vc4_bo_get_from_cache(struct drm_device *dev,
					    uint32_t size,
					    enum vc4_kernel_bo_type type)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	uint32_t page_index = bo_page_index(size);
	struct vc4_bo *bo = NULL;

	size = roundup(size, PAGE_SIZE);

	mutex_lock(&vc4->bo_lock);
	if (page_index >= vc4->bo_cache.size_list_size)
		goto out;

	if (list_empty(&vc4->bo_cache.size_list[page_index]))
		goto out;

	bo = list_first_entry(&vc4->bo_cache.size_list[page_index],
			      struct vc4_bo, size_head);
	vc4_bo_remove_from_cache(bo);
	kref_init(&bo->base.base.refcount);

out:
	if (bo)
		vc4_bo_set_label(&bo->base.base, type);
	mutex_unlock(&vc4->bo_lock);
	return bo;
}
/**
 * vc4_create_object - Implementation of driver->gem_create_object.
 * @dev: DRM device
 * @size: Size in bytes of the memory the object will reference
 *
 * This lets the CMA helpers allocate object structs for us, and keep
 * our BO stats correct.
 */
struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_bo *bo;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	bo->madv = VC4_MADV_WILLNEED;
	refcount_set(&bo->usecnt, 0);
	mutex_init(&bo->madv_lock);
	mutex_lock(&vc4->bo_lock);
	bo->label = VC4_BO_TYPE_KERNEL;
	vc4->bo_labels[VC4_BO_TYPE_KERNEL].num_allocated++;
	vc4->bo_labels[VC4_BO_TYPE_KERNEL].size_allocated += size;
	mutex_unlock(&vc4->bo_lock);

	return &bo->base.base;
}
struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size,
			     bool allow_unzeroed, enum vc4_kernel_bo_type type)
{
	size_t size = roundup(unaligned_size, PAGE_SIZE);
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_gem_cma_object *cma_obj;
	struct vc4_bo *bo;

	if (size == 0)
		return ERR_PTR(-EINVAL);

	/* First, try to get a vc4_bo from the kernel BO cache. */
	bo = vc4_bo_get_from_cache(dev, size, type);
	if (bo) {
		if (!allow_unzeroed)
			memset(bo->base.vaddr, 0, bo->base.base.size);
		return bo;
	}

	cma_obj = drm_gem_cma_create(dev, size);
	if (IS_ERR(cma_obj)) {
		/*
		 * If we've run out of CMA memory, kill the cache of
		 * CMA allocations we've got lying around and try again.
		 */
		vc4_bo_cache_purge(dev);
		cma_obj = drm_gem_cma_create(dev, size);
	}

	if (IS_ERR(cma_obj)) {
		/*
		 * Still not enough CMA memory, purge the userspace BO
		 * cache and retry.
		 * This is sub-optimal since we purge the whole userspace
		 * BO cache, which forces users that want to re-use the BO
		 * to restore its initial content.
		 * Ideally, we should purge entries one by one and retry
		 * after each to see if CMA allocation succeeds. Or even
		 * better, try to find an entry with at least the same
		 * size.
		 */
		vc4_bo_userspace_cache_purge(dev);
		cma_obj = drm_gem_cma_create(dev, size);
	}

	if (IS_ERR(cma_obj)) {
		struct drm_printer p = drm_info_printer(vc4->dev->dev);
		DRM_ERROR("Failed to allocate from CMA:\n");
		vc4_bo_stats_print(&p, vc4);
		return ERR_PTR(-ENOMEM);
	}
	bo = to_vc4_bo(&cma_obj->base);

	/* By default, BOs do not support the MADV ioctl. This will be enabled
	 * only on BOs that are exposed to userspace (V3D, V3D_SHADER and DUMB
	 * BOs).
	 */
	bo->madv = __VC4_MADV_NOTSUPP;

	mutex_lock(&vc4->bo_lock);
	vc4_bo_set_label(&cma_obj->base, type);
	mutex_unlock(&vc4->bo_lock);

	return bo;
}
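/*
 * Illustrative kernel-internal use (a sketch, not driver code; bin_bo and
 * bin_size are hypothetical names): callers that immediately overwrite the
 * whole buffer pass allow_unzeroed = true to skip the memset on a cache hit:
 *
 *	struct vc4_bo *bin_bo;
 *
 *	bin_bo = vc4_bo_create(dev, bin_size, true, VC4_BO_TYPE_BIN);
 *	if (IS_ERR(bin_bo))
 *		return PTR_ERR(bin_bo);
 */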
int vc4_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args)
{
	int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct vc4_bo *bo = NULL;
	int ret;

	if (args->pitch < min_pitch)
		args->pitch = min_pitch;

	if (args->size < args->pitch * args->height)
		args->size = args->pitch * args->height;

	bo = vc4_bo_create(dev, args->size, false, VC4_BO_TYPE_DUMB);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	bo->madv = VC4_MADV_WILLNEED;

	ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
	drm_gem_object_put_unlocked(&bo->base.base);

	return ret;
}
static void vc4_bo_cache_free_old(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	unsigned long expire_time = jiffies - msecs_to_jiffies(1000);

	lockdep_assert_held(&vc4->bo_lock);

	while (!list_empty(&vc4->bo_cache.time_list)) {
		struct vc4_bo *bo = list_last_entry(&vc4->bo_cache.time_list,
						    struct vc4_bo, unref_head);
		if (time_before(expire_time, bo->free_time)) {
			mod_timer(&vc4->bo_cache.time_timer,
				  round_jiffies_up(jiffies +
						   msecs_to_jiffies(1000)));
			return;
		}

		vc4_bo_remove_from_cache(bo);
		vc4_bo_destroy(bo);
	}
}
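/*
 * Worked timeline for the cache expiry (illustration only): a BO freed at
 * t=0 goes on time_list with free_time = jiffies. If nothing reclaims it,
 * time_timer fires around t=1s and schedules time_work, whose worker takes
 * bo_lock and destroys every cached BO that has sat idle for more than a
 * second, returning its CMA memory to the system.
 */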
/* Called on the last userspace/kernel unreference of the BO. Returns
 * it to the BO cache if possible, otherwise frees it.
 */
void vc4_free_object(struct drm_gem_object *gem_bo)
{
	struct drm_device *dev = gem_bo->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_bo *bo = to_vc4_bo(gem_bo);
	struct list_head *cache_list;

	/* Remove the BO from the purgeable list. */
	mutex_lock(&bo->madv_lock);
	if (bo->madv == VC4_MADV_DONTNEED && !refcount_read(&bo->usecnt))
		vc4_bo_remove_from_purgeable_pool(bo);
	mutex_unlock(&bo->madv_lock);

	mutex_lock(&vc4->bo_lock);
	/* If the object references someone else's memory, we can't cache it.
	 */
	if (gem_bo->import_attach) {
		vc4_bo_destroy(bo);
		goto out;
	}

	/* Don't cache if it was publicly named. */
	if (gem_bo->name) {
		vc4_bo_destroy(bo);
		goto out;
	}

	/* If this object was partially constructed but CMA allocation
	 * had failed, just free it. Can also happen when the BO has been
	 * purged.
	 */
	if (!bo->base.vaddr) {
		vc4_bo_destroy(bo);
		goto out;
	}

	cache_list = vc4_get_cache_list_for_size(dev, gem_bo->size);
	if (!cache_list) {
		vc4_bo_destroy(bo);
		goto out;
	}

	if (bo->validated_shader) {
		kfree(bo->validated_shader->uniform_addr_offsets);
		kfree(bo->validated_shader->texture_samples);
		kfree(bo->validated_shader);
		bo->validated_shader = NULL;
	}

	/* Reset madv and usecnt before adding the BO to the cache. */
	bo->madv = __VC4_MADV_NOTSUPP;
	refcount_set(&bo->usecnt, 0);

	bo->t_format = false;
	bo->free_time = jiffies;
	list_add(&bo->size_head, cache_list);
	list_add(&bo->unref_head, &vc4->bo_cache.time_list);

	vc4_bo_set_label(&bo->base.base, VC4_BO_TYPE_KERNEL_CACHE);

	vc4_bo_cache_free_old(dev);

out:
	mutex_unlock(&vc4->bo_lock);
}
static void vc4_bo_cache_time_work(struct work_struct *work)
{
	struct vc4_dev *vc4 =
		container_of(work, struct vc4_dev, bo_cache.time_work);
	struct drm_device *dev = vc4->dev;

	mutex_lock(&vc4->bo_lock);
	vc4_bo_cache_free_old(dev);
	mutex_unlock(&vc4->bo_lock);
}
int vc4_bo_inc_usecnt(struct vc4_bo *bo)
{
	int ret;

	/* Fast path: if the BO is already retained by someone, no need to
	 * check the madv status.
	 */
	if (refcount_inc_not_zero(&bo->usecnt))
		return 0;

	mutex_lock(&bo->madv_lock);
	switch (bo->madv) {
	case VC4_MADV_WILLNEED:
		if (!refcount_inc_not_zero(&bo->usecnt))
			refcount_set(&bo->usecnt, 1);
		ret = 0;
		break;
	case VC4_MADV_DONTNEED:
		/* We shouldn't use a BO marked as purgeable if at least
		 * someone else retained its content by incrementing usecnt.
		 * Luckily the BO hasn't been purged yet, but something wrong
		 * is happening here. Just throw an error instead of
		 * authorizing this use case.
		 */
	case __VC4_MADV_PURGED:
		/* We can't use a purged BO. */
	default:
		/* Invalid madv value. */
		ret = -EINVAL;
		break;
	}
	mutex_unlock(&bo->madv_lock);

	return ret;
}
void vc4_bo_dec_usecnt(struct vc4_bo *bo)
{
	/* Fast path: if the BO is still retained by someone, no need to test
	 * the madv value.
	 */
	if (refcount_dec_not_one(&bo->usecnt))
		return;

	mutex_lock(&bo->madv_lock);
	if (refcount_dec_and_test(&bo->usecnt) &&
	    bo->madv == VC4_MADV_DONTNEED)
		vc4_bo_add_to_purgeable_pool(bo);
	mutex_unlock(&bo->madv_lock);
}
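/*
 * Illustrative pairing (a sketch, not driver code): code that hands a user
 * BO to a hardware block brackets the access with the usecnt helpers so
 * the BO cannot be purged while the hardware is using it:
 *
 *	ret = vc4_bo_inc_usecnt(bo);
 *	if (ret)
 *		return ret;
 *	// ... point the hardware at bo->base.paddr ...
 *	vc4_bo_dec_usecnt(bo);
 */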
static void vc4_bo_cache_time_timer(struct timer_list *t)
{
	struct vc4_dev *vc4 = from_timer(vc4, t, bo_cache.time_timer);

	schedule_work(&vc4->bo_cache.time_work);
}
struct dma_buf *
vc4_prime_export(struct drm_device *dev, struct drm_gem_object *obj, int flags)
{
	struct vc4_bo *bo = to_vc4_bo(obj);
	struct dma_buf *dmabuf;
	int ret;

	if (bo->validated_shader) {
		DRM_DEBUG("Attempting to export shader BO\n");
		return ERR_PTR(-EINVAL);
	}

	/* Note: as soon as the BO is exported it becomes unpurgeable, because
	 * no one ever decrements the usecnt even if the reference held by the
	 * exported BO is released. This shouldn't be a problem since we don't
	 * expect exported BOs to be marked as purgeable.
	 */
	ret = vc4_bo_inc_usecnt(bo);
	if (ret) {
		DRM_ERROR("Failed to increment BO usecnt\n");
		return ERR_PTR(ret);
	}

	dmabuf = drm_gem_prime_export(dev, obj, flags);
	if (IS_ERR(dmabuf))
		vc4_bo_dec_usecnt(bo);

	return dmabuf;
}
vm_fault_t vc4_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct vc4_bo *bo = to_vc4_bo(obj);

	/* The only reason we would end up here is when user-space accesses
	 * BO's memory after it's been purged.
	 */
	mutex_lock(&bo->madv_lock);
	WARN_ON(bo->madv != __VC4_MADV_PURGED);
	mutex_unlock(&bo->madv_lock);

	return VM_FAULT_SIGBUS;
}
int vc4_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem_obj;
	unsigned long vm_pgoff;
	struct vc4_bo *bo;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret)
		return ret;

	gem_obj = vma->vm_private_data;
	bo = to_vc4_bo(gem_obj);

	if (bo->validated_shader && (vma->vm_flags & VM_WRITE)) {
		DRM_DEBUG("mmapping of shader BOs for writing not allowed.\n");
		return -EINVAL;
	}

	if (bo->madv != VC4_MADV_WILLNEED) {
		DRM_DEBUG("mmapping of %s BO not allowed\n",
			  bo->madv == VC4_MADV_DONTNEED ?
			  "purgeable" : "purged");
		return -EINVAL;
	}

	/*
	 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
	 * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
	 * the whole buffer.
	 */
	vma->vm_flags &= ~VM_PFNMAP;

	/* This ->vm_pgoff dance is needed to make all parties happy:
	 * - dma_mmap_wc() uses ->vm_pgoff as an offset within the allocated
	 *   mem-region, hence the need to set it to zero (the value set by
	 *   the DRM core is a virtual offset encoding the GEM object-id)
	 * - the mmap() core logic needs ->vm_pgoff to be restored to its
	 *   initial value before returning from this function because it
	 *   encodes the offset of this GEM in the dev->anon_inode pseudo-file
	 *   and this information will be used when we invalidate userspace
	 *   mappings with drm_vma_node_unmap() (called from vc4_gem_purge()).
	 */
	vm_pgoff = vma->vm_pgoff;
	vma->vm_pgoff = 0;

	ret = dma_mmap_wc(bo->base.base.dev->dev, vma, bo->base.vaddr,
			  bo->base.paddr, vma->vm_end - vma->vm_start);
	vma->vm_pgoff = vm_pgoff;

	if (ret)
		drm_gem_vm_close(vma);

	return ret;
}
int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct vc4_bo *bo = to_vc4_bo(obj);

	if (bo->validated_shader && (vma->vm_flags & VM_WRITE)) {
		DRM_DEBUG("mmapping of shader BOs for writing not allowed.\n");
		return -EINVAL;
	}

	return drm_gem_cma_prime_mmap(obj, vma);
}
void *vc4_prime_vmap(struct drm_gem_object *obj)
{
	struct vc4_bo *bo = to_vc4_bo(obj);

	if (bo->validated_shader) {
		DRM_DEBUG("mmapping of shader BOs not allowed.\n");
		return ERR_PTR(-EINVAL);
	}

	return drm_gem_cma_prime_vmap(obj);
}
struct drm_gem_object *
vc4_prime_import_sg_table(struct drm_device *dev,
			  struct dma_buf_attachment *attach,
			  struct sg_table *sgt)
{
	struct drm_gem_object *obj;

	obj = drm_gem_cma_prime_import_sg_table(dev, attach, sgt);
	if (IS_ERR(obj))
		return obj;

	obj->resv = attach->dmabuf->resv;

	return obj;
}
int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_vc4_create_bo *args = data;
	struct vc4_bo *bo = NULL;
	int ret;

	/*
	 * We can't allocate from the BO cache, because the BOs don't
	 * get zeroed, and that might leak data between users.
	 */
	bo = vc4_bo_create(dev, args->size, false, VC4_BO_TYPE_V3D);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	bo->madv = VC4_MADV_WILLNEED;

	ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
	drm_gem_object_put_unlocked(&bo->base.base);

	return ret;
}
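/*
 * Illustrative userspace usage (a sketch assuming libdrm; not part of this
 * file): allocating a V3D BO with this ioctl, then mapping it through
 * vc4_mmap_bo_ioctl() below:
 *
 *	struct drm_vc4_create_bo create = { .size = 4096 };
 *	struct drm_vc4_mmap_bo map;
 *	void *ptr;
 *
 *	drmIoctl(fd, DRM_IOCTL_VC4_CREATE_BO, &create);
 *	map = (struct drm_vc4_mmap_bo){ .handle = create.handle };
 *	drmIoctl(fd, DRM_IOCTL_VC4_MMAP_BO, &map);
 *	ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, map.offset);
 */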
int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_vc4_mmap_bo *args = data;
	struct drm_gem_object *gem_obj;

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -EINVAL;
	}

	/* The mmap offset was set up at BO allocation time. */
	args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);

	drm_gem_object_put_unlocked(gem_obj);

	return 0;
}
int
vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_vc4_create_shader_bo *args = data;
	struct vc4_bo *bo = NULL;
	int ret;

	if (args->size == 0)
		return -EINVAL;

	if (args->size % sizeof(u64) != 0)
		return -EINVAL;

	if (args->flags != 0) {
		DRM_INFO("Unknown flags set: 0x%08x\n", args->flags);
		return -EINVAL;
	}

	if (args->pad != 0) {
		DRM_INFO("Pad set: 0x%08x\n", args->pad);
		return -EINVAL;
	}

	bo = vc4_bo_create(dev, args->size, true, VC4_BO_TYPE_V3D_SHADER);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	bo->madv = VC4_MADV_WILLNEED;

	if (copy_from_user(bo->base.vaddr,
			   (void __user *)(uintptr_t)args->data,
			   args->size)) {
		ret = -EFAULT;
		goto fail;
	}
	/* Clear the rest of the memory from allocating from the BO
	 * cache.
	 */
	memset(bo->base.vaddr + args->size, 0,
	       bo->base.base.size - args->size);

	bo->validated_shader = vc4_validate_shader(&bo->base);
	if (!bo->validated_shader) {
		ret = -EINVAL;
		goto fail;
	}

	/* We have to create the handle after validation, to avoid
	 * races for users doing things like mmapping the shader BO.
	 */
	ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);

fail:
	drm_gem_object_put_unlocked(&bo->base.base);

	return ret;
}
/**
 * vc4_set_tiling_ioctl() - Sets the tiling modifier for a BO.
 * @dev: DRM device
 * @data: ioctl argument
 * @file_priv: DRM file for this fd
 *
 * The tiling state of the BO decides the default modifier of an fb if
 * no specific modifier was set by userspace, and the return value of
 * vc4_get_tiling_ioctl() (so that userspace can treat a BO it
 * received from dmabuf as the same tiling format as the producer
 * used).
 */
int vc4_set_tiling_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_vc4_set_tiling *args = data;
	struct drm_gem_object *gem_obj;
	struct vc4_bo *bo;
	bool t_format;

	if (args->flags != 0)
		return -EINVAL;

	switch (args->modifier) {
	case DRM_FORMAT_MOD_NONE:
		t_format = false;
		break;
	case DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED:
		t_format = true;
		break;
	default:
		return -EINVAL;
	}

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -ENOENT;
	}
	bo = to_vc4_bo(gem_obj);
	bo->t_format = t_format;

	drm_gem_object_put_unlocked(gem_obj);

	return 0;
}
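/*
 * Illustrative userspace usage (a sketch, not part of this file): tagging
 * a BO as T-format tiled so framebuffers created from it default to the
 * tiled modifier:
 *
 *	struct drm_vc4_set_tiling set = {
 *		.handle = handle,
 *		.modifier = DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED,
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_VC4_SET_TILING, &set);
 */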
/**
 * vc4_get_tiling_ioctl() - Gets the tiling modifier for a BO.
 * @dev: DRM device
 * @data: ioctl argument
 * @file_priv: DRM file for this fd
 *
 * Returns the tiling modifier for a BO as set by vc4_set_tiling_ioctl().
 */
int vc4_get_tiling_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_vc4_get_tiling *args = data;
	struct drm_gem_object *gem_obj;
	struct vc4_bo *bo;

	if (args->flags != 0 || args->modifier != 0)
		return -EINVAL;

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -ENOENT;
	}
	bo = to_vc4_bo(gem_obj);

	if (bo->t_format)
		args->modifier = DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED;
	else
		args->modifier = DRM_FORMAT_MOD_NONE;

	drm_gem_object_put_unlocked(gem_obj);

	return 0;
}
int vc4_bo_cache_init(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int i;

	/* Create the initial set of BO labels that the kernel will
	 * use. This lets us avoid a bunch of string reallocation in
	 * the kernel's draw and BO allocation paths.
	 */
	vc4->bo_labels = kcalloc(VC4_BO_TYPE_COUNT, sizeof(*vc4->bo_labels),
				 GFP_KERNEL);
	if (!vc4->bo_labels)
		return -ENOMEM;
	vc4->num_labels = VC4_BO_TYPE_COUNT;

	BUILD_BUG_ON(ARRAY_SIZE(bo_type_names) != VC4_BO_TYPE_COUNT);
	for (i = 0; i < VC4_BO_TYPE_COUNT; i++)
		vc4->bo_labels[i].name = bo_type_names[i];

	mutex_init(&vc4->bo_lock);

	vc4_debugfs_add_file(dev, "bo_stats", vc4_bo_stats_debugfs, NULL);

	INIT_LIST_HEAD(&vc4->bo_cache.time_list);

	INIT_WORK(&vc4->bo_cache.time_work, vc4_bo_cache_time_work);
	timer_setup(&vc4->bo_cache.time_timer, vc4_bo_cache_time_timer, 0);

	return 0;
}
void vc4_bo_cache_destroy(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int i;

	del_timer(&vc4->bo_cache.time_timer);
	cancel_work_sync(&vc4->bo_cache.time_work);

	vc4_bo_cache_purge(dev);

	for (i = 0; i < vc4->num_labels; i++) {
		if (vc4->bo_labels[i].num_allocated) {
			DRM_ERROR("Destroying BO cache with %d %s "
				  "BOs still allocated\n",
				  vc4->bo_labels[i].num_allocated,
				  vc4->bo_labels[i].name);
		}

		if (is_user_label(i))
			kfree(vc4->bo_labels[i].name);
	}
	kfree(vc4->bo_labels);
}
int vc4_label_bo_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_vc4_label_bo *args = data;
	char *name;
	struct drm_gem_object *gem_obj;
	int ret = 0, label;

	if (!args->len)
		return -EINVAL;

	name = strndup_user(u64_to_user_ptr(args->name), args->len + 1);
	if (IS_ERR(name))
		return PTR_ERR(name);

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
		kfree(name);
		return -ENOENT;
	}

	mutex_lock(&vc4->bo_lock);
	label = vc4_get_user_label(vc4, name);
	if (label != -1)
		vc4_bo_set_label(gem_obj, label);
	else
		ret = -ENOMEM;
	mutex_unlock(&vc4->bo_lock);

	drm_gem_object_put_unlocked(gem_obj);

	return ret;
}
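/*
 * Illustrative userspace usage (a sketch, not part of this file): labeling
 * a BO so it shows up under its own line in the bo_stats debugfs file:
 *
 *	const char *label = "scanout";
 *	struct drm_vc4_label_bo arg = {
 *		.handle = handle,
 *		.len = strlen(label),
 *		.name = (uintptr_t)label,
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_VC4_LABEL_BO, &arg);
 */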