// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/dma-map-ops.h>
#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include <drm/drm_prime.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

static void update_inactive(struct msm_gem_object *msm_obj);
static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}
static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	return !msm_obj->vram_node;
}
/*
 * Cache sync.. this is a bit over-complicated, to fit dma-mapping
 * API.  Really GPU cache is out of scope here (handled on cmdstream)
 * and all we need to do is invalidate newly allocated pages before
 * mapping to CPU as uncached/writecombine.
 *
 * On top of this, we have the added headache, that depending on
 * display generation, the display's iommu may be wired up to either
 * the toplevel drm device (mdss), or to the mdp sub-node, meaning
 * that here we either have dma-direct or iommu ops.
 *
 * Let this be a cautionary tale of abstraction gone wrong.
 */
static void sync_for_device(struct msm_gem_object *msm_obj)
{
	struct device *dev = msm_obj->base.dev->dev;

	dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
}
static void sync_for_cpu(struct msm_gem_object *msm_obj)
{
	struct device *dev = msm_obj->base.dev->dev;

	dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
}
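/*
 * Note (assumption based on the comment above): the map/unmap pair is
 * used here purely for its cache maintenance side effect.  The dma
 * addresses that dma_map_sgtable() writes into the sgt are not what
 * the GPU or display consume; those mappings are created separately,
 * per address space, via msm_gem_map_vma().
 */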
/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	spin_lock(&priv->vram.lock);
	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
	spin_unlock(&priv->vram.lock);
	if (ret) {
		kvfree(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = phys_to_page(paddr);
		paddr += PAGE_SIZE;
	}

	return p;
}
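/*
 * Worked example (illustrative, assuming 4K pages): the drm_mm
 * allocator above manages the carveout in units of whole pages, so a
 * node with start == 0x10 in a carveout at priv->vram.paddr ==
 * 0x80000000 yields physaddr() == 0x80000000 + (0x10 << 12) ==
 * 0x80010000, and the loop fills p[] with the struct page for each
 * successive PAGE_SIZE step.
 */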
static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->pages = p;

		msm_obj->sgt = drm_prime_pages_to_sg(obj->dev, p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			void *ptr = ERR_CAST(msm_obj->sgt);

			DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
			msm_obj->sgt = NULL;
			return ptr;
		}

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			sync_for_device(msm_obj);

		update_inactive(msm_obj);
	}

	return msm_obj->pages;
}
static void put_pages_vram(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	spin_lock(&priv->vram.lock);
	drm_mm_remove_node(msm_obj->vram_node);
	spin_unlock(&priv->vram.lock);

	kvfree(msm_obj->pages);
}
static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		if (msm_obj->sgt) {
			/* For non-cached buffers, ensure the new
			 * pages are clean because display controller,
			 * GPU, etc. are not coherent:
			 */
			if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
				sync_for_cpu(msm_obj);

			sg_free_table(msm_obj->sgt);
			kfree(msm_obj->sgt);
			msm_obj->sgt = NULL;
		}

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else
			put_pages_vram(obj);

		msm_obj->pages = NULL;
	}
}
struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **p;

	msm_gem_lock(obj);

	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		msm_gem_unlock(obj);
		return ERR_PTR(-EBUSY);
	}

	p = get_pages(obj);

	if (!IS_ERR(p)) {
		msm_obj->pin_count++;
		update_inactive(msm_obj);
	}

	msm_gem_unlock(obj);
	return p;
}
void msm_gem_put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_lock(obj);
	msm_obj->pin_count--;
	GEM_WARN_ON(msm_obj->pin_count < 0);
	update_inactive(msm_obj);
	msm_gem_unlock(obj);
}
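/*
 * Illustrative pairing (hypothetical caller, not from this file):
 * callers are expected to keep pin_count balanced, e.g.:
 *
 *	struct page **p = msm_gem_get_pages(obj);
 *	if (IS_ERR(p))
 *		return PTR_ERR(p);
 *	...use the backing pages...
 *	msm_gem_put_pages(obj);
 */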
static pgprot_t msm_gem_pgprot(struct msm_gem_object *msm_obj, pgprot_t prot)
{
	if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
		return pgprot_writecombine(prot);
	return prot;
}
static vm_fault_t msm_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int err;
	vm_fault_t ret;

	/*
	 * vm_ops.open/drm_gem_mmap_obj and close get and put
	 * a reference on obj. So, we don't need to hold one here.
	 */
	err = msm_gem_lock_interruptible(obj);
	if (err) {
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		msm_gem_unlock(obj);
		return VM_FAULT_SIGBUS;
	}

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = vmf_error(PTR_ERR(pages));
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	ret = vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));

out_unlock:
	msm_gem_unlock(obj);
out:
	return ret;
}
/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}
uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;

	msm_gem_lock(obj);
	offset = mmap_offset(obj);
	msm_gem_unlock(obj);

	return offset;
}
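/*
 * Illustrative flow (hypothetical userspace sketch): the value returned
 * above is the fake offset that identifies this object in the DRM vma
 * manager, not a byte offset into a real file.  Userspace would use it
 * roughly like:
 *
 *	mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *	     drm_fd, offset);
 */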
static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
	if (!vma)
		return ERR_PTR(-ENOMEM);

	vma->aspace = aspace;

	list_add_tail(&vma->list, &msm_obj->vmas);

	return vma;
}
static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace == aspace)
			return vma;
	}

	return NULL;
}
static void del_vma(struct msm_gem_vma *vma)
{
	if (!vma)
		return;

	list_del(&vma->list);
	kfree(vma);
}
/*
 * If close is true, this also closes the VMA (releasing the allocated
 * iova range) in addition to removing the iommu mapping.  In the eviction
 * case (!close), we keep the iova allocated, but only remove the iommu
 * mapping.
 */
static void
put_iova_spaces(struct drm_gem_object *obj, bool close)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace) {
			msm_gem_purge_vma(vma->aspace, vma);
			if (close)
				msm_gem_close_vma(vma->aspace, vma);
		}
	}
}
/* Called with msm_obj locked */
static void
put_iova_vmas(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma, *tmp;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
		del_vma(vma);
	}
}
static int get_iova_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
{
	struct msm_gem_vma *vma;
	int ret = 0;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	vma = lookup_vma(obj, aspace);

	if (!vma) {
		vma = add_vma(obj, aspace);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		ret = msm_gem_init_vma(aspace, vma, obj->size >> PAGE_SHIFT,
			range_start, range_end);
		if (ret) {
			del_vma(vma);
			return ret;
		}
	}

	*iova = vma->iova;
	return 0;
}
static int msm_gem_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;
	struct page **pages;
	int ret, prot = IOMMU_READ;

	if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
		prot |= IOMMU_WRITE;

	if (msm_obj->flags & MSM_BO_MAP_PRIV)
		prot |= IOMMU_PRIV;

	if (msm_obj->flags & MSM_BO_CACHED_COHERENT)
		prot |= IOMMU_CACHE;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
		return -EBUSY;

	vma = lookup_vma(obj, aspace);
	if (GEM_WARN_ON(!vma))
		return -EINVAL;

	pages = get_pages(obj);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	ret = msm_gem_map_vma(aspace, vma, prot,
			msm_obj->sgt, obj->size >> PAGE_SHIFT);

	if (!ret)
		msm_obj->pin_count++;

	return ret;
}
static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
{
	u64 local;
	int ret;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	ret = get_iova_locked(obj, aspace, &local,
		range_start, range_end);

	if (!ret)
		ret = msm_gem_pin_iova(obj, aspace);

	if (!ret)
		*iova = local;

	return ret;
}
/*
 * get iova and pin it. Should have a matching put
 * limits iova to specified range (in pages)
 */
int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
{
	int ret;

	msm_gem_lock(obj);
	ret = get_and_pin_iova_range_locked(obj, aspace, iova, range_start, range_end);
	msm_gem_unlock(obj);

	return ret;
}
int msm_gem_get_and_pin_iova_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	return get_and_pin_iova_range_locked(obj, aspace, iova, 0, U64_MAX);
}
/* get iova and pin it. Should have a matching put */
int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	return msm_gem_get_and_pin_iova_range(obj, aspace, iova, 0, U64_MAX);
}
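/*
 * Illustrative pairing (hypothetical caller): a successful get-and-pin
 * is expected to be balanced by an unpin on the same address space:
 *
 *	uint64_t iova;
 *	int ret = msm_gem_get_and_pin_iova(obj, aspace, &iova);
 *	if (ret)
 *		return ret;
 *	...submit GPU work that uses iova...
 *	msm_gem_unpin_iova(obj, aspace);
 */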
/*
 * Get an iova but don't pin it. Doesn't need a put because iovas are currently
 * valid for the life of the object
 */
int msm_gem_get_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	int ret;

	msm_gem_lock(obj);
	ret = get_iova_locked(obj, aspace, iova, 0, U64_MAX);
	msm_gem_unlock(obj);

	return ret;
}
/* get iova without taking a reference, used in places where you have
 * already done a 'msm_gem_get_and_pin_iova' or 'msm_gem_get_iova'
 */
uint64_t msm_gem_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_vma *vma;

	msm_gem_lock(obj);
	vma = lookup_vma(obj, aspace);
	msm_gem_unlock(obj);
	GEM_WARN_ON(!vma);

	return vma ? vma->iova : 0;
}
/*
 * Locked variant of msm_gem_unpin_iova()
 */
void msm_gem_unpin_iova_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	vma = lookup_vma(obj, aspace);

	if (!GEM_WARN_ON(!vma)) {
		msm_gem_unmap_vma(aspace, vma);

		msm_obj->pin_count--;
		GEM_WARN_ON(msm_obj->pin_count < 0);

		update_inactive(msm_obj);
	}
}
/*
 * Unpin an iova by updating the reference counts. The memory isn't actually
 * purged until something else (shrinker, mm_notifier, destroy, etc) decides
 * to get rid of it
 */
void msm_gem_unpin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	msm_gem_lock(obj);
	msm_gem_unpin_iova_locked(obj, aspace);
	msm_gem_unlock(obj);
}
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size  = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
}
int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_put(obj);

fail:
	return ret;
}
static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	if (obj->import_attach)
		return ERR_PTR(-ENODEV);

	if (GEM_WARN_ON(msm_obj->madv > madv)) {
		DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
			msm_obj->madv, madv);
		return ERR_PTR(-EBUSY);
	}

	/* increment vmap_count *before* vmap() call, so shrinker can
	 * check vmap_count (is_vunmapable()) outside of msm_obj lock.
	 * This guarantees that we won't try to msm_gem_vunmap() this
	 * same object from within the vmap() call (while we already
	 * hold msm_obj lock)
	 */
	msm_obj->vmap_count++;

	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, msm_gem_pgprot(msm_obj, PAGE_KERNEL));
		if (msm_obj->vaddr == NULL) {
			ret = -ENOMEM;
			goto fail;
		}

		update_inactive(msm_obj);
	}

	return msm_obj->vaddr;

fail:
	msm_obj->vmap_count--;
	return ERR_PTR(ret);
}
void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
{
	return get_vaddr(obj, MSM_MADV_WILLNEED);
}
void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
	void *ret;

	msm_gem_lock(obj);
	ret = msm_gem_get_vaddr_locked(obj);
	msm_gem_unlock(obj);

	return ret;
}
/*
 * Don't use this!  It is for the very special case of dumping
 * submits from GPU hangs or faults, where the bo may already
 * be MSM_MADV_DONTNEED, but we know the buffer is still on the
 * active list.
 */
void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
{
	return get_vaddr(obj, __MSM_MADV_PURGED);
}
void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	GEM_WARN_ON(!msm_gem_is_locked(obj));
	GEM_WARN_ON(msm_obj->vmap_count < 1);

	msm_obj->vmap_count--;
}
void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
	msm_gem_lock(obj);
	msm_gem_put_vaddr_locked(obj);
	msm_gem_unlock(obj);
}
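/*
 * Illustrative pairing (hypothetical caller): CPU access from the
 * kernel is bracketed the same way as pinning:
 *
 *	void *ptr = msm_gem_get_vaddr(obj);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	memcpy(ptr, data, len);
 *	msm_gem_put_vaddr(obj);
 *
 * Note that put only decrements vmap_count; the actual vunmap() is
 * deferred until msm_gem_vunmap() runs (shrinker or free path).
 */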
/* Update madvise status, returns true if not purged, else
 * false or -errno.
 */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_lock(obj);

	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	madv = msm_obj->madv;

	/* If the obj is inactive, we might need to move it
	 * between inactive lists
	 */
	if (msm_obj->active_count == 0)
		update_inactive(msm_obj);

	msm_gem_unlock(obj);

	return (madv != __MSM_MADV_PURGED);
}
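/*
 * Illustrative semantics (summary, assumption based on the return
 * value above): userspace marks idle buffers MSM_MADV_DONTNEED and
 * marks them MSM_MADV_WILLNEED again before reuse; a zero return at
 * that point means the shrinker purged the buffer in the meantime and
 * its contents are gone.
 */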
void msm_gem_purge(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	GEM_WARN_ON(!msm_gem_is_locked(obj));
	GEM_WARN_ON(!is_purgeable(msm_obj));

	/* Get rid of any iommu mapping(s): */
	put_iova_spaces(obj, true);

	msm_gem_vunmap(obj);

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);

	put_pages(obj);

	put_iova_vmas(obj);

	msm_obj->madv = __MSM_MADV_PURGED;
	update_inactive(msm_obj);

	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
			0, (loff_t)-1);
}
/*
 * Unpin the backing pages and make them available to be swapped out.
 */
void msm_gem_evict(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	GEM_WARN_ON(!msm_gem_is_locked(obj));
	GEM_WARN_ON(is_unevictable(msm_obj));
	GEM_WARN_ON(!msm_obj->evictable);
	GEM_WARN_ON(msm_obj->active_count);

	/* Get rid of any iommu mapping(s): */
	put_iova_spaces(obj, false);

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);

	put_pages(obj);

	update_inactive(msm_obj);
}
void msm_gem_vunmap(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	if (!msm_obj->vaddr || GEM_WARN_ON(!is_vunmapable(msm_obj)))
		return;

	vunmap(msm_obj->vaddr);
	msm_obj->vaddr = NULL;
}
void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	might_sleep();
	GEM_WARN_ON(!msm_gem_is_locked(obj));
	GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
	GEM_WARN_ON(msm_obj->dontneed);

	if (msm_obj->active_count++ == 0) {
		mutex_lock(&priv->mm_lock);
		if (msm_obj->evictable)
			mark_unevictable(msm_obj);
		list_move_tail(&msm_obj->mm_list, &gpu->active_list);
		mutex_unlock(&priv->mm_lock);
	}
}
void msm_gem_active_put(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	might_sleep();
	GEM_WARN_ON(!msm_gem_is_locked(obj));

	if (--msm_obj->active_count == 0) {
		update_inactive(msm_obj);
	}
}
static void update_inactive(struct msm_gem_object *msm_obj)
{
	struct msm_drm_private *priv = msm_obj->base.dev->dev_private;

	GEM_WARN_ON(!msm_gem_is_locked(&msm_obj->base));

	if (msm_obj->active_count != 0)
		return;

	mutex_lock(&priv->mm_lock);

	if (msm_obj->dontneed)
		mark_unpurgeable(msm_obj);
	if (msm_obj->evictable)
		mark_unevictable(msm_obj);

	list_del(&msm_obj->mm_list);
	if ((msm_obj->madv == MSM_MADV_WILLNEED) && msm_obj->sgt) {
		list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed);
		mark_evictable(msm_obj);
	} else if (msm_obj->madv == MSM_MADV_DONTNEED) {
		list_add_tail(&msm_obj->mm_list, &priv->inactive_dontneed);
		mark_purgeable(msm_obj);
	} else {
		GEM_WARN_ON((msm_obj->madv != __MSM_MADV_PURGED) && msm_obj->sgt);
		list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
	}

	mutex_unlock(&priv->mm_lock);
}
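/*
 * Illustrative summary (not from the original comments): an inactive
 * object ends up on exactly one of three lists:
 *
 *	inactive_willneed - has backing pages, WILLNEED (evictable)
 *	inactive_dontneed - has backing pages, DONTNEED (purgeable)
 *	inactive_unpinned - no backing pages (purged/evicted/unpinned)
 *
 * The shrinker purges from the dontneed list and evicts from the
 * willneed list under memory pressure.
 */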
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	bool write = !!(op & MSM_PREP_WRITE);
	unsigned long remain =
		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
	long ret;

	ret = dma_resv_wait_timeout(obj->resv, write, true, remain);
	if (ret == 0)
		return remain == 0 ? -EBUSY : -ETIMEDOUT;
	else if (ret < 0)
		return ret;

	/* TODO cache maintenance */

	return 0;
}
int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}
#ifdef CONFIG_DEBUG_FS
static void describe_fence(struct dma_fence *fence, const char *type,
		struct seq_file *m)
{
	if (!dma_fence_is_signaled(fence))
		seq_printf(m, "\t%9s: %s %s seq %llu\n", type,
				fence->ops->get_driver_name(fence),
				fence->ops->get_timeline_name(fence),
				fence->seqno);
}
void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
		struct msm_gem_stats *stats)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct dma_resv *robj = obj->resv;
	struct dma_resv_list *fobj;
	struct dma_fence *fence;
	struct msm_gem_vma *vma;
	uint64_t off = drm_vma_node_start(&obj->vma_node);
	const char *madv;

	msm_gem_lock(obj);

	stats->all.count++;
	stats->all.size += obj->size;

	if (is_active(msm_obj)) {
		stats->active.count++;
		stats->active.size += obj->size;
	}

	if (msm_obj->pages) {
		stats->resident.count++;
		stats->resident.size += obj->size;
	}

	switch (msm_obj->madv) {
	case __MSM_MADV_PURGED:
		stats->purged.count++;
		stats->purged.size += obj->size;
		madv = " purged";
		break;
	case MSM_MADV_DONTNEED:
		stats->purgeable.count++;
		stats->purgeable.size += obj->size;
		madv = " purgeable";
		break;
	case MSM_MADV_WILLNEED:
	default:
		madv = "";
		break;
	}

	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, msm_obj->vaddr);

	seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);

	if (!list_empty(&msm_obj->vmas)) {

		seq_puts(m, "      vmas:");

		list_for_each_entry(vma, &msm_obj->vmas, list) {
			const char *name, *comm;
			if (vma->aspace) {
				struct msm_gem_address_space *aspace = vma->aspace;
				struct task_struct *task =
					get_pid_task(aspace->pid, PIDTYPE_PID);
				if (task) {
					comm = kstrdup(task->comm, GFP_KERNEL);
					put_task_struct(task);
				} else {
					comm = NULL;
				}
				name = aspace->name;
			} else {
				name = comm = NULL;
			}
			seq_printf(m, " [%s%s%s: aspace=%p, %08llx,%s,inuse=%d]",
				name, comm ? ":" : "", comm ? comm : "",
				vma->aspace, vma->iova,
				vma->mapped ? "mapped" : "unmapped",
				vma->inuse);
			kfree(comm);
		}

		seq_puts(m, "\n");
	}

	rcu_read_lock();
	fobj = dma_resv_shared_list(robj);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			describe_fence(fence, "Shared", m);
		}
	}

	fence = dma_resv_excl_fence(robj);
	if (fence)
		describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();

	msm_gem_unlock(obj);
}
void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_stats stats = {};
	struct msm_gem_object *msm_obj;

	seq_puts(m, "   flags       id ref  offset   kaddr            size     madv      name\n");
	list_for_each_entry(msm_obj, list, node) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_puts(m, "   ");
		msm_gem_describe(obj, m, &stats);
	}

	seq_printf(m, "Total:     %4d objects, %9zu bytes\n",
			stats.all.count, stats.all.size);
	seq_printf(m, "Active:    %4d objects, %9zu bytes\n",
			stats.active.count, stats.active.size);
	seq_printf(m, "Resident:  %4d objects, %9zu bytes\n",
			stats.resident.count, stats.resident.size);
	seq_printf(m, "Purgeable: %4d objects, %9zu bytes\n",
			stats.purgeable.count, stats.purgeable.size);
	seq_printf(m, "Purged:    %4d objects, %9zu bytes\n",
			stats.purged.count, stats.purged.size);
}
#endif
/* don't call directly!  Use drm_gem_object_put() */
void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;

	mutex_lock(&priv->obj_lock);
	list_del(&msm_obj->node);
	mutex_unlock(&priv->obj_lock);

	mutex_lock(&priv->mm_lock);
	if (msm_obj->dontneed)
		mark_unpurgeable(msm_obj);
	list_del(&msm_obj->mm_list);
	mutex_unlock(&priv->mm_lock);

	msm_gem_lock(obj);

	/* object should not be on active list: */
	GEM_WARN_ON(is_active(msm_obj));

	put_iova_spaces(obj, true);

	if (obj->import_attach) {
		GEM_WARN_ON(msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		kvfree(msm_obj->pages);

		put_iova_vmas(obj);

		/* dma_buf_detach() grabs resv lock, so we need to unlock
		 * prior to drm_prime_gem_destroy
		 */
		msm_gem_unlock(obj);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		msm_gem_vunmap(obj);
		put_pages(obj);
		put_iova_vmas(obj);
		msm_gem_unlock(obj);
	}

	drm_gem_object_release(obj);

	kfree(msm_obj);
}
static int msm_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
	vma->vm_page_prot = msm_gem_pgprot(msm_obj, vm_get_page_prot(vma->vm_flags));

	return 0;
}
/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle,
		char *name)
{
	struct drm_gem_object *obj;
	int ret;

	obj = msm_gem_new(dev, size, flags);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (name)
		msm_gem_object_set_name(obj, "%s", name);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(obj);

	return ret;
}
static const struct vm_operations_struct vm_ops = {
	.fault = msm_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct drm_gem_object_funcs msm_gem_object_funcs = {
	.free = msm_gem_free_object,
	.pin = msm_gem_prime_pin,
	.unpin = msm_gem_prime_unpin,
	.get_sg_table = msm_gem_prime_get_sg_table,
	.vmap = msm_gem_prime_vmap,
	.vunmap = msm_gem_prime_vunmap,
	.mmap = msm_gem_object_mmap,
	.vm_ops = &vm_ops,
};
static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct drm_gem_object **obj)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	case MSM_BO_CACHED_COHERENT:
		if (priv->has_cached_coherent)
			break;
		fallthrough;
	default:
		DRM_DEV_ERROR(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	msm_obj->flags = flags;
	msm_obj->madv = MSM_MADV_WILLNEED;

	INIT_LIST_HEAD(&msm_obj->vmas);

	*obj = &msm_obj->base;
	(*obj)->funcs = &msm_gem_object_funcs;

	return 0;
}
struct drm_gem_object *msm_gem_new(struct drm_device *dev, uint32_t size, uint32_t flags)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj = NULL;
	bool use_vram = false;
	int ret;

	size = PAGE_ALIGN(size);

	if (!msm_use_mmu(dev))
		use_vram = true;
	else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
		use_vram = true;

	if (GEM_WARN_ON(use_vram && !priv->vram.size))
		return ERR_PTR(-EINVAL);

	/* Disallow zero sized objects as they make the underlying
	 * infrastructure grumpy
	 */
	if (size == 0)
		return ERR_PTR(-EINVAL);

	ret = msm_gem_new_impl(dev, size, flags, &obj);
	if (ret)
		goto fail;

	msm_obj = to_msm_bo(obj);

	if (use_vram) {
		struct msm_gem_vma *vma;
		struct page **pages;

		drm_gem_private_object_init(dev, obj, size);

		msm_gem_lock(obj);

		vma = add_vma(obj, NULL);
		msm_gem_unlock(obj);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto fail;
		}

		to_msm_bo(obj)->vram_node = &vma->node;

		/* Call chain get_pages() -> update_inactive() tries to
		 * access msm_obj->mm_list, but it is not initialized yet.
		 * To avoid NULL pointer dereference error, initialize
		 * mm_list to be empty.
		 */
		INIT_LIST_HEAD(&msm_obj->mm_list);

		msm_gem_lock(obj);
		pages = get_pages(obj);
		msm_gem_unlock(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		vma->iova = physaddr(obj);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
		/*
		 * Our buffers are kept pinned, so allocating them from the
		 * MOVABLE zone is a really bad idea, and conflicts with CMA.
		 * See comments above new_inode() why this is required _and_
		 * expected if you're going to pin these pages.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
	}

	mutex_lock(&priv->mm_lock);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
	mutex_unlock(&priv->mm_lock);

	mutex_lock(&priv->obj_lock);
	list_add_tail(&msm_obj->node, &priv->objects);
	mutex_unlock(&priv->obj_lock);

	return obj;

fail:
	drm_gem_object_put(obj);
	return ERR_PTR(ret);
}
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	uint32_t size;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!msm_use_mmu(dev)) {
		DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(dmabuf->size);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
	if (ret)
		goto fail;

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	msm_gem_lock(obj);
	msm_obj->sgt = sgt;
	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!msm_obj->pages) {
		msm_gem_unlock(obj);
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_array(sgt, msm_obj->pages, npages);
	if (ret) {
		msm_gem_unlock(obj);
		goto fail;
	}

	msm_gem_unlock(obj);

	mutex_lock(&priv->mm_lock);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
	mutex_unlock(&priv->mm_lock);

	mutex_lock(&priv->obj_lock);
	list_add_tail(&msm_obj->node, &priv->objects);
	mutex_unlock(&priv->obj_lock);

	return obj;

fail:
	drm_gem_object_put(obj);
	return ERR_PTR(ret);
}
void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	void *vaddr;
	struct drm_gem_object *obj = msm_gem_new(dev, size, flags);
	int ret;

	if (IS_ERR(obj))
		return ERR_CAST(obj);

	if (iova) {
		ret = msm_gem_get_and_pin_iova(obj, aspace, iova);
		if (ret)
			goto err;
	}

	vaddr = msm_gem_get_vaddr(obj);
	if (IS_ERR(vaddr)) {
		msm_gem_unpin_iova(obj, aspace);
		ret = PTR_ERR(vaddr);
		goto err;
	}

	if (bo)
		*bo = obj;

	return vaddr;
err:
	drm_gem_object_put(obj);

	return ERR_PTR(ret);
}
void msm_gem_kernel_put(struct drm_gem_object *bo,
		struct msm_gem_address_space *aspace)
{
	if (IS_ERR_OR_NULL(bo))
		return;

	msm_gem_put_vaddr(bo);
	msm_gem_unpin_iova(bo, aspace);
	drm_gem_object_put(bo);
}
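/*
 * Illustrative usage (hypothetical caller), e.g. a driver-internal
 * scratch buffer mapped for both the GPU and the kernel:
 *
 *	struct drm_gem_object *bo;
 *	uint64_t iova;
 *	void *ptr = msm_gem_kernel_new(dev, SZ_4K, MSM_BO_WC,
 *				       gpu->aspace, &bo, &iova);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	...use ptr / iova...
 *	msm_gem_kernel_put(bo, gpu->aspace);
 */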
void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
{
	struct msm_gem_object *msm_obj = to_msm_bo(bo);
	va_list ap;

	if (!fmt)
		return;

	va_start(ap, fmt);
	vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
	va_end(ap);
}