/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

static void msm_gem_vunmap_locked(struct drm_gem_object *obj);

static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}

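/*
 * Worked example for physaddr() above (illustrative numbers only, assuming
 * the carveout base lives in priv->vram.paddr and 4 KiB pages): if the
 * carveout starts at 0x80000000 and the object's drm_mm node begins at
 * page 3, the object's physical address is 0x80000000 + (3 << 12) =
 * 0x80003000.
 */
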
static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	return !msm_obj->vram_node;
}

/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	spin_lock(&priv->vram.lock);
	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
	spin_unlock(&priv->vram.lock);
	if (ret) {
		kvfree(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = phys_to_page(paddr);
		paddr += PAGE_SIZE;
	}

	return p;
}

static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			dev_err(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->pages = p;

		msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			dev_err(dev->dev, "failed to allocate sgt\n");
			return ERR_CAST(msm_obj->sgt);
		}

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_map_sg(dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	}

	return msm_obj->pages;
}

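/*
 * A note on the dma_map_sg() above (explanatory only, no new behaviour):
 * for MSM_BO_WC / MSM_BO_UNCACHED objects the freshly allocated pages may
 * still have dirty CPU cache lines, so mapping the sg list with
 * DMA_BIDIRECTIONAL performs the cache maintenance needed for the
 * non-coherent GPU and display controller to observe consistent contents;
 * put_pages() below undoes this with dma_unmap_sg().
 */
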
static void put_pages_vram(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	spin_lock(&priv->vram.lock);
	drm_mm_remove_node(msm_obj->vram_node);
	spin_unlock(&priv->vram.lock);

	kvfree(msm_obj->pages);
}

static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
		sg_free_table(msm_obj->sgt);
		kfree(msm_obj->sgt);

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else
			put_pages_vram(obj);

		msm_obj->pages = NULL;
	}
}

struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **p;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return ERR_PTR(-EBUSY);
	}

	p = get_pages(obj);
	mutex_unlock(&msm_obj->lock);

	return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
	/* when we start tracking the pin count, then do something here */
}

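/*
 * Illustrative userspace flow for mapping a BO (a sketch following the
 * usual DRM conventions, not code from this file): the fake offset produced
 * by msm_gem_mmap_offset() is what userspace passes to mmap(), e.g.:
 *
 *	uint64_t offset;	// e.g. obtained via the driver's GEM info ioctl
 *	void *map = mmap(NULL, bo_size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, drm_fd, offset);
 *
 * The resulting VMA is fixed up in msm_gem_mmap_obj() below and populated
 * page-by-page from msm_gem_fault().
 */
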
int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (msm_obj->flags & MSM_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(obj->filp);

		vma->vm_pgoff = 0;
		vma->vm_file  = obj->filp;

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return msm_gem_mmap_obj(vma->vm_private_data, vma);
}

int msm_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int ret;

	/*
	 * vm_ops.open/drm_gem_mmap_obj and close get and put
	 * a reference on obj. So, we dont need to hold one here.
	 */
	ret = mutex_lock_interruptible(&msm_obj->lock);
	if (ret)
		goto out;

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return VM_FAULT_SIGBUS;
	}

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	ret = vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));

out_unlock:
	mutex_unlock(&msm_obj->lock);
out:
	switch (ret) {
	case -EAGAIN:
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);
	offset = mmap_offset(obj);
	mutex_unlock(&msm_obj->lock);

	return offset;
}

static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
	if (!vma)
		return ERR_PTR(-ENOMEM);

	vma->aspace = aspace;

	list_add_tail(&vma->list, &msm_obj->vmas);

	return vma;
}

static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace == aspace)
			return vma;
	}

	return NULL;
}

static void del_vma(struct msm_gem_vma *vma)
{
	if (!vma)
		return;

	list_del(&vma->list);

	kfree(vma);
}

/* Called with msm_obj->lock locked */
static void
put_iova(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma, *tmp;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
		msm_gem_unmap_vma(vma->aspace, vma, msm_obj->sgt);
		del_vma(vma);
	}
}

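/*
 * Illustrative pairing for the iova API below (a sketch, not code from the
 * original file): a caller that needs a GPU address, e.g. for command
 * submission, would typically do:
 *
 *	uint64_t iova;
 *	int ret = msm_gem_get_iova(obj, aspace, &iova);
 *	if (ret)
 *		return ret;
 *	... hand 'iova' to the GPU ...
 *	msm_gem_put_iova(obj, aspace);	// currently a no-op, see below
 */
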
/* get iova, taking a reference.  Should have a matching put */
int msm_gem_get_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;
	int ret = 0;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return -EBUSY;
	}

	vma = lookup_vma(obj, aspace);

	if (!vma) {
		struct page **pages;

		vma = add_vma(obj, aspace);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto unlock;
		}

		pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		ret = msm_gem_map_vma(aspace, vma, msm_obj->sgt,
				obj->size >> PAGE_SHIFT);
		if (ret)
			goto fail;
	}

	*iova = vma->iova;

	mutex_unlock(&msm_obj->lock);
	return 0;

fail:
	del_vma(vma);
unlock:
	mutex_unlock(&msm_obj->lock);
	return ret;
}

/* get iova without taking a reference, used in places where you have
 * already done a 'msm_gem_get_iova()'.
 */
uint64_t msm_gem_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	mutex_lock(&msm_obj->lock);
	vma = lookup_vma(obj, aspace);
	mutex_unlock(&msm_obj->lock);

	return vma ? vma->iova : 0;
}

void msm_gem_put_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	// NOTE: probably don't need a _locked() version.. we wouldn't
	// normally unmap here, but instead just mark that it could be
	// unmapped (if the iova refcnt drops to zero), but then later
	// if another _get_iova_locked() fails we can start unmapping
	// things that are no longer needed..
}

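/*
 * Worked example for the dumb-buffer size math below (illustrative numbers;
 * align_pitch() may round the pitch up further): a 1920x1080 buffer at
 * 32 bpp needs a pitch of at least 1920 * 4 = 7680 bytes, so
 * args->size = PAGE_ALIGN(7680 * 1080) = 8294400 bytes, which happens to be
 * already page aligned with 4 KiB pages.
 */
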
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size  = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

fail:
	return ret;
}

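/*
 * Illustrative pairing for the kernel vmap API below (a sketch, not code
 * from this file):
 *
 *	void *ptr = msm_gem_get_vaddr(obj);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	memcpy(ptr, cmds, cmds_size);
 *	msm_gem_put_vaddr(obj);
 *
 * Note that msm_gem_put_vaddr() only drops vmap_count; the actual vunmap()
 * is deferred until the shrinker or msm_gem_purge() calls msm_gem_vunmap().
 */
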
void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return ERR_PTR(-EBUSY);
	}

	/* increment vmap_count *before* vmap() call, so shrinker can
	 * check vmap_count (is_vunmapable()) outside of msm_obj->lock.
	 * This guarantees that we won't try to msm_gem_vunmap() this
	 * same object from within the vmap() call (while we already
	 * hold msm_obj->lock)
	 */
	msm_obj->vmap_count++;

	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);

		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
		if (msm_obj->vaddr == NULL) {
			ret = -ENOMEM;
			goto fail;
		}
	}

	mutex_unlock(&msm_obj->lock);
	return msm_obj->vaddr;

fail:
	msm_obj->vmap_count--;
	mutex_unlock(&msm_obj->lock);
	return ERR_PTR(ret);
}

void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);
	WARN_ON(msm_obj->vmap_count < 1);
	msm_obj->vmap_count--;
	mutex_unlock(&msm_obj->lock);
}

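/*
 * Madvise sketch (illustrative, based on the code below; reallocate_contents()
 * is a hypothetical caller-side helper): userspace, or the msm shrinker on
 * its behalf, marks buffers it can live without and re-checks them later:
 *
 *	msm_gem_madvise(obj, MSM_MADV_DONTNEED);	// may be reclaimed
 *	...
 *	if (!msm_gem_madvise(obj, MSM_MADV_WILLNEED))
 *		reallocate_contents();	// backing pages were purged meanwhile
 *
 * A return of 0 means the object was already purged and its contents are gone.
 */
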
/* Update madvise status, returns true if not purged, else
 * false or -errno.
 */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	madv = msm_obj->madv;

	mutex_unlock(&msm_obj->lock);

	return (madv != __MSM_MADV_PURGED);
}

void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
	WARN_ON(!is_purgeable(msm_obj));
	WARN_ON(obj->import_attach);

	mutex_lock_nested(&msm_obj->lock, subclass);

	put_iova(obj);

	msm_gem_vunmap_locked(obj);

	put_pages(obj);

	msm_obj->madv = __MSM_MADV_PURGED;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
			0, (loff_t)-1);

	mutex_unlock(&msm_obj->lock);
}

static void msm_gem_vunmap_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
		return;

	vunmap(msm_obj->vaddr);
	msm_obj->vaddr = NULL;
}

void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock_nested(&msm_obj->lock, subclass);
	msm_gem_vunmap_locked(obj);
	mutex_unlock(&msm_obj->lock);
}

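/*
 * Fence convention assumed by msm_gem_sync_object() below (a summary of the
 * code, following the usual reservation_object pattern): a write ("exclusive")
 * submit must wait on every shared (reader) fence from other contexts, while
 * any submit waits on a foreign exclusive fence when no shared fences are
 * present. Fences from our own fence context are skipped since the ring is
 * FIFO.
 */
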
/* must be called before _move_to_active().. */
int msm_gem_sync_object(struct drm_gem_object *obj,
		struct msm_fence_context *fctx, bool exclusive)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	int i, ret;

	/* NOTE: _reserve_shared() must happen before _add_shared_fence(),
	 * which makes this a slightly strange place to call it.  OTOH this
	 * is a convenient can-fail point to hook it in.  (And similar to
	 * how etnaviv and nouveau handle this.)
	 */
	ret = reservation_object_reserve_shared(msm_obj->resv);
	if (ret)
		return ret;

	fobj = reservation_object_get_list(msm_obj->resv);
	if (!fobj || (fobj->shared_count == 0)) {
		fence = reservation_object_get_excl(msm_obj->resv);
		/* don't need to wait on our own fences, since ring is fifo */
		if (fence && (fence->context != fctx->context)) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	if (!exclusive || !fobj)
		return 0;

	for (i = 0; i < fobj->shared_count; i++) {
		fence = rcu_dereference_protected(fobj->shared[i],
						reservation_object_held(msm_obj->resv));
		if (fence->context != fctx->context) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	return 0;
}

void msm_gem_move_to_active(struct drm_gem_object *obj,
		struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
	msm_obj->gpu = gpu;
	if (exclusive)
		reservation_object_add_excl_fence(msm_obj->resv, fence);
	else
		reservation_object_add_shared_fence(msm_obj->resv, fence);
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &gpu->active_list);
}

void msm_gem_move_to_inactive(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	msm_obj->gpu = NULL;
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
}

int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	bool write = !!(op & MSM_PREP_WRITE);
	unsigned long remain =
		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
	long ret;

	ret = reservation_object_wait_timeout_rcu(msm_obj->resv, write,
						  true,  remain);
	if (ret == 0)
		return remain == 0 ? -EBUSY : -ETIMEDOUT;
	else if (ret < 0)
		return ret;

	/* TODO cache maintenance */

	return 0;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}

#ifdef CONFIG_DEBUG_FS
static void describe_fence(struct dma_fence *fence, const char *type,
		struct seq_file *m)
{
	if (!dma_fence_is_signaled(fence))
		seq_printf(m, "\t%9s: %s %s seq %u\n", type,
				fence->ops->get_driver_name(fence),
				fence->ops->get_timeline_name(fence),
				fence->seqno);
}

void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct reservation_object *robj = msm_obj->resv;
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	struct msm_gem_vma *vma;
	uint64_t off = drm_vma_node_start(&obj->vma_node);
	const char *madv;

	mutex_lock(&msm_obj->lock);

	switch (msm_obj->madv) {
	case __MSM_MADV_PURGED:
		madv = " purged";
		break;
	case MSM_MADV_DONTNEED:
		madv = " purgeable";
		break;
	case MSM_MADV_WILLNEED:
	default:
		madv = "";
		break;
	}

	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p\t",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, msm_obj->vaddr);

	/* FIXME: we need to print the address space here too */
	list_for_each_entry(vma, &msm_obj->vmas, list)
		seq_printf(m, " %08llx", vma->iova);

	seq_printf(m, " %zu%s\n", obj->size, madv);

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();

	mutex_unlock(&msm_obj->lock);
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_object *msm_obj;
	int count = 0;
	size_t size = 0;

	list_for_each_entry(msm_obj, list, mm_list) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_printf(m, "   ");
		msm_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* object should not be on active list: */
	WARN_ON(is_active(msm_obj));

	list_del(&msm_obj->mm_list);

	mutex_lock(&msm_obj->lock);

	put_iova(obj);

	if (obj->import_attach) {
		if (msm_obj->vaddr)
			dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		if (msm_obj->pages)
			kvfree(msm_obj->pages);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		msm_gem_vunmap_locked(obj);
		put_pages(obj);
	}

	if (msm_obj->resv == &msm_obj->_resv)
		reservation_object_fini(msm_obj->resv);

	drm_gem_object_release(obj);

	mutex_unlock(&msm_obj->lock);
	kfree(msm_obj);
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle)
{
	struct drm_gem_object *obj;
	int ret;

	obj = msm_gem_new(dev, size, flags);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return ret;
}

static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct reservation_object *resv,
		struct drm_gem_object **obj,
		bool struct_mutex_locked)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	default:
		dev_err(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	mutex_init(&msm_obj->lock);

	msm_obj->flags = flags;
	msm_obj->madv = MSM_MADV_WILLNEED;

	if (resv) {
		msm_obj->resv = resv;
	} else {
		msm_obj->resv = &msm_obj->_resv;
		reservation_object_init(msm_obj->resv);
	}

	INIT_LIST_HEAD(&msm_obj->submit_entry);
	INIT_LIST_HEAD(&msm_obj->vmas);

	if (struct_mutex_locked) {
		WARN_ON(!mutex_is_locked(&dev->struct_mutex));
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
	} else {
		mutex_lock(&dev->struct_mutex);
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
		mutex_unlock(&dev->struct_mutex);
	}

	*obj = &msm_obj->base;

	return 0;
}

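/*
 * Note on the two backing stores used by _msm_gem_new() below (a summary of
 * the code, not new behaviour): with an IOMMU present the object is a
 * regular shmem-backed GEM object (drm_gem_object_init()); without one, or
 * for MSM_BO_STOLEN objects when a VRAM pool exists, it is carved out of the
 * contiguous VRAM region (drm_gem_private_object_init() + get_pages_vram()),
 * and its iova is simply its physical address.
 */
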
static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags, bool struct_mutex_locked)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_gem_object *obj = NULL;
	bool use_vram = false;
	int ret;

	size = PAGE_ALIGN(size);

	if (!iommu_present(&platform_bus_type))
		use_vram = true;
	else if ((flags & MSM_BO_STOLEN) && priv->vram.size)
		use_vram = true;

	if (WARN_ON(use_vram && !priv->vram.size))
		return ERR_PTR(-EINVAL);

	/* Disallow zero sized objects as they make the underlying
	 * infrastructure grumpy
	 */
	if (size == 0)
		return ERR_PTR(-EINVAL);

	ret = msm_gem_new_impl(dev, size, flags, NULL, &obj, struct_mutex_locked);
	if (ret)
		goto fail;

	if (use_vram) {
		struct msm_gem_vma *vma;
		struct page **pages;

		vma = add_vma(obj, NULL);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto fail;
		}

		to_msm_bo(obj)->vram_node = &vma->node;

		drm_gem_private_object_init(dev, obj, size);

		pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		vma->iova = physaddr(obj);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
	}

	return obj;

fail:
	drm_gem_object_unreference_unlocked(obj);
	return ERR_PTR(ret);
}

struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, true);
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, false);
}

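/*
 * dma-buf import path. Presumably reached via the driver's PRIME import
 * hooks in msm_drv.c, which hand this helper the importer's sg_table; that
 * wiring lives outside this file, so treat this note as an aside rather
 * than a statement of the exact call chain.
 */
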
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	uint32_t size;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!iommu_present(&platform_bus_type)) {
		dev_err(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(dmabuf->size);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj, false);
	if (ret)
		goto fail;

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	mutex_lock(&msm_obj->lock);
	msm_obj->sgt = sgt;
	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!msm_obj->pages) {
		mutex_unlock(&msm_obj->lock);
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
	if (ret) {
		mutex_unlock(&msm_obj->lock);
		goto fail;
	}

	mutex_unlock(&msm_obj->lock);
	return obj;

fail:
	drm_gem_object_unreference_unlocked(obj);
	return ERR_PTR(ret);
}