// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

static void msm_gem_vunmap_locked(struct drm_gem_object *obj);


static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}

static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	return !msm_obj->vram_node;
}

/*
 * Cache sync.. this is a bit over-complicated, to fit dma-mapping
 * API. Really GPU cache is out of scope here (handled on cmdstream)
 * and all we need to do is invalidate newly allocated pages before
 * mapping to CPU as uncached/writecombine.
 *
 * On top of this, we have the added headache, that depending on
 * display generation, the display's iommu may be wired up to either
 * the toplevel drm device (mdss), or to the mdp sub-node, meaning
 * that here we either have dma-direct or iommu ops.
 *
 * Let this be a cautionary tale of abstraction gone wrong.
 */

static void sync_for_device(struct msm_gem_object *msm_obj)
{
	struct device *dev = msm_obj->base.dev->dev;

	if (get_dma_ops(dev) && IS_ENABLED(CONFIG_ARM64)) {
		dma_sync_sg_for_device(dev, msm_obj->sgt->sgl,
			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	} else {
		dma_map_sg(dev, msm_obj->sgt->sgl,
			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	}
}

static void sync_for_cpu(struct msm_gem_object *msm_obj)
{
	struct device *dev = msm_obj->base.dev->dev;

	if (get_dma_ops(dev) && IS_ENABLED(CONFIG_ARM64)) {
		dma_sync_sg_for_cpu(dev, msm_obj->sgt->sgl,
			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	} else {
		dma_unmap_sg(dev, msm_obj->sgt->sgl,
			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	}
}

/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	spin_lock(&priv->vram.lock);
	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
	spin_unlock(&priv->vram.lock);
	if (ret) {
		kvfree(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = phys_to_page(paddr);
		paddr += PAGE_SIZE;
	}

	return p;
}

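/* Get the backing pages for an object, allocating them and building the
 * scatter/gather table on first use:
 */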
static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->pages = p;

		msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			void *ptr = ERR_CAST(msm_obj->sgt);

			DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
			msm_obj->sgt = NULL;
			return ptr;
		}

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			sync_for_device(msm_obj);
	}

	return msm_obj->pages;
}

static void put_pages_vram(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	spin_lock(&priv->vram.lock);
	drm_mm_remove_node(msm_obj->vram_node);
	spin_unlock(&priv->vram.lock);

	kvfree(msm_obj->pages);
}

static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		if (msm_obj->sgt) {
			/* For non-cached buffers, ensure the new
			 * pages are clean because display controller,
			 * GPU, etc. are not coherent:
			 */
			if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
				sync_for_cpu(msm_obj);

			sg_free_table(msm_obj->sgt);
			kfree(msm_obj->sgt);
		}

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else
			put_pages_vram(obj);

		msm_obj->pages = NULL;
	}
}

struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **p;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return ERR_PTR(-EBUSY);
	}

	p = get_pages(obj);
	mutex_unlock(&msm_obj->lock);
	return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
	/* when we start tracking the pin count, then do something here */
}

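/* Set up vma flags and page protection for a userspace mapping according
 * to the object's caching mode (WC, uncached or cached):
 */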
int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (msm_obj->flags & MSM_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(obj->filp);
		vma->vm_pgoff = 0;
		vma->vm_file = obj->filp;

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return msm_gem_mmap_obj(vma->vm_private_data, vma);
}

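/* Page fault handler: makes sure the backing pages are attached and inserts
 * the faulting page's pfn into the userspace mapping:
 */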
vm_fault_t msm_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int err;
	vm_fault_t ret;

	/*
	 * vm_ops.open/drm_gem_mmap_obj and close get and put
	 * a reference on obj. So, we don't need to hold one here.
	 */
	err = mutex_lock_interruptible(&msm_obj->lock);
	if (err) {
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return VM_FAULT_SIGBUS;
	}

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = vmf_error(PTR_ERR(pages));
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	ret = vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
out_unlock:
	mutex_unlock(&msm_obj->lock);
out:
	return ret;
}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);
	offset = mmap_offset(obj);
	mutex_unlock(&msm_obj->lock);
	return offset;
}

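/* Allocate a new vma for the given address space and add it to the object's
 * vma list.  Caller must hold msm_obj->lock:
 */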
static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
	if (!vma)
		return ERR_PTR(-ENOMEM);

	vma->aspace = aspace;

	list_add_tail(&vma->list, &msm_obj->vmas);

	return vma;
}

static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace == aspace)
			return vma;
	}

	return NULL;
}

static void del_vma(struct msm_gem_vma *vma)
{
	if (!vma)
		return;

	list_del(&vma->list);
	kfree(vma);
}

/* Called with msm_obj->lock locked */
static void
put_iova(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma, *tmp;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
		if (vma->aspace) {
			msm_gem_purge_vma(vma->aspace, vma);
			msm_gem_close_vma(vma->aspace, vma);
		}
		del_vma(vma);
	}
}

static int msm_gem_get_iova_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;
	int ret = 0;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	vma = lookup_vma(obj, aspace);

	if (!vma) {
		vma = add_vma(obj, aspace);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		ret = msm_gem_init_vma(aspace, vma, obj->size >> PAGE_SHIFT);
		if (ret) {
			del_vma(vma);
			return ret;
		}
	}

	*iova = vma->iova;
	return 0;
}

static int msm_gem_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;
	struct page **pages;
	int prot = IOMMU_READ;

	if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
		prot |= IOMMU_WRITE;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
		return -EBUSY;

	vma = lookup_vma(obj, aspace);
	if (WARN_ON(!vma))
		return -EINVAL;

	pages = get_pages(obj);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	return msm_gem_map_vma(aspace, vma, prot,
			msm_obj->sgt, obj->size >> PAGE_SHIFT);
}

/* get iova and pin it. Should have a matching put */
int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	u64 local;
	int ret;

	mutex_lock(&msm_obj->lock);

	ret = msm_gem_get_iova_locked(obj, aspace, &local);

	if (!ret)
		ret = msm_gem_pin_iova(obj, aspace);

	if (!ret)
		*iova = local;

	mutex_unlock(&msm_obj->lock);
	return ret;
}

/*
 * Get an iova but don't pin it. Doesn't need a put because iovas are currently
 * valid for the life of the object
 */
int msm_gem_get_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	mutex_lock(&msm_obj->lock);
	ret = msm_gem_get_iova_locked(obj, aspace, iova);
	mutex_unlock(&msm_obj->lock);

	return ret;
}

/* get iova without taking a reference, used in places where you have
 * already done a 'msm_gem_get_and_pin_iova' or 'msm_gem_get_iova'
 */
uint64_t msm_gem_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	mutex_lock(&msm_obj->lock);
	vma = lookup_vma(obj, aspace);
	mutex_unlock(&msm_obj->lock);
	WARN_ON(!vma);

	return vma ? vma->iova : 0;
}

/*
 * Unpin an iova by updating the reference counts. The memory isn't actually
 * purged until something else (shrinker, mm_notifier, destroy, etc) decides
 * to get rid of it
 */
void msm_gem_unpin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	mutex_lock(&msm_obj->lock);
	vma = lookup_vma(obj, aspace);

	if (!WARN_ON(!vma))
		msm_gem_unmap_vma(aspace, vma);

	mutex_unlock(&msm_obj->lock);
}

int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_put_unlocked(obj);

fail:
	return ret;
}

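/* Map the object into the kernel's address space with vmap(), refusing
 * objects whose madvise state is more purgeable than 'madv' allows:
 */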
static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv > madv)) {
		DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
			msm_obj->madv, madv);
		mutex_unlock(&msm_obj->lock);
		return ERR_PTR(-EBUSY);
	}

	/* increment vmap_count *before* vmap() call, so shrinker can
	 * check vmap_count (is_vunmapable()) outside of msm_obj->lock.
	 * This guarantees that we won't try to msm_gem_vunmap() this
	 * same object from within the vmap() call (while we already
	 * hold msm_obj->lock)
	 */
	msm_obj->vmap_count++;

	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
		if (msm_obj->vaddr == NULL) {
			ret = -ENOMEM;
			goto fail;
		}
	}

	mutex_unlock(&msm_obj->lock);
	return msm_obj->vaddr;

fail:
	msm_obj->vmap_count--;
	mutex_unlock(&msm_obj->lock);
	return ERR_PTR(ret);
}

void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
	return get_vaddr(obj, MSM_MADV_WILLNEED);
}

/*
 * Don't use this! It is for the very special case of dumping
 * submits from GPU hangs or faults, where the bo may already
 * be MSM_MADV_DONTNEED, but we know the buffer is still on the
 * active list.
 */
void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
{
	return get_vaddr(obj, __MSM_MADV_PURGED);
}

void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);
	WARN_ON(msm_obj->vmap_count < 1);
	msm_obj->vmap_count--;
	mutex_unlock(&msm_obj->lock);
}

/* Update madvise status, returns true if not purged, else
 * false or -errno.
 */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	madv = msm_obj->madv;

	mutex_unlock(&msm_obj->lock);

	return (madv != __MSM_MADV_PURGED);
}

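/* Release a purgeable object's iova, kernel mapping and backing pages, and
 * truncate its shmem backing store so the memory goes back to the system:
 */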
void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
	WARN_ON(!is_purgeable(msm_obj));
	WARN_ON(obj->import_attach);

	mutex_lock_nested(&msm_obj->lock, subclass);

	put_iova(obj);

	msm_gem_vunmap_locked(obj);

	put_pages(obj);

	msm_obj->madv = __MSM_MADV_PURGED;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
			0, (loff_t)-1);

	mutex_unlock(&msm_obj->lock);
}

static void msm_gem_vunmap_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
		return;

	vunmap(msm_obj->vaddr);
	msm_obj->vaddr = NULL;
}

void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock_nested(&msm_obj->lock, subclass);
	msm_gem_vunmap_locked(obj);
	mutex_unlock(&msm_obj->lock);
}

/* must be called before _move_to_active().. */
int msm_gem_sync_object(struct drm_gem_object *obj,
		struct msm_fence_context *fctx, bool exclusive)
{
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	int i, ret;

	fobj = reservation_object_get_list(obj->resv);
	if (!fobj || (fobj->shared_count == 0)) {
		fence = reservation_object_get_excl(obj->resv);
		/* don't need to wait on our own fences, since ring is fifo */
		if (fence && (fence->context != fctx->context)) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	if (!exclusive || !fobj)
		return 0;

	for (i = 0; i < fobj->shared_count; i++) {
		fence = rcu_dereference_protected(fobj->shared[i],
						reservation_object_held(obj->resv));
		if (fence->context != fctx->context) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	return 0;
}

void msm_gem_move_to_active(struct drm_gem_object *obj,
		struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
	msm_obj->gpu = gpu;
	if (exclusive)
		reservation_object_add_excl_fence(obj->resv, fence);
	else
		reservation_object_add_shared_fence(obj->resv, fence);
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &gpu->active_list);
}

void msm_gem_move_to_inactive(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	msm_obj->gpu = NULL;
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
}

int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	bool write = !!(op & MSM_PREP_WRITE);
	unsigned long remain =
		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
	long ret;

	ret = reservation_object_wait_timeout_rcu(obj->resv, write,
						true, remain);
	if (ret == 0)
		return remain == 0 ? -EBUSY : -ETIMEDOUT;
	else if (ret < 0)
		return ret;

	/* TODO cache maintenance */

	return 0;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}

#ifdef CONFIG_DEBUG_FS
static void describe_fence(struct dma_fence *fence, const char *type,
		struct seq_file *m)
{
	if (!dma_fence_is_signaled(fence))
		seq_printf(m, "\t%9s: %s %s seq %llu\n", type,
				fence->ops->get_driver_name(fence),
				fence->ops->get_timeline_name(fence),
				fence->seqno);
}

void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct reservation_object *robj = obj->resv;
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	struct msm_gem_vma *vma;
	uint64_t off = drm_vma_node_start(&obj->vma_node);
	const char *madv;

	mutex_lock(&msm_obj->lock);

	switch (msm_obj->madv) {
	case __MSM_MADV_PURGED:
		madv = " purged";
		break;
	case MSM_MADV_DONTNEED:
		madv = " purgeable";
		break;
	case MSM_MADV_WILLNEED:
	default:
		madv = "";
		break;
	}

	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, msm_obj->vaddr);

	seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);

	if (!list_empty(&msm_obj->vmas)) {

		seq_puts(m, "      vmas:");

		list_for_each_entry(vma, &msm_obj->vmas, list)
			seq_printf(m, " [%s: %08llx,%s,inuse=%d]",
				vma->aspace != NULL ? vma->aspace->name : NULL,
				vma->iova, vma->mapped ? "mapped" : "unmapped",
				vma->inuse);

		seq_puts(m, "\n");
	}

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();

	mutex_unlock(&msm_obj->lock);
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_object *msm_obj;
	int count = 0;
	size_t size = 0;

	seq_puts(m, "   flags       id ref  offset   kaddr            size     madv      name\n");
	list_for_each_entry(msm_obj, list, mm_list) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_puts(m, "   ");
		msm_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

/* don't call directly! Use drm_gem_object_put() and friends */
void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;

	if (llist_add(&msm_obj->freed, &priv->free_list))
		queue_work(priv->wq, &priv->free_work);
}

static void free_object(struct msm_gem_object *msm_obj)
{
	struct drm_gem_object *obj = &msm_obj->base;
	struct drm_device *dev = obj->dev;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* object should not be on active list: */
	WARN_ON(is_active(msm_obj));

	list_del(&msm_obj->mm_list);

	mutex_lock(&msm_obj->lock);

	put_iova(obj);

	if (obj->import_attach) {
		if (msm_obj->vaddr)
			dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		if (msm_obj->pages)
			kvfree(msm_obj->pages);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		msm_gem_vunmap_locked(obj);
		put_pages(obj);
	}

	drm_gem_object_release(obj);

	mutex_unlock(&msm_obj->lock);
	kfree(msm_obj);
}

void msm_gem_free_work(struct work_struct *work)
{
	struct msm_drm_private *priv =
		container_of(work, struct msm_drm_private, free_work);
	struct drm_device *dev = priv->dev;
	struct llist_node *freed;
	struct msm_gem_object *msm_obj, *next;

	while ((freed = llist_del_all(&priv->free_list))) {

		mutex_lock(&dev->struct_mutex);

		llist_for_each_entry_safe(msm_obj, next,
					freed, freed)
			free_object(msm_obj);

		mutex_unlock(&dev->struct_mutex);

		if (need_resched())
			break;
	}
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle,
		char *name)
{
	struct drm_gem_object *obj;
	int ret;

	obj = msm_gem_new(dev, size, flags);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (name)
		msm_gem_object_set_name(obj, "%s", name);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(obj);

	return ret;
}

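/* Common initialization shared by the allocation and import paths: validate
 * cache flags, allocate the msm_gem_object and put it on the inactive list:
 */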
static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct reservation_object *resv,
		struct drm_gem_object **obj,
		bool struct_mutex_locked)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	default:
		DRM_DEV_ERROR(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	mutex_init(&msm_obj->lock);

	msm_obj->flags = flags;
	msm_obj->madv = MSM_MADV_WILLNEED;

	if (resv)
		msm_obj->base.resv = resv;

	INIT_LIST_HEAD(&msm_obj->submit_entry);
	INIT_LIST_HEAD(&msm_obj->vmas);

	if (struct_mutex_locked) {
		WARN_ON(!mutex_is_locked(&dev->struct_mutex));
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
	} else {
		mutex_lock(&dev->struct_mutex);
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
		mutex_unlock(&dev->struct_mutex);
	}

	*obj = &msm_obj->base;

	return 0;
}

static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags, bool struct_mutex_locked)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_gem_object *obj = NULL;
	bool use_vram = false;
	int ret;

	size = PAGE_ALIGN(size);

	if (!msm_use_mmu(dev))
		use_vram = true;
	else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
		use_vram = true;

	if (WARN_ON(use_vram && !priv->vram.size))
		return ERR_PTR(-EINVAL);

	/* Disallow zero sized objects as they make the underlying
	 * infrastructure grumpy
	 */
	if (size == 0)
		return ERR_PTR(-EINVAL);

	ret = msm_gem_new_impl(dev, size, flags, NULL, &obj, struct_mutex_locked);
	if (ret)
		goto fail;

	if (use_vram) {
		struct msm_gem_vma *vma;
		struct page **pages;
		struct msm_gem_object *msm_obj = to_msm_bo(obj);

		mutex_lock(&msm_obj->lock);

		vma = add_vma(obj, NULL);
		mutex_unlock(&msm_obj->lock);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto fail;
		}

		to_msm_bo(obj)->vram_node = &vma->node;

		drm_gem_private_object_init(dev, obj, size);

		pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		vma->iova = physaddr(obj);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
		/*
		 * Our buffers are kept pinned, so allocating them from the
		 * MOVABLE zone is a really bad idea, and conflicts with CMA.
		 * See comments above new_inode() why this is required _and_
		 * expected if you're going to pin these pages.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
	}

	return obj;

fail:
	drm_gem_object_put_unlocked(obj);
	return ERR_PTR(ret);
}

struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, true);
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, false);
}

struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	uint32_t size;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!msm_use_mmu(dev)) {
		DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(dmabuf->size);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj, false);
	if (ret)
		goto fail;

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	mutex_lock(&msm_obj->lock);
	msm_obj->sgt = sgt;
	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!msm_obj->pages) {
		mutex_unlock(&msm_obj->lock);
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
	if (ret) {
		mutex_unlock(&msm_obj->lock);
		goto fail;
	}

	mutex_unlock(&msm_obj->lock);
	return obj;

fail:
	drm_gem_object_put_unlocked(obj);
	return ERR_PTR(ret);
}

static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova, bool locked)
{
	void *vaddr;
	struct drm_gem_object *obj = _msm_gem_new(dev, size, flags, locked);
	int ret;

	if (IS_ERR(obj))
		return ERR_CAST(obj);

	if (iova) {
		ret = msm_gem_get_and_pin_iova(obj, aspace, iova);
		if (ret)
			goto err;
	}

	vaddr = msm_gem_get_vaddr(obj);
	if (IS_ERR(vaddr)) {
		msm_gem_unpin_iova(obj, aspace);
		ret = PTR_ERR(vaddr);
		goto err;
	}

	if (bo)
		*bo = obj;

	return vaddr;
err:
	if (locked)
		drm_gem_object_put(obj);
	else
		drm_gem_object_put_unlocked(obj);

	return ERR_PTR(ret);

}

void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, false);
}

void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, true);
}

void msm_gem_kernel_put(struct drm_gem_object *bo,
		struct msm_gem_address_space *aspace, bool locked)
{
	if (IS_ERR_OR_NULL(bo))
		return;

	msm_gem_put_vaddr(bo);
	msm_gem_unpin_iova(bo, aspace);

	if (locked)
		drm_gem_object_put(bo);
	else
		drm_gem_object_put_unlocked(bo);
}

void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
{
	struct msm_gem_object *msm_obj = to_msm_bo(bo);
	va_list ap;

	if (!fmt)
		return;

	va_start(ap, fmt);
	vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
	va_end(ap);
}