// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/dma-map-ops.h>
#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include <drm/drm_prime.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

static void update_inactive(struct msm_gem_object *msm_obj);

static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}

static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	return !msm_obj->vram_node;
}

/*
 * Cache sync.. this is a bit over-complicated, to fit the dma-mapping
 * API. Really GPU cache is out of scope here (handled on cmdstream)
 * and all we need to do is invalidate newly allocated pages before
 * mapping to CPU as uncached/writecombine.
 *
 * On top of this, we have the added headache that, depending on
 * display generation, the display's iommu may be wired up to either
 * the toplevel drm device (mdss), or to the mdp sub-node, meaning
 * that here we either have dma-direct or iommu ops.
 *
 * Let this be a cautionary tale of abstraction gone wrong.
 */

static void sync_for_device(struct msm_gem_object *msm_obj)
{
	struct device *dev = msm_obj->base.dev->dev;

	dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
}

static void sync_for_cpu(struct msm_gem_object *msm_obj)
{
	struct device *dev = msm_obj->base.dev->dev;

	dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
}

/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	spin_lock(&priv->vram.lock);
	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
	spin_unlock(&priv->vram.lock);
	if (ret) {
		kvfree(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = phys_to_page(paddr);
		paddr += PAGE_SIZE;
	}

	return p;
}

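/*
 * Lazily allocate the object's backing pages (shmem pages, or carveout
 * pages when there is no IOMMU), build the sg_table, and clean the pages
 * for non-cached buffers.  Callers hold the object lock.
 */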
static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->pages = p;

		msm_obj->sgt = drm_prime_pages_to_sg(obj->dev, p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			void *ptr = ERR_CAST(msm_obj->sgt);

			DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
			msm_obj->sgt = NULL;
			return ptr;
		}

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			sync_for_device(msm_obj);
	}

	return msm_obj->pages;
}

static void put_pages_vram(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	spin_lock(&priv->vram.lock);
	drm_mm_remove_node(msm_obj->vram_node);
	spin_unlock(&priv->vram.lock);

	kvfree(msm_obj->pages);
}

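/*
 * Counterpart of get_pages(): sync non-cached buffers back for the CPU,
 * free the sg_table, and release the pages to shmem or the VRAM carveout.
 */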
static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		if (msm_obj->sgt) {
			/* For non-cached buffers, ensure the new
			 * pages are clean because display controller,
			 * GPU, etc. are not coherent:
			 */
			if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
				sync_for_cpu(msm_obj);

			sg_free_table(msm_obj->sgt);
			kfree(msm_obj->sgt);
		}

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else
			put_pages_vram(obj);

		msm_obj->pages = NULL;
	}
}

struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **p;

	msm_gem_lock(obj);

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		msm_gem_unlock(obj);
		return ERR_PTR(-EBUSY);
	}

	p = get_pages(obj);
	msm_gem_unlock(obj);
	return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
	/* when we start tracking the pin count, then do something here */
}

int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (msm_obj->flags & MSM_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(obj->filp);
		vma->vm_pgoff = 0;
		vma->vm_file = obj->filp;

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return msm_gem_mmap_obj(vma->vm_private_data, vma);
}

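/*
 * Fault handler for userspace mmaps: look up (allocating if needed) the
 * backing page for the faulting address and insert it into the VMA as a
 * mixed mapping.
 */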
vm_fault_t msm_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int err;
	vm_fault_t ret;

	/*
	 * vm_ops.open/drm_gem_mmap_obj and close get and put
	 * a reference on obj. So, we don't need to hold one here.
	 */
	err = msm_gem_lock_interruptible(obj);
	if (err) {
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		msm_gem_unlock(obj);
		return VM_FAULT_SIGBUS;
	}

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = vmf_error(PTR_ERR(pages));
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	ret = vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
out_unlock:
	msm_gem_unlock(obj);
out:
	return ret;
}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;

	WARN_ON(!msm_gem_is_locked(obj));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;

	msm_gem_lock(obj);
	offset = mmap_offset(obj);
	msm_gem_unlock(obj);
	return offset;
}

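/*
 * An object keeps one msm_gem_vma per address space it is mapped into,
 * linked on msm_obj->vmas.  These helpers add, look up, and remove list
 * entries; add_vma() and lookup_vma() expect the object lock to be held.
 */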
static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!msm_gem_is_locked(obj));

	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
	if (!vma)
		return ERR_PTR(-ENOMEM);

	vma->aspace = aspace;

	list_add_tail(&vma->list, &msm_obj->vmas);

	return vma;
}

static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!msm_gem_is_locked(obj));

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace == aspace)
			return vma;
	}

	return NULL;
}

static void del_vma(struct msm_gem_vma *vma)
{
	if (!vma)
		return;

	list_del(&vma->list);
	kfree(vma);
}

/* Called with msm_obj locked */
static void
put_iova_spaces(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!msm_gem_is_locked(obj));

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace) {
			msm_gem_purge_vma(vma->aspace, vma);
			msm_gem_close_vma(vma->aspace, vma);
		}
	}
}

/* Called with msm_obj locked */
static void
put_iova_vmas(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma, *tmp;

	WARN_ON(!msm_gem_is_locked(obj));

	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
		del_vma(vma);
	}
}

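/*
 * Find the VMA for the given address space, creating it (and reserving an
 * iova within the requested range) if it does not exist yet.  This only
 * reserves the iova; the pages are mapped by msm_gem_pin_iova() below.
 */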
static int get_iova_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
{
	struct msm_gem_vma *vma;
	int ret = 0;

	WARN_ON(!msm_gem_is_locked(obj));

	vma = lookup_vma(obj, aspace);

	if (!vma) {
		vma = add_vma(obj, aspace);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		ret = msm_gem_init_vma(aspace, vma, obj->size >> PAGE_SHIFT,
			range_start, range_end);
		if (ret) {
			del_vma(vma);
			return ret;
		}
	}

	*iova = vma->iova;
	return 0;
}

static int msm_gem_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;
	struct page **pages;
	int prot = IOMMU_READ;

	if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
		prot |= IOMMU_WRITE;

	if (msm_obj->flags & MSM_BO_MAP_PRIV)
		prot |= IOMMU_PRIV;

	WARN_ON(!msm_gem_is_locked(obj));

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
		return -EBUSY;

	vma = lookup_vma(obj, aspace);
	if (WARN_ON(!vma))
		return -EINVAL;

	pages = get_pages(obj);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	return msm_gem_map_vma(aspace, vma, prot,
			msm_obj->sgt, obj->size >> PAGE_SHIFT);
}

static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
{
	u64 local;
	int ret;

	WARN_ON(!msm_gem_is_locked(obj));

	ret = get_iova_locked(obj, aspace, &local,
		range_start, range_end);

	if (!ret)
		ret = msm_gem_pin_iova(obj, aspace);

	if (!ret)
		*iova = local;

	return ret;
}

/*
 * get iova and pin it. Should have a matching put
 * limits iova to specified range (in pages)
 */
int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
{
	int ret;

	msm_gem_lock(obj);
	ret = get_and_pin_iova_range_locked(obj, aspace, iova, range_start, range_end);
	msm_gem_unlock(obj);

	return ret;
}

int msm_gem_get_and_pin_iova_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	return get_and_pin_iova_range_locked(obj, aspace, iova, 0, U64_MAX);
}

/* get iova and pin it. Should have a matching put */
int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	return msm_gem_get_and_pin_iova_range(obj, aspace, iova, 0, U64_MAX);
}

/*
 * Get an iova but don't pin it. Doesn't need a put because iovas are currently
 * valid for the life of the object
 */
int msm_gem_get_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	int ret;

	msm_gem_lock(obj);
	ret = get_iova_locked(obj, aspace, iova, 0, U64_MAX);
	msm_gem_unlock(obj);

	return ret;
}

/* get iova without taking a reference, used in places where you have
 * already done a 'msm_gem_get_and_pin_iova' or 'msm_gem_get_iova'
 */
uint64_t msm_gem_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_vma *vma;

	msm_gem_lock(obj);
	vma = lookup_vma(obj, aspace);
	msm_gem_unlock(obj);
	WARN_ON(!vma);

	return vma ? vma->iova : 0;
}

/*
 * Locked variant of msm_gem_unpin_iova()
 */
void msm_gem_unpin_iova_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_vma *vma;

	WARN_ON(!msm_gem_is_locked(obj));

	vma = lookup_vma(obj, aspace);

	if (!WARN_ON(!vma))
		msm_gem_unmap_vma(aspace, vma);
}

/*
 * Unpin an iova by updating the reference counts. The memory isn't actually
 * purged until something else (shrinker, mm_notifier, destroy, etc) decides
 * to get rid of it
 */
void msm_gem_unpin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	msm_gem_lock(obj);
	msm_gem_unpin_iova_locked(obj, aspace);
	msm_gem_unlock(obj);
}

int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_put(obj);

fail:
	return ret;
}

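/*
 * Return a kernel virtual address for the object, creating a writecombine
 * vmap of the backing pages on first use.  @madv is the maximum madvise
 * state the caller will tolerate (see msm_gem_get_vaddr_active() below).
 * Callers hold the object lock.
 */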
static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	WARN_ON(!msm_gem_is_locked(obj));

	if (obj->import_attach)
		return ERR_PTR(-ENODEV);

	if (WARN_ON(msm_obj->madv > madv)) {
		DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
			msm_obj->madv, madv);
		return ERR_PTR(-EBUSY);
	}

	/* increment vmap_count *before* vmap() call, so shrinker can
	 * check vmap_count (is_vunmapable()) outside of msm_obj lock.
	 * This guarantees that we won't try to msm_gem_vunmap() this
	 * same object from within the vmap() call (while we already
	 * hold msm_obj lock)
	 */
	msm_obj->vmap_count++;

	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
		if (msm_obj->vaddr == NULL) {
			ret = -ENOMEM;
			goto fail;
		}
	}

	return msm_obj->vaddr;

fail:
	msm_obj->vmap_count--;
	return ERR_PTR(ret);
}

void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
{
	return get_vaddr(obj, MSM_MADV_WILLNEED);
}

void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
	void *ret;

	msm_gem_lock(obj);
	ret = msm_gem_get_vaddr_locked(obj);
	msm_gem_unlock(obj);

	return ret;
}

/*
 * Don't use this! It is for the very special case of dumping
 * submits from GPU hangs or faults, where the bo may already
 * be MSM_MADV_DONTNEED, but we know the buffer is still on the
 * active list.
 */
void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
{
	return get_vaddr(obj, __MSM_MADV_PURGED);
}

void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!msm_gem_is_locked(obj));
	WARN_ON(msm_obj->vmap_count < 1);

	msm_obj->vmap_count--;
}

void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
	msm_gem_lock(obj);
	msm_gem_put_vaddr_locked(obj);
	msm_gem_unlock(obj);
}

/* Update madvise status, returns true if not purged, else
 * false or -errno.
 */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_lock(obj);

	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	madv = msm_obj->madv;

	/* If the obj is inactive, we might need to move it
	 * between inactive lists
	 */
	if (msm_obj->active_count == 0)
		update_inactive(msm_obj);

	msm_gem_unlock(obj);

	return (madv != __MSM_MADV_PURGED);
}

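/*
 * Reclaim the backing memory of a purgeable object: unmap it from all
 * address spaces and from the CPU, drop its shmem pages, and mark it
 * __MSM_MADV_PURGED.  Called with the object lock held.
 */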
void msm_gem_purge(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!is_purgeable(msm_obj));
	WARN_ON(obj->import_attach);

	put_iova_spaces(obj);

	msm_gem_vunmap(obj);

	put_pages(obj);

	put_iova_vmas(obj);

	msm_obj->madv = __MSM_MADV_PURGED;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
			0, (loff_t)-1);
}

void msm_gem_vunmap(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!msm_gem_is_locked(obj));

	if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
		return;

	vunmap(msm_obj->vaddr);
	msm_obj->vaddr = NULL;
}

/* must be called before _move_to_active().. */
int msm_gem_sync_object(struct drm_gem_object *obj,
		struct msm_fence_context *fctx, bool exclusive)
{
	struct dma_resv_list *fobj;
	struct dma_fence *fence;
	int i, ret;

	fobj = dma_resv_get_list(obj->resv);
	if (!fobj || (fobj->shared_count == 0)) {
		fence = dma_resv_get_excl(obj->resv);
		/* don't need to wait on our own fences, since ring is fifo */
		if (fence && (fence->context != fctx->context)) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	if (!exclusive || !fobj)
		return 0;

	for (i = 0; i < fobj->shared_count; i++) {
		fence = rcu_dereference_protected(fobj->shared[i],
						dma_resv_held(obj->resv));
		if (fence->context != fctx->context) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	return 0;
}

void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	might_sleep();
	WARN_ON(!msm_gem_is_locked(obj));
	WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);

	if (msm_obj->active_count++ == 0) {
		mutex_lock(&priv->mm_lock);
		list_del_init(&msm_obj->mm_list);
		list_add_tail(&msm_obj->mm_list, &gpu->active_list);
		mutex_unlock(&priv->mm_lock);
	}
}

void msm_gem_active_put(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	might_sleep();
	WARN_ON(!msm_gem_is_locked(obj));

	if (--msm_obj->active_count == 0) {
		update_inactive(msm_obj);
	}
}

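/*
 * Move an idle object to the inactive list matching its madvise state
 * (inactive_willneed vs. inactive_dontneed).  Expects active_count == 0;
 * takes mm_lock internally.
 */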
static void update_inactive(struct msm_gem_object *msm_obj)
{
	struct msm_drm_private *priv = msm_obj->base.dev->dev_private;

	mutex_lock(&priv->mm_lock);
	WARN_ON(msm_obj->active_count != 0);

	list_del_init(&msm_obj->mm_list);
	if (msm_obj->madv == MSM_MADV_WILLNEED)
		list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed);
	else
		list_add_tail(&msm_obj->mm_list, &priv->inactive_dontneed);

	mutex_unlock(&priv->mm_lock);
}

int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	bool write = !!(op & MSM_PREP_WRITE);
	unsigned long remain =
		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
	long ret;

	ret = dma_resv_wait_timeout_rcu(obj->resv, write,
						  true, remain);
	if (ret == 0)
		return remain == 0 ? -EBUSY : -ETIMEDOUT;
	else if (ret < 0)
		return ret;

	/* TODO cache maintenance */

	return 0;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}

#ifdef CONFIG_DEBUG_FS
static void describe_fence(struct dma_fence *fence, const char *type,
		struct seq_file *m)
{
	if (!dma_fence_is_signaled(fence))
		seq_printf(m, "\t%9s: %s %s seq %llu\n", type,
				fence->ops->get_driver_name(fence),
				fence->ops->get_timeline_name(fence),
				fence->seqno);
}

void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct dma_resv *robj = obj->resv;
	struct dma_resv_list *fobj;
	struct dma_fence *fence;
	struct msm_gem_vma *vma;
	uint64_t off = drm_vma_node_start(&obj->vma_node);
	const char *madv;

	msm_gem_lock(obj);

	switch (msm_obj->madv) {
	case __MSM_MADV_PURGED:
		madv = " purged";
		break;
	case MSM_MADV_DONTNEED:
		madv = " purgeable";
		break;
	case MSM_MADV_WILLNEED:
	default:
		madv = "";
		break;
	}

	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, msm_obj->vaddr);

	seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);

	if (!list_empty(&msm_obj->vmas)) {

		seq_puts(m, "      vmas:");

		list_for_each_entry(vma, &msm_obj->vmas, list) {
			const char *name, *comm;
			if (vma->aspace) {
				struct msm_gem_address_space *aspace = vma->aspace;
				struct task_struct *task =
					get_pid_task(aspace->pid, PIDTYPE_PID);
				if (task) {
					comm = kstrdup(task->comm, GFP_KERNEL);
				} else {
					comm = NULL;
				}
				name = aspace->name;
			} else {
				name = comm = NULL;
			}
			seq_printf(m, " [%s%s%s: aspace=%p, %08llx,%s,inuse=%d]",
				name, comm ? ":" : "", comm ? comm : "",
				vma->aspace, vma->iova,
				vma->mapped ? "mapped" : "unmapped",
				vma->inuse);
			kfree(comm);
		}

		seq_puts(m, "\n");
	}

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();

	msm_gem_unlock(obj);
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_object *msm_obj;
	int count = 0;
	size_t size = 0;

	seq_puts(m, "   flags       id ref  offset   kaddr            size     madv      name\n");
	list_for_each_entry(msm_obj, list, mm_list) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_puts(m, "   ");
		msm_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

/* don't call directly! Use drm_gem_object_put_locked() and friends */
void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;

	mutex_lock(&priv->mm_lock);
	list_del(&msm_obj->mm_list);
	mutex_unlock(&priv->mm_lock);

	msm_gem_lock(obj);

	/* object should not be on active list: */
	WARN_ON(is_active(msm_obj));

	put_iova_spaces(obj);

	if (obj->import_attach) {
		WARN_ON(msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		if (msm_obj->pages)
			kvfree(msm_obj->pages);

		/* dma_buf_detach() grabs resv lock, so we need to unlock
		 * prior to drm_prime_gem_destroy
		 */
		msm_gem_unlock(obj);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		msm_gem_vunmap(obj);
		put_pages(obj);
		msm_gem_unlock(obj);
	}

	put_iova_vmas(obj);

	drm_gem_object_release(obj);

	kfree(msm_obj);
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle,
		char *name)
{
	struct drm_gem_object *obj;
	int ret;

	obj = msm_gem_new(dev, size, flags);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (name)
		msm_gem_object_set_name(obj, "%s", name);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(obj);

	return ret;
}

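/*
 * Common allocation path for msm_gem_new() and msm_gem_import(): validate
 * the cache flags and allocate/initialize the msm_gem_object itself,
 * without creating any backing storage.
 */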
static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct drm_gem_object **obj)
{
	struct msm_gem_object *msm_obj;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	default:
		DRM_DEV_ERROR(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	msm_obj->flags = flags;
	msm_obj->madv = MSM_MADV_WILLNEED;

	INIT_LIST_HEAD(&msm_obj->submit_entry);
	INIT_LIST_HEAD(&msm_obj->vmas);

	*obj = &msm_obj->base;

	return 0;
}

static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags, bool struct_mutex_locked)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj = NULL;
	bool use_vram = false;
	int ret;

	size = PAGE_ALIGN(size);

	if (!msm_use_mmu(dev))
		use_vram = true;
	else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
		use_vram = true;

	if (WARN_ON(use_vram && !priv->vram.size))
		return ERR_PTR(-EINVAL);

	/* Disallow zero sized objects as they make the underlying
	 * infrastructure grumpy
	 */
	if (size == 0)
		return ERR_PTR(-EINVAL);

	ret = msm_gem_new_impl(dev, size, flags, &obj);
	if (ret)
		goto fail;

	msm_obj = to_msm_bo(obj);

	if (use_vram) {
		struct msm_gem_vma *vma;
		struct page **pages;

		msm_gem_lock(obj);

		vma = add_vma(obj, NULL);
		msm_gem_unlock(obj);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto fail;
		}

		to_msm_bo(obj)->vram_node = &vma->node;

		drm_gem_private_object_init(dev, obj, size);

		pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		vma->iova = physaddr(obj);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
		/*
		 * Our buffers are kept pinned, so allocating them from the
		 * MOVABLE zone is a really bad idea, and conflicts with CMA.
		 * See comments above new_inode() why this is required _and_
		 * expected if you're going to pin these pages.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
	}

	mutex_lock(&priv->mm_lock);
	/* Initially obj is idle, obj->madv == WILLNEED: */
	list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed);
	mutex_unlock(&priv->mm_lock);

	return obj;

fail:
	if (struct_mutex_locked) {
		drm_gem_object_put_locked(obj);
	} else {
		drm_gem_object_put(obj);
	}
	return ERR_PTR(ret);
}

struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, true);
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, false);
}

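/*
 * Wrap an imported dma-buf in a GEM object: the sg_table comes from the
 * exporter, so only the page array is allocated here before the object is
 * put on the inactive list.
 */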
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	uint32_t size;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!msm_use_mmu(dev)) {
		DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(dmabuf->size);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
	if (ret)
		goto fail;

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	msm_gem_lock(obj);
	msm_obj->sgt = sgt;
	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!msm_obj->pages) {
		msm_gem_unlock(obj);
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
	if (ret) {
		msm_gem_unlock(obj);
		goto fail;
	}

	msm_gem_unlock(obj);

	mutex_lock(&priv->mm_lock);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed);
	mutex_unlock(&priv->mm_lock);

	return obj;

fail:
	drm_gem_object_put(obj);
	return ERR_PTR(ret);
}

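/*
 * Convenience helper for kernel-internal allocations: create a BO,
 * optionally pin an iova in @aspace, and return the kernel vaddr.
 * Undone with msm_gem_kernel_put().
 */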
static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova, bool locked)
{
	void *vaddr;
	struct drm_gem_object *obj = _msm_gem_new(dev, size, flags, locked);
	int ret;

	if (IS_ERR(obj))
		return ERR_CAST(obj);

	if (iova) {
		ret = msm_gem_get_and_pin_iova(obj, aspace, iova);
		if (ret)
			goto err;
	}

	vaddr = msm_gem_get_vaddr(obj);
	if (IS_ERR(vaddr)) {
		msm_gem_unpin_iova(obj, aspace);
		ret = PTR_ERR(vaddr);
		goto err;
	}

	if (bo)
		*bo = obj;

	return vaddr;
err:
	if (locked)
		drm_gem_object_put_locked(obj);
	else
		drm_gem_object_put(obj);

	return ERR_PTR(ret);
}

void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, false);
}

void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, true);
}

void msm_gem_kernel_put(struct drm_gem_object *bo,
		struct msm_gem_address_space *aspace, bool locked)
{
	if (IS_ERR_OR_NULL(bo))
		return;

	msm_gem_put_vaddr(bo);
	msm_gem_unpin_iova(bo, aspace);

	if (locked)
		drm_gem_object_put_locked(bo);
	else
		drm_gem_object_put(bo);
}

void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
{
	struct msm_gem_object *msm_obj = to_msm_bo(bo);
	va_list ap;

	if (!fmt)
		return;

	va_start(ap, fmt);
	vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
	va_end(ap);
}