// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/dma-map-ops.h>
#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include <drm/drm_prime.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

static void update_inactive(struct msm_gem_object *msm_obj);

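/* Physical address of the start of an object backed by the VRAM carveout: */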
static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}

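/* True if the object is shmem backed, false if it uses the VRAM carveout: */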
static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	return !msm_obj->vram_node;
}

/*
 * Cache sync.. this is a bit over-complicated, to fit dma-mapping
 * API. Really GPU cache is out of scope here (handled on cmdstream)
 * and all we need to do is invalidate newly allocated pages before
 * mapping to CPU as uncached/writecombine.
 *
 * On top of this, we have the added headache, that depending on
 * display generation, the display's iommu may be wired up to either
 * the toplevel drm device (mdss), or to the mdp sub-node, meaning
 * that here we either have dma-direct or iommu ops.
 *
 * Let this be a cautionary tale of abstraction gone wrong.
 */

static void sync_for_device(struct msm_gem_object *msm_obj)
{
	struct device *dev = msm_obj->base.dev->dev;

	dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
}

static void sync_for_cpu(struct msm_gem_object *msm_obj)
{
	struct device *dev = msm_obj->base.dev->dev;

	dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
}

/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	spin_lock(&priv->vram.lock);
	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
	spin_unlock(&priv->vram.lock);
	if (ret) {
		kvfree(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = phys_to_page(paddr);
		paddr += PAGE_SIZE;
	}

	return p;
}

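/* Get (allocating if needed) the backing pages and sg table for an
 * object.  Must be called with the object lock held.
 */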
static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->pages = p;

		msm_obj->sgt = drm_prime_pages_to_sg(obj->dev, p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			void *ptr = ERR_CAST(msm_obj->sgt);

			DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
			msm_obj->sgt = NULL;
			return ptr;
		}

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			sync_for_device(msm_obj);

		update_inactive(msm_obj);
	}

	return msm_obj->pages;
}

static void put_pages_vram(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	spin_lock(&priv->vram.lock);
	drm_mm_remove_node(msm_obj->vram_node);
	spin_unlock(&priv->vram.lock);

	kvfree(msm_obj->pages);
}

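/* Release the sg table and backing pages, syncing non-cached buffers
 * back to the CPU first:
 */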
static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		if (msm_obj->sgt) {
			/* For non-cached buffers, ensure the new
			 * pages are clean because display controller,
			 * GPU, etc. are not coherent:
			 */
			if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
				sync_for_cpu(msm_obj);

			sg_free_table(msm_obj->sgt);
			kfree(msm_obj->sgt);
			msm_obj->sgt = NULL;
		}

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else
			put_pages_vram(obj);

		msm_obj->pages = NULL;
	}
}

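/* Get and pin the backing pages, taking the object lock.  Paired with
 * msm_gem_put_pages():
 */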
struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **p;

	msm_gem_lock(obj);

	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		msm_gem_unlock(obj);
		return ERR_PTR(-EBUSY);
	}

	p = get_pages(obj);

	if (!IS_ERR(p)) {
		msm_obj->pin_count++;
		update_inactive(msm_obj);
	}

	msm_gem_unlock(obj);
	return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_lock(obj);
	msm_obj->pin_count--;
	GEM_WARN_ON(msm_obj->pin_count < 0);
	update_inactive(msm_obj);
	msm_gem_unlock(obj);
}

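/* WC and uncached buffers are mapped to the CPU as writecombine: */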
static pgprot_t msm_gem_pgprot(struct msm_gem_object *msm_obj, pgprot_t prot)
{
	if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
		return pgprot_writecombine(prot);
	return prot;
}

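/* Fault handler for userspace mmap: faults in a single page from the
 * object's backing pages.
 */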
static vm_fault_t msm_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int err;
	vm_fault_t ret;

	/*
	 * vm_ops.open/drm_gem_mmap_obj and close get and put
	 * a reference on obj. So, we don't need to hold one here.
	 */
	err = msm_gem_lock_interruptible(obj);
	if (err) {
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		msm_gem_unlock(obj);
		return VM_FAULT_SIGBUS;
	}

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = vmf_error(PTR_ERR(pages));
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	ret = vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
out_unlock:
	msm_gem_unlock(obj);
out:
	return ret;
}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;

	msm_gem_lock(obj);
	offset = mmap_offset(obj);
	msm_gem_unlock(obj);
	return offset;
}

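/* Allocate a new vma for the given address space and add it to the
 * object's vma list.  The object lock must be held.
 */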
static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
	if (!vma)
		return ERR_PTR(-ENOMEM);

	vma->aspace = aspace;

	list_add_tail(&vma->list, &msm_obj->vmas);

	return vma;
}

static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace == aspace)
			return vma;
	}

	return NULL;
}

static void del_vma(struct msm_gem_vma *vma)
{
	if (!vma)
		return;

	list_del(&vma->list);
	kfree(vma);
}

/*
 * If close is true, this also closes the VMA (releasing the allocated
 * iova range) in addition to removing the iommu mapping. In the eviction
 * case (!close), we keep the iova allocated, but only remove the iommu
 * mapping.
 */
static void
put_iova_spaces(struct drm_gem_object *obj, bool close)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace) {
			msm_gem_purge_vma(vma->aspace, vma);
			if (close)
				msm_gem_close_vma(vma->aspace, vma);
		}
	}
}

/* Called with msm_obj locked */
static void
put_iova_vmas(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma, *tmp;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
		del_vma(vma);
	}
}

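/* Look up (allocating on first use in @aspace) the object's vma and
 * return its iova, without pinning it:
 */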
static int get_iova_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
{
	struct msm_gem_vma *vma;
	int ret = 0;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	vma = lookup_vma(obj, aspace);

	if (!vma) {
		vma = add_vma(obj, aspace);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		ret = msm_gem_init_vma(aspace, vma, obj->size >> PAGE_SHIFT,
			range_start, range_end);
		if (ret) {
			del_vma(vma);
			return ret;
		}
	}

	*iova = vma->iova;
	return 0;
}

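/* Map the object into the given address space, deriving the IOMMU prot
 * bits from the object's flags, and bump the pin count:
 */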
static int msm_gem_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;
	struct page **pages;
	int ret, prot = IOMMU_READ;

	if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
		prot |= IOMMU_WRITE;

	if (msm_obj->flags & MSM_BO_MAP_PRIV)
		prot |= IOMMU_PRIV;

	if (msm_obj->flags & MSM_BO_CACHED_COHERENT)
		prot |= IOMMU_CACHE;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
		return -EBUSY;

	vma = lookup_vma(obj, aspace);
	if (GEM_WARN_ON(!vma))
		return -EINVAL;

	pages = get_pages(obj);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	ret = msm_gem_map_vma(aspace, vma, prot,
			msm_obj->sgt, obj->size >> PAGE_SHIFT);

	if (!ret)
		msm_obj->pin_count++;

	return ret;
}

static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
{
	u64 local;
	int ret;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	ret = get_iova_locked(obj, aspace, &local,
		range_start, range_end);

	if (!ret)
		ret = msm_gem_pin_iova(obj, aspace);

	if (!ret)
		*iova = local;

	return ret;
}

/*
 * Get the iova and pin it.  Should have a matching put.
 * Limits the iova to the specified range (in pages).
 */
int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
{
	int ret;

	msm_gem_lock(obj);
	ret = get_and_pin_iova_range_locked(obj, aspace, iova, range_start, range_end);
	msm_gem_unlock(obj);

	return ret;
}

int msm_gem_get_and_pin_iova_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	return get_and_pin_iova_range_locked(obj, aspace, iova, 0, U64_MAX);
}

/* get iova and pin it. Should have a matching put */
int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	return msm_gem_get_and_pin_iova_range(obj, aspace, iova, 0, U64_MAX);
}

/*
 * Get an iova but don't pin it. Doesn't need a put because iovas are currently
 * valid for the life of the object
 */
int msm_gem_get_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	int ret;

	msm_gem_lock(obj);
	ret = get_iova_locked(obj, aspace, iova, 0, U64_MAX);
	msm_gem_unlock(obj);

	return ret;
}

/* get iova without taking a reference, used in places where you have
 * already done a 'msm_gem_get_and_pin_iova' or 'msm_gem_get_iova'
 */
uint64_t msm_gem_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_vma *vma;

	msm_gem_lock(obj);
	vma = lookup_vma(obj, aspace);
	msm_gem_unlock(obj);
	GEM_WARN_ON(!vma);

	return vma ? vma->iova : 0;
}

/*
 * Locked variant of msm_gem_unpin_iova()
 */
void msm_gem_unpin_iova_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	vma = lookup_vma(obj, aspace);

	if (!GEM_WARN_ON(!vma)) {
		msm_gem_unmap_vma(aspace, vma);

		msm_obj->pin_count--;
		GEM_WARN_ON(msm_obj->pin_count < 0);

		update_inactive(msm_obj);
	}
}

/*
 * Unpin an iova by updating the reference counts. The memory isn't actually
 * purged until something else (shrinker, mm_notifier, destroy, etc) decides
 * to get rid of it
 */
void msm_gem_unpin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	msm_gem_lock(obj);
	msm_gem_unpin_iova_locked(obj, aspace);
	msm_gem_unlock(obj);
}

int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_put(obj);

fail:
	return ret;
}

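/* Internal helper for the vmap paths; @madv is the maximum madvise
 * state in which mapping the object is still allowed:
 */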
static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	if (obj->import_attach)
		return ERR_PTR(-ENODEV);

	if (GEM_WARN_ON(msm_obj->madv > madv)) {
		DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
			msm_obj->madv, madv);
		return ERR_PTR(-EBUSY);
	}

	/* increment vmap_count *before* vmap() call, so shrinker can
	 * check vmap_count (is_vunmapable()) outside of msm_obj lock.
	 * This guarantees that we won't try to msm_gem_vunmap() this
	 * same object from within the vmap() call (while we already
	 * hold msm_obj lock)
	 */
	msm_obj->vmap_count++;

	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, msm_gem_pgprot(msm_obj, PAGE_KERNEL));
		if (msm_obj->vaddr == NULL) {
			ret = -ENOMEM;
			goto fail;
		}

		update_inactive(msm_obj);
	}

	return msm_obj->vaddr;

fail:
	msm_obj->vmap_count--;
	return ERR_PTR(ret);
}

void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
{
	return get_vaddr(obj, MSM_MADV_WILLNEED);
}

void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
	void *ret;

	msm_gem_lock(obj);
	ret = msm_gem_get_vaddr_locked(obj);
	msm_gem_unlock(obj);

	return ret;
}

/*
 * Don't use this! It is for the very special case of dumping
 * submits from GPU hangs or faults, where the bo may already
 * be MSM_MADV_DONTNEED, but we know the buffer is still on the
 * active list.
 */
void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
{
	return get_vaddr(obj, __MSM_MADV_PURGED);
}

void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	GEM_WARN_ON(!msm_gem_is_locked(obj));
	GEM_WARN_ON(msm_obj->vmap_count < 1);

	msm_obj->vmap_count--;
}

void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
	msm_gem_lock(obj);
	msm_gem_put_vaddr_locked(obj);
	msm_gem_unlock(obj);
}

/* Update madvise status, returns true if not purged, else
 * false or -errno.
 */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_lock(obj);

	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	madv = msm_obj->madv;

	/* If the obj is inactive, we might need to move it
	 * between inactive lists
	 */
	if (msm_obj->active_count == 0)
		update_inactive(msm_obj);

	msm_gem_unlock(obj);

	return (madv != __MSM_MADV_PURGED);
}

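/* Free the backing store of a purgeable (MADV_DONTNEED) object, eg.
 * under memory pressure.  Called with the object locked:
 */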
void msm_gem_purge(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	GEM_WARN_ON(!msm_gem_is_locked(obj));
	GEM_WARN_ON(!is_purgeable(msm_obj));

	/* Get rid of any iommu mapping(s): */
	put_iova_spaces(obj, true);

	msm_gem_vunmap(obj);

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);

	put_pages(obj);

	put_iova_vmas(obj);

	msm_obj->madv = __MSM_MADV_PURGED;
	update_inactive(msm_obj);

	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
			0, (loff_t)-1);
}

/*
 * Unpin the backing pages and make them available to be swapped out.
 */
void msm_gem_evict(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	GEM_WARN_ON(!msm_gem_is_locked(obj));
	GEM_WARN_ON(is_unevictable(msm_obj));
	GEM_WARN_ON(!msm_obj->evictable);
	GEM_WARN_ON(msm_obj->active_count);

	/* Get rid of any iommu mapping(s): */
	put_iova_spaces(obj, false);

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);

	put_pages(obj);

	update_inactive(msm_obj);
}

void msm_gem_vunmap(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	if (!msm_obj->vaddr || GEM_WARN_ON(!is_vunmapable(msm_obj)))
		return;

	vunmap(msm_obj->vaddr);
	msm_obj->vaddr = NULL;
}

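/* Called when the object becomes active on the GPU: moves it to the
 * gpu's active_list.  Paired with msm_gem_active_put():
 */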
void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	might_sleep();
	GEM_WARN_ON(!msm_gem_is_locked(obj));
	GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
	GEM_WARN_ON(msm_obj->dontneed);

	if (msm_obj->active_count++ == 0) {
		mutex_lock(&priv->mm_lock);
		if (msm_obj->evictable)
			mark_unevictable(msm_obj);
		list_move_tail(&msm_obj->mm_list, &gpu->active_list);
		mutex_unlock(&priv->mm_lock);
	}
}

void msm_gem_active_put(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	might_sleep();
	GEM_WARN_ON(!msm_gem_is_locked(obj));

	if (--msm_obj->active_count == 0) {
		update_inactive(msm_obj);
	}
}

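/* Move an inactive object to the appropriate inactive list
 * (willneed/dontneed/unpinned) according to its madv state, and update
 * its purgeable/evictable accounting:
 */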
static void update_inactive(struct msm_gem_object *msm_obj)
{
	struct msm_drm_private *priv = msm_obj->base.dev->dev_private;

	GEM_WARN_ON(!msm_gem_is_locked(&msm_obj->base));

	if (msm_obj->active_count != 0)
		return;

	mutex_lock(&priv->mm_lock);

	if (msm_obj->dontneed)
		mark_unpurgeable(msm_obj);
	if (msm_obj->evictable)
		mark_unevictable(msm_obj);

	list_del(&msm_obj->mm_list);
	if ((msm_obj->madv == MSM_MADV_WILLNEED) && msm_obj->sgt) {
		list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed);
		mark_evictable(msm_obj);
	} else if (msm_obj->madv == MSM_MADV_DONTNEED) {
		list_add_tail(&msm_obj->mm_list, &priv->inactive_dontneed);
		mark_purgeable(msm_obj);
	} else {
		GEM_WARN_ON((msm_obj->madv != __MSM_MADV_PURGED) && msm_obj->sgt);
		list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
	}

	mutex_unlock(&priv->mm_lock);
}

int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	bool write = !!(op & MSM_PREP_WRITE);
	unsigned long remain =
		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
	long ret;

	ret = dma_resv_wait_timeout(obj->resv, write, true, remain);
	if (ret == 0)
		return remain == 0 ? -EBUSY : -ETIMEDOUT;
	else if (ret < 0)
		return ret;

	/* TODO cache maintenance */

	return 0;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}

#ifdef CONFIG_DEBUG_FS
static void describe_fence(struct dma_fence *fence, const char *type,
		struct seq_file *m)
{
	if (!dma_fence_is_signaled(fence))
		seq_printf(m, "\t%9s: %s %s seq %llu\n", type,
				fence->ops->get_driver_name(fence),
				fence->ops->get_timeline_name(fence),
				fence->seqno);
}

void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
		struct msm_gem_stats *stats)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct dma_resv *robj = obj->resv;
	struct dma_resv_list *fobj;
	struct dma_fence *fence;
	struct msm_gem_vma *vma;
	uint64_t off = drm_vma_node_start(&obj->vma_node);
	const char *madv;

	msm_gem_lock(obj);

	stats->all.count++;
	stats->all.size += obj->size;

	if (is_active(msm_obj)) {
		stats->active.count++;
		stats->active.size += obj->size;
	}

	if (msm_obj->pages) {
		stats->resident.count++;
		stats->resident.size += obj->size;
	}

	switch (msm_obj->madv) {
	case __MSM_MADV_PURGED:
		stats->purged.count++;
		stats->purged.size += obj->size;
		madv = " purged";
		break;
	case MSM_MADV_DONTNEED:
		stats->purgeable.count++;
		stats->purgeable.size += obj->size;
		madv = " purgeable";
		break;
	case MSM_MADV_WILLNEED:
	default:
		madv = "";
		break;
	}

	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, msm_obj->vaddr);

	seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);

	if (!list_empty(&msm_obj->vmas)) {

		seq_puts(m, "      vmas:");

		list_for_each_entry(vma, &msm_obj->vmas, list) {
			const char *name, *comm;
			if (vma->aspace) {
				struct msm_gem_address_space *aspace = vma->aspace;
				struct task_struct *task =
					get_pid_task(aspace->pid, PIDTYPE_PID);
				if (task) {
					comm = kstrdup(task->comm, GFP_KERNEL);
					put_task_struct(task);
				} else {
					comm = NULL;
				}
				name = aspace->name;
			} else {
				name = comm = NULL;
			}
			seq_printf(m, " [%s%s%s: aspace=%p, %08llx,%s,inuse=%d]",
				name, comm ? ":" : "", comm ? comm : "",
				vma->aspace, vma->iova,
				vma->mapped ? "mapped" : "unmapped",
				vma->inuse);
			kfree(comm);
		}

		seq_puts(m, "\n");
	}

	rcu_read_lock();
	fobj = dma_resv_shared_list(robj);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			describe_fence(fence, "Shared", m);
		}
	}

	fence = dma_resv_excl_fence(robj);
	if (fence)
		describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();

	msm_gem_unlock(obj);
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_stats stats = {};
	struct msm_gem_object *msm_obj;

	seq_puts(m, "   flags       id ref  offset   kaddr            size     madv      name\n");
	list_for_each_entry(msm_obj, list, node) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_puts(m, "   ");
		msm_gem_describe(obj, m, &stats);
	}

	seq_printf(m, "Total:     %4d objects, %9zu bytes\n",
			stats.all.count, stats.all.size);
	seq_printf(m, "Active:    %4d objects, %9zu bytes\n",
			stats.active.count, stats.active.size);
	seq_printf(m, "Resident:  %4d objects, %9zu bytes\n",
			stats.resident.count, stats.resident.size);
	seq_printf(m, "Purgeable: %4d objects, %9zu bytes\n",
			stats.purgeable.count, stats.purgeable.size);
	seq_printf(m, "Purged:    %4d objects, %9zu bytes\n",
			stats.purged.count, stats.purged.size);
}
#endif

/* don't call directly! Use drm_gem_object_put() */
void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;

	mutex_lock(&priv->obj_lock);
	list_del(&msm_obj->node);
	mutex_unlock(&priv->obj_lock);

	mutex_lock(&priv->mm_lock);
	if (msm_obj->dontneed)
		mark_unpurgeable(msm_obj);
	list_del(&msm_obj->mm_list);
	mutex_unlock(&priv->mm_lock);

	msm_gem_lock(obj);

	/* object should not be on active list: */
	GEM_WARN_ON(is_active(msm_obj));

	put_iova_spaces(obj, true);

	if (obj->import_attach) {
		GEM_WARN_ON(msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		kvfree(msm_obj->pages);

		put_iova_vmas(obj);

		/* dma_buf_detach() grabs resv lock, so we need to unlock
		 * prior to drm_prime_gem_destroy
		 */
		msm_gem_unlock(obj);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		msm_gem_vunmap(obj);
		put_pages(obj);
		put_iova_vmas(obj);
		msm_gem_unlock(obj);
	}

	drm_gem_object_release(obj);

	kfree(msm_obj);
}

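/* Set up vma flags and page protection for userspace mmap; the pages
 * themselves are faulted in later by msm_gem_fault():
 */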
static int msm_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_page_prot = msm_gem_pgprot(msm_obj, vm_get_page_prot(vma->vm_flags));

	return 0;
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle,
		char *name)
{
	struct drm_gem_object *obj;
	int ret;

	obj = msm_gem_new(dev, size, flags);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (name)
		msm_gem_object_set_name(obj, "%s", name);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(obj);

	return ret;
}

static const struct vm_operations_struct vm_ops = {
	.fault = msm_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct drm_gem_object_funcs msm_gem_object_funcs = {
	.free = msm_gem_free_object,
	.pin = msm_gem_prime_pin,
	.unpin = msm_gem_prime_unpin,
	.get_sg_table = msm_gem_prime_get_sg_table,
	.vmap = msm_gem_prime_vmap,
	.vunmap = msm_gem_prime_vunmap,
	.mmap = msm_gem_object_mmap,
	.vm_ops = &vm_ops,
};

static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct drm_gem_object **obj)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	case MSM_BO_CACHED_COHERENT:
		if (priv->has_cached_coherent)
			break;
		fallthrough;
	default:
		DRM_DEV_ERROR(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	msm_obj->flags = flags;
	msm_obj->madv = MSM_MADV_WILLNEED;

	INIT_LIST_HEAD(&msm_obj->node);
	INIT_LIST_HEAD(&msm_obj->vmas);

	*obj = &msm_obj->base;
	(*obj)->funcs = &msm_gem_object_funcs;

	return 0;
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev, uint32_t size, uint32_t flags)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj = NULL;
	bool use_vram = false;
	int ret;

	size = PAGE_ALIGN(size);

	if (!msm_use_mmu(dev))
		use_vram = true;
	else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
		use_vram = true;

	if (GEM_WARN_ON(use_vram && !priv->vram.size))
		return ERR_PTR(-EINVAL);

	/* Disallow zero sized objects as they make the underlying
	 * infrastructure grumpy
	 */
	if (size == 0)
		return ERR_PTR(-EINVAL);

	ret = msm_gem_new_impl(dev, size, flags, &obj);
	if (ret)
		return ERR_PTR(ret);

	msm_obj = to_msm_bo(obj);

	if (use_vram) {
		struct msm_gem_vma *vma;
		struct page **pages;

		drm_gem_private_object_init(dev, obj, size);

		msm_gem_lock(obj);

		vma = add_vma(obj, NULL);
		msm_gem_unlock(obj);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto fail;
		}

		to_msm_bo(obj)->vram_node = &vma->node;

		/* Call chain get_pages() -> update_inactive() tries to
		 * access msm_obj->mm_list, but it is not initialized yet.
		 * To avoid NULL pointer dereference error, initialize
		 * mm_list to be empty.
		 */
		INIT_LIST_HEAD(&msm_obj->mm_list);

		msm_gem_lock(obj);
		pages = get_pages(obj);
		msm_gem_unlock(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		vma->iova = physaddr(obj);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
		/*
		 * Our buffers are kept pinned, so allocating them from the
		 * MOVABLE zone is a really bad idea, and conflicts with CMA.
		 * See comments above new_inode() why this is required _and_
		 * expected if you're going to pin these pages.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
	}

	mutex_lock(&priv->mm_lock);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
	mutex_unlock(&priv->mm_lock);

	mutex_lock(&priv->obj_lock);
	list_add_tail(&msm_obj->node, &priv->objects);
	mutex_unlock(&priv->obj_lock);

	return obj;

fail:
	drm_gem_object_put(obj);
	return ERR_PTR(ret);
}

struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	uint32_t size;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!msm_use_mmu(dev)) {
		DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(dmabuf->size);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
	if (ret)
		return ERR_PTR(ret);

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	msm_gem_lock(obj);
	msm_obj->sgt = sgt;
	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!msm_obj->pages) {
		msm_gem_unlock(obj);
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_array(sgt, msm_obj->pages, npages);
	if (ret) {
		msm_gem_unlock(obj);
		goto fail;
	}

	msm_gem_unlock(obj);

	mutex_lock(&priv->mm_lock);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
	mutex_unlock(&priv->mm_lock);

	mutex_lock(&priv->obj_lock);
	list_add_tail(&msm_obj->node, &priv->objects);
	mutex_unlock(&priv->obj_lock);

	return obj;

fail:
	drm_gem_object_put(obj);
	return ERR_PTR(ret);
}

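/* Convenience wrapper for kernel-internal buffers: allocate a BO,
 * optionally pin it into @aspace, and map it into the kernel.  Undo
 * with msm_gem_kernel_put().
 *
 * Illustrative usage sketch (hypothetical caller, not part of this
 * file):
 *
 *	struct drm_gem_object *bo;
 *	uint64_t iova;
 *	u32 *ptr;
 *
 *	ptr = msm_gem_kernel_new(dev, SZ_4K, MSM_BO_WC, aspace, &bo, &iova);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	...
 *	msm_gem_kernel_put(bo, aspace);
 */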
void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	void *vaddr;
	struct drm_gem_object *obj = msm_gem_new(dev, size, flags);
	int ret;

	if (IS_ERR(obj))
		return ERR_CAST(obj);

	if (iova) {
		ret = msm_gem_get_and_pin_iova(obj, aspace, iova);
		if (ret)
			goto err;
	}

	vaddr = msm_gem_get_vaddr(obj);
	if (IS_ERR(vaddr)) {
		msm_gem_unpin_iova(obj, aspace);
		ret = PTR_ERR(vaddr);
		goto err;
	}

	if (bo)
		*bo = obj;

	return vaddr;

err:
	drm_gem_object_put(obj);

	return ERR_PTR(ret);
}

void msm_gem_kernel_put(struct drm_gem_object *bo,
		struct msm_gem_address_space *aspace)
{
	if (IS_ERR_OR_NULL(bo))
		return;

	msm_gem_put_vaddr(bo);
	msm_gem_unpin_iova(bo, aspace);
	drm_gem_object_put(bo);
}

void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
{
	struct msm_gem_object *msm_obj = to_msm_bo(bo);
	va_list ap;

	if (!fmt)
		return;

	va_start(ap, fmt);
	vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
	va_end(ap);
}