// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2018 Etnaviv Project
 */

#include <drm/drm_prime.h>
#include <linux/dma-mapping.h>
#include <linux/shmem_fs.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>

#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"

static struct lock_class_key etnaviv_shm_lock_class;
static struct lock_class_key etnaviv_userptr_lock_class;

static void etnaviv_gem_scatter_map(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct sg_table *sgt = etnaviv_obj->sgt;

	/*
	 * For non-cached buffers, ensure the new pages are clean
	 * because display controller, GPU, etc. are not coherent.
	 */
	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
		dma_map_sgtable(dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
}

static void etnaviv_gem_scatterlist_unmap(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct sg_table *sgt = etnaviv_obj->sgt;

	/*
	 * For non-cached buffers, ensure the new pages are clean
	 * because display controller, GPU, etc. are not coherent:
	 *
	 * WARNING: The DMA API does not support concurrent CPU
	 * and device access to the memory area. With BIDIRECTIONAL,
	 * we will clean the cache lines which overlap the region,
	 * and invalidate all cache lines (partially) contained in
	 * the region.
	 *
	 * If you have dirty data in the overlapping cache lines,
	 * that will corrupt the GPU-written data. If you have
	 * written into the remainder of the region, this can
	 * discard those writes.
	 */
	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
		dma_unmap_sgtable(dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
}
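
/*
 * Illustrative sketch (not part of this driver): the streaming-DMA
 * ownership rule the WARNING above refers to. Between map and unmap the
 * device owns the memory; any CPU access in that window must be
 * bracketed by sync calls, which is what etnaviv_gem_cpu_prep()/_fini()
 * below do for cached BOs:
 *
 *	dma_map_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
 *	// ... device access only ...
 *	dma_sync_sgtable_for_cpu(dev, sgt, dir);	// CPU may access now
 *	// ... CPU access ...
 *	dma_sync_sgtable_for_device(dev, sgt, dir);	// hand back to device
 *	dma_unmap_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
 */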

/* called with etnaviv_obj->lock held */
static int etnaviv_gem_shmem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct page **p = drm_gem_get_pages(&etnaviv_obj->base);

	if (IS_ERR(p)) {
		dev_dbg(dev->dev, "could not get pages: %ld\n", PTR_ERR(p));
		return PTR_ERR(p);
	}

	etnaviv_obj->pages = p;

	return 0;
}

static void put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	if (etnaviv_obj->sgt) {
		etnaviv_gem_scatterlist_unmap(etnaviv_obj);
		sg_free_table(etnaviv_obj->sgt);
		kfree(etnaviv_obj->sgt);
		etnaviv_obj->sgt = NULL;
	}
	if (etnaviv_obj->pages) {
		drm_gem_put_pages(&etnaviv_obj->base, etnaviv_obj->pages,
				  true, false);

		etnaviv_obj->pages = NULL;
	}
}

struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	int ret;

	lockdep_assert_held(&etnaviv_obj->lock);

	if (!etnaviv_obj->pages) {
		ret = etnaviv_obj->ops->get_pages(etnaviv_obj);
		if (ret < 0)
			return ERR_PTR(ret);
	}

	if (!etnaviv_obj->sgt) {
		struct drm_device *dev = etnaviv_obj->base.dev;
		int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
		struct sg_table *sgt;

		sgt = drm_prime_pages_to_sg(etnaviv_obj->base.dev,
					    etnaviv_obj->pages, npages);
		if (IS_ERR(sgt)) {
			dev_err(dev->dev, "failed to allocate sgt: %ld\n",
				PTR_ERR(sgt));
			return ERR_CAST(sgt);
		}

		etnaviv_obj->sgt = sgt;

		etnaviv_gem_scatter_map(etnaviv_obj);
	}

	return etnaviv_obj->pages;
}
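
/*
 * Illustrative call pattern (sketch only, mirroring the fault handler
 * and etnaviv_gem_cpu_prep() below): pages and the sg_table are
 * populated lazily, so callers take the object lock around the call:
 *
 *	mutex_lock(&etnaviv_obj->lock);
 *	pages = etnaviv_gem_get_pages(etnaviv_obj);
 *	mutex_unlock(&etnaviv_obj->lock);
 *	if (IS_ERR(pages))
 *		return PTR_ERR(pages);
 */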

void etnaviv_gem_put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	lockdep_assert_held(&etnaviv_obj->lock);
	/* when we start tracking the pin count, then do something here */
}

static int etnaviv_gem_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
		struct vm_area_struct *vma)
{
	pgprot_t vm_page_prot;

	vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;

	vm_page_prot = vm_get_page_prot(vma->vm_flags);

	if (etnaviv_obj->flags & ETNA_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_page_prot);
	} else if (etnaviv_obj->flags & ETNA_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_page_prot);
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		vma->vm_pgoff = 0;
		vma_set_file(vma, etnaviv_obj->base.filp);

		vma->vm_page_prot = vm_page_prot;
	}

	return 0;
}
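
/*
 * In short (summary of the mapping above): ETNA_BO_WC yields a
 * write-combined userspace mapping, ETNA_BO_UNCACHED an uncached one,
 * and ETNA_BO_CACHED a normal cacheable mapping backed directly by the
 * shmem file; cached BOs therefore need the cpu_prep/cpu_fini calls
 * below to stay coherent with the GPU.
 */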

static int etnaviv_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	return etnaviv_obj->ops->mmap(etnaviv_obj, vma);
}

static vm_fault_t etnaviv_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct page **pages, *page;
	pgoff_t pgoff;
	int err;

	/*
	 * Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet. Note that vmf_insert_page() is
	 * specifically coded to take care of this, so we don't have to.
	 */
	err = mutex_lock_interruptible(&etnaviv_obj->lock);
	if (err)
		return VM_FAULT_NOPAGE;
	/* make sure we have pages attached now */
	pages = etnaviv_gem_get_pages(etnaviv_obj);
	mutex_unlock(&etnaviv_obj->lock);

	if (IS_ERR(pages)) {
		err = PTR_ERR(pages);
		return vmf_error(err);
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	page = pages[pgoff];

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
	     page_to_pfn(page), page_to_pfn(page) << PAGE_SHIFT);

	return vmf_insert_page(vma, vmf->address, page);
}

int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset)
{
	int ret;

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		dev_err(obj->dev->dev, "could not allocate mmap offset\n");
	else
		*offset = drm_vma_node_offset_addr(&obj->vma_node);

	return ret;
}
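
/*
 * Userspace view (illustrative sketch, assuming the etnaviv UAPI's
 * DRM_IOCTL_ETNAVIV_GEM_INFO from include/uapi/drm/etnaviv_drm.h): the
 * offset returned here is a fake offset into the DRM device node, not a
 * physical address, and is consumed by mmap():
 *
 *	struct drm_etnaviv_gem_info req = { .handle = handle };
 *
 *	ioctl(fd, DRM_IOCTL_ETNAVIV_GEM_INFO, &req);
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, req.offset);
 */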

static struct etnaviv_vram_mapping *
etnaviv_gem_get_vram_mapping(struct etnaviv_gem_object *obj,
			     struct etnaviv_iommu_context *context)
{
	struct etnaviv_vram_mapping *mapping;

	list_for_each_entry(mapping, &obj->vram_list, obj_node) {
		if (mapping->context == context)
			return mapping;
	}

	return NULL;
}

void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_gem_object *etnaviv_obj = mapping->object;

	mutex_lock(&etnaviv_obj->lock);
	WARN_ON(mapping->use == 0);
	mapping->use -= 1;
	mutex_unlock(&etnaviv_obj->lock);

	drm_gem_object_put(&etnaviv_obj->base);
}

struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
	struct drm_gem_object *obj, struct etnaviv_iommu_context *mmu_context,
	u64 va)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct etnaviv_vram_mapping *mapping;
	struct page **pages;
	int ret = 0;

	mutex_lock(&etnaviv_obj->lock);
	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, mmu_context);
	if (mapping) {
		/*
		 * Holding the object lock prevents the use count changing
		 * beneath us. If the use count is zero, the MMU might be
		 * reaping this object, so take the lock and re-check that
		 * the MMU owns this mapping to close this race.
		 */
		if (mapping->use == 0) {
			mutex_lock(&mmu_context->lock);
			if (mapping->context == mmu_context)
				mapping->use += 1;
			else
				mapping = NULL;
			mutex_unlock(&mmu_context->lock);
			if (mapping)
				goto out;
		} else {
			mapping->use += 1;
			goto out;
		}
	}

	pages = etnaviv_gem_get_pages(etnaviv_obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out;
	}

	/*
	 * See if we have a reaped vram mapping we can re-use before
	 * allocating a fresh mapping.
	 */
	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, NULL);
	if (!mapping) {
		mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
		if (!mapping) {
			ret = -ENOMEM;
			goto out;
		}

		INIT_LIST_HEAD(&mapping->scan_node);
		mapping->object = etnaviv_obj;
	} else {
		list_del(&mapping->obj_node);
	}

	etnaviv_iommu_context_get(mmu_context);
	mapping->context = mmu_context;
	mapping->use = 1;

	ret = etnaviv_iommu_map_gem(mmu_context, etnaviv_obj,
				    mmu_context->global->memory_base,
				    mapping, va);
	if (ret < 0) {
		etnaviv_iommu_context_put(mmu_context);
		kfree(mapping);
	} else {
		list_add_tail(&mapping->obj_node, &etnaviv_obj->vram_list);
	}

out:
	mutex_unlock(&etnaviv_obj->lock);

	if (ret)
		return ERR_PTR(ret);

	/* Take a reference on the object */
	drm_gem_object_get(obj);
	return mapping;
}
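
/*
 * Illustrative pairing (sketch only): every successful
 * etnaviv_gem_mapping_get() takes a GEM object reference and bumps
 * mapping->use, so it must be balanced with
 * etnaviv_gem_mapping_unreference() once the GPU VA is no longer
 * needed:
 *
 *	mapping = etnaviv_gem_mapping_get(obj, mmu_context, va);
 *	if (IS_ERR(mapping))
 *		return PTR_ERR(mapping);
 *	// ... submit GPU work using mapping->iova ...
 *	etnaviv_gem_mapping_unreference(mapping);
 */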

void *etnaviv_gem_vmap(struct drm_gem_object *obj)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	if (etnaviv_obj->vaddr)
		return etnaviv_obj->vaddr;

	mutex_lock(&etnaviv_obj->lock);
	/*
	 * Need to check again, as we might have raced with another thread
	 * while waiting for the mutex.
	 */
	if (!etnaviv_obj->vaddr)
		etnaviv_obj->vaddr = etnaviv_obj->ops->vmap(etnaviv_obj);
	mutex_unlock(&etnaviv_obj->lock);

	return etnaviv_obj->vaddr;
}
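
/*
 * Note (added explanation): the unlocked vaddr check above is a
 * double-checked-locking fast path. vaddr is only ever set once, under
 * the object lock, and is not torn down until the object is released,
 * so a non-NULL read may safely skip the mutex; racing callers
 * serialize on the lock and re-check before mapping.
 */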

static void *etnaviv_gem_vmap_impl(struct etnaviv_gem_object *obj)
{
	struct page **pages;

	lockdep_assert_held(&obj->lock);

	pages = etnaviv_gem_get_pages(obj);
	if (IS_ERR(pages))
		return NULL;

	return vmap(pages, obj->base.size >> PAGE_SHIFT,
		    VM_MAP, pgprot_writecombine(PAGE_KERNEL));
}

static inline enum dma_data_direction etnaviv_op_to_dma_dir(u32 op)
{
	if (op & ETNA_PREP_READ)
		return DMA_FROM_DEVICE;
	else if (op & ETNA_PREP_WRITE)
		return DMA_TO_DEVICE;
	else
		return DMA_BIDIRECTIONAL;
}

int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
		struct drm_etnaviv_timespec *timeout)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct drm_device *dev = obj->dev;
	bool write = !!(op & ETNA_PREP_WRITE);
	int ret;

	if (!etnaviv_obj->sgt) {
		void *ret;

		mutex_lock(&etnaviv_obj->lock);
		ret = etnaviv_gem_get_pages(etnaviv_obj);
		mutex_unlock(&etnaviv_obj->lock);
		if (IS_ERR(ret))
			return PTR_ERR(ret);
	}

	if (op & ETNA_PREP_NOSYNC) {
		if (!dma_resv_test_signaled(obj->resv, write))
			return -EBUSY;
	} else {
		unsigned long remain = etnaviv_timeout_to_jiffies(timeout);

		ret = dma_resv_wait_timeout(obj->resv, write, true, remain);
		if (ret <= 0)
			return ret == 0 ? -ETIMEDOUT : ret;
	}

	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
		dma_sync_sgtable_for_cpu(dev->dev, etnaviv_obj->sgt,
					 etnaviv_op_to_dma_dir(op));
		etnaviv_obj->last_cpu_prep_op = op;
	}

	return 0;
}

int etnaviv_gem_cpu_fini(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
		/* fini without a prep is almost certainly a userspace error */
		WARN_ON(etnaviv_obj->last_cpu_prep_op == 0);
		dma_sync_sgtable_for_device(dev->dev, etnaviv_obj->sgt,
			etnaviv_op_to_dma_dir(etnaviv_obj->last_cpu_prep_op));
		etnaviv_obj->last_cpu_prep_op = 0;
	}

	return 0;
}
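
/*
 * Userspace pairing (illustrative sketch, assuming the etnaviv UAPI's
 * DRM_IOCTL_ETNAVIV_GEM_CPU_PREP/_CPU_FINI ioctls from
 * include/uapi/drm/etnaviv_drm.h): CPU access to a cached BO is
 * bracketed so both sgtable sync calls above run:
 *
 *	struct drm_etnaviv_gem_cpu_prep prep = {
 *		.handle = handle,
 *		.op = ETNA_PREP_WRITE,
 *		.timeout = timeout,
 *	};
 *	struct drm_etnaviv_gem_cpu_fini fini = { .handle = handle };
 *
 *	ioctl(fd, DRM_IOCTL_ETNAVIV_GEM_CPU_PREP, &prep);
 *	// ... CPU writes to the mmap'd BO ...
 *	ioctl(fd, DRM_IOCTL_ETNAVIV_GEM_CPU_FINI, &fini);
 */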

int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
	struct drm_etnaviv_timespec *timeout)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	return etnaviv_gpu_wait_obj_inactive(gpu, etnaviv_obj, timeout);
}

#ifdef CONFIG_DEBUG_FS
static void etnaviv_gem_describe_fence(struct dma_fence *fence,
	const char *type, struct seq_file *m)
{
	if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		seq_printf(m, "\t%9s: %s %s seq %llu\n",
			   type,
			   fence->ops->get_driver_name(fence),
			   fence->ops->get_timeline_name(fence),
			   fence->seqno);
}

static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct dma_resv *robj = obj->resv;
	struct dma_resv_list *fobj;
	struct dma_fence *fence;
	unsigned long off = drm_vma_node_start(&obj->vma_node);

	seq_printf(m, "%08x: %c %2d (%2d) %08lx %p %zd\n",
		   etnaviv_obj->flags, is_active(etnaviv_obj) ? 'A' : 'I',
		   obj->name, kref_read(&obj->refcount),
		   off, etnaviv_obj->vaddr, obj->size);

	rcu_read_lock();
	fobj = dma_resv_shared_list(robj);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			etnaviv_gem_describe_fence(fence, "Shared", m);
		}
	}

	fence = dma_resv_excl_fence(robj);
	if (fence)
		etnaviv_gem_describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();
}

void etnaviv_gem_describe_objects(struct etnaviv_drm_private *priv,
	struct seq_file *m)
{
	struct etnaviv_gem_object *etnaviv_obj;
	int count = 0;
	size_t size = 0;

	mutex_lock(&priv->gem_lock);
	list_for_each_entry(etnaviv_obj, &priv->gem_list, gem_node) {
		struct drm_gem_object *obj = &etnaviv_obj->base;

		seq_puts(m, "   ");
		etnaviv_gem_describe(obj, m);
		count++;
		size += obj->size;
	}
	mutex_unlock(&priv->gem_lock);

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

static void etnaviv_gem_shmem_release(struct etnaviv_gem_object *etnaviv_obj)
{
	vunmap(etnaviv_obj->vaddr);
	put_pages(etnaviv_obj);
}

static const struct etnaviv_gem_ops etnaviv_gem_shmem_ops = {
	.get_pages = etnaviv_gem_shmem_get_pages,
	.release = etnaviv_gem_shmem_release,
	.vmap = etnaviv_gem_vmap_impl,
	.mmap = etnaviv_gem_mmap_obj,
};

void etnaviv_gem_free_object(struct drm_gem_object *obj)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct etnaviv_drm_private *priv = obj->dev->dev_private;
	struct etnaviv_vram_mapping *mapping, *tmp;

	/* object should not be active */
	WARN_ON(is_active(etnaviv_obj));

	mutex_lock(&priv->gem_lock);
	list_del(&etnaviv_obj->gem_node);
	mutex_unlock(&priv->gem_lock);

	list_for_each_entry_safe(mapping, tmp, &etnaviv_obj->vram_list,
				 obj_node) {
		struct etnaviv_iommu_context *context = mapping->context;

		WARN_ON(mapping->use);

		if (context) {
			etnaviv_iommu_unmap_gem(context, mapping);
			etnaviv_iommu_context_put(context);
		}

		list_del(&mapping->obj_node);
		kfree(mapping);
	}

	drm_gem_free_mmap_offset(obj);
	etnaviv_obj->ops->release(etnaviv_obj);
	drm_gem_object_release(obj);

	kfree(etnaviv_obj);
}

void etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	mutex_lock(&priv->gem_lock);
	list_add_tail(&etnaviv_obj->gem_node, &priv->gem_list);
	mutex_unlock(&priv->gem_lock);
}

static const struct vm_operations_struct vm_ops = {
	.fault = etnaviv_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct drm_gem_object_funcs etnaviv_gem_object_funcs = {
	.free = etnaviv_gem_free_object,
	.pin = etnaviv_gem_prime_pin,
	.unpin = etnaviv_gem_prime_unpin,
	.get_sg_table = etnaviv_gem_prime_get_sg_table,
	.vmap = etnaviv_gem_prime_vmap,
	.mmap = etnaviv_gem_mmap,
	.vm_ops = &vm_ops,
};

static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags,
	const struct etnaviv_gem_ops *ops, struct drm_gem_object **obj)
{
	struct etnaviv_gem_object *etnaviv_obj;
	unsigned sz = sizeof(*etnaviv_obj);
	bool valid = true;

	/* validate flags */
	switch (flags & ETNA_BO_CACHE_MASK) {
	case ETNA_BO_UNCACHED:
	case ETNA_BO_CACHED:
	case ETNA_BO_WC:
		break;
	default:
		valid = false;
	}

	if (!valid) {
		dev_err(dev->dev, "invalid cache flag: %x\n",
			(flags & ETNA_BO_CACHE_MASK));
		return -EINVAL;
	}

	etnaviv_obj = kzalloc(sz, GFP_KERNEL);
	if (!etnaviv_obj)
		return -ENOMEM;

	etnaviv_obj->flags = flags;
	etnaviv_obj->ops = ops;

	mutex_init(&etnaviv_obj->lock);
	INIT_LIST_HEAD(&etnaviv_obj->vram_list);

	*obj = &etnaviv_obj->base;
	(*obj)->funcs = &etnaviv_gem_object_funcs;

	return 0;
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
	u32 size, u32 flags, u32 *handle)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct drm_gem_object *obj = NULL;
	int ret;

	size = PAGE_ALIGN(size);

	ret = etnaviv_gem_new_impl(dev, size, flags,
				   &etnaviv_gem_shmem_ops, &obj);
	if (ret)
		goto fail;

	lockdep_set_class(&to_etnaviv_bo(obj)->lock, &etnaviv_shm_lock_class);

	ret = drm_gem_object_init(dev, obj, size);
	if (ret)
		goto fail;

	/*
	 * Our buffers are kept pinned, so allocating them from the MOVABLE
	 * zone is a really bad idea, and conflicts with CMA. See comments
	 * above new_inode() why this is required _and_ expected if you're
	 * going to pin these pages.
	 */
	mapping_set_gfp_mask(obj->filp->f_mapping, priv->shm_gfp_mask);

	etnaviv_gem_obj_add(dev, obj);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
fail:
	drm_gem_object_put(obj);

	return ret;
}
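
/*
 * Userspace entry point (illustrative sketch, assuming the etnaviv
 * UAPI's DRM_IOCTL_ETNAVIV_GEM_NEW; see include/uapi/drm/etnaviv_drm.h):
 *
 *	struct drm_etnaviv_gem_new req = {
 *		.size = 4096,
 *		.flags = ETNA_BO_WC,
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_ETNAVIV_GEM_NEW, &req) == 0)
 *		// req.handle now names the BO created above
 */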

int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
	const struct etnaviv_gem_ops *ops, struct etnaviv_gem_object **res)
{
	struct drm_gem_object *obj;
	int ret;

	ret = etnaviv_gem_new_impl(dev, size, flags, ops, &obj);
	if (ret)
		return ret;

	drm_gem_private_object_init(dev, obj, size);

	*res = to_etnaviv_bo(obj);

	return 0;
}

static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	struct page **pvec = NULL;
	struct etnaviv_gem_userptr *userptr = &etnaviv_obj->userptr;
	int ret, pinned = 0, npages = etnaviv_obj->base.size >> PAGE_SHIFT;

	might_lock_read(&current->mm->mmap_lock);

	if (userptr->mm != current->mm)
		return -EPERM;

	pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!pvec)
		return -ENOMEM;

	do {
		unsigned num_pages = npages - pinned;
		uint64_t ptr = userptr->ptr + pinned * PAGE_SIZE;
		struct page **pages = pvec + pinned;

		ret = pin_user_pages_fast(ptr, num_pages,
					  FOLL_WRITE | FOLL_FORCE | FOLL_LONGTERM,
					  pages);
		if (ret < 0) {
			unpin_user_pages(pvec, pinned);
			kvfree(pvec);
			return ret;
		}

		pinned += ret;

	} while (pinned < npages);

	etnaviv_obj->pages = pvec;

	return 0;
}

static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj)
{
	if (etnaviv_obj->sgt) {
		etnaviv_gem_scatterlist_unmap(etnaviv_obj);
		sg_free_table(etnaviv_obj->sgt);
		kfree(etnaviv_obj->sgt);
	}
	if (etnaviv_obj->pages) {
		int npages = etnaviv_obj->base.size >> PAGE_SHIFT;

		unpin_user_pages(etnaviv_obj->pages, npages);
		kvfree(etnaviv_obj->pages);
	}
}

static int etnaviv_gem_userptr_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
		struct vm_area_struct *vma)
{
	return -EINVAL;
}

static const struct etnaviv_gem_ops etnaviv_gem_userptr_ops = {
	.get_pages = etnaviv_gem_userptr_get_pages,
	.release = etnaviv_gem_userptr_release,
	.vmap = etnaviv_gem_vmap_impl,
	.mmap = etnaviv_gem_userptr_mmap_obj,
};

int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
	uintptr_t ptr, u32 size, u32 flags, u32 *handle)
{
	struct etnaviv_gem_object *etnaviv_obj;
	int ret;

	ret = etnaviv_gem_new_private(dev, size, ETNA_BO_CACHED,
				      &etnaviv_gem_userptr_ops, &etnaviv_obj);
	if (ret)
		return ret;

	lockdep_set_class(&etnaviv_obj->lock, &etnaviv_userptr_lock_class);

	etnaviv_obj->userptr.ptr = ptr;
	etnaviv_obj->userptr.mm = current->mm;
	etnaviv_obj->userptr.ro = !(flags & ETNA_USERPTR_WRITE);

	etnaviv_gem_obj_add(dev, &etnaviv_obj->base);

	ret = drm_gem_handle_create(file, &etnaviv_obj->base, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(&etnaviv_obj->base);

	return ret;
}
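
/*
 * Userspace view (illustrative sketch, assuming the etnaviv UAPI's
 * DRM_IOCTL_ETNAVIV_GEM_USERPTR; see include/uapi/drm/etnaviv_drm.h):
 * wraps an existing, page-aligned user allocation as a BO. The pages
 * are pinned long-term by etnaviv_gem_userptr_get_pages() above, so the
 * region must stay valid for the BO's lifetime:
 *
 *	struct drm_etnaviv_gem_userptr req = {
 *		.user_ptr = (uintptr_t)buf,
 *		.user_size = size,
 *		.flags = ETNA_USERPTR_READ | ETNA_USERPTR_WRITE,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_ETNAVIV_GEM_USERPTR, &req);
 *	// req.handle now names the userptr BO
 */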