/*
 * Copyright (C) 2015 Etnaviv Project
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>

#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"

static void etnaviv_gem_scatter_map(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct sg_table *sgt = etnaviv_obj->sgt;

	/*
	 * For non-cached buffers, ensure the new pages are clean
	 * because display controller, GPU, etc. are not coherent.
	 */
	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
		dma_map_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
}

static void etnaviv_gem_scatterlist_unmap(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct sg_table *sgt = etnaviv_obj->sgt;

	/*
	 * For non-cached buffers, ensure the new pages are clean
	 * because display controller, GPU, etc. are not coherent:
	 *
	 * WARNING: The DMA API does not support concurrent CPU
	 * and device access to the memory area. With BIDIRECTIONAL,
	 * we will clean the cache lines which overlap the region,
	 * and invalidate all cache lines (partially) contained in
	 * the region.
	 *
	 * If you have dirty data in the overlapping cache lines,
	 * that will corrupt the GPU-written data. If you have
	 * written into the remainder of the region, this can
	 * discard those writes.
	 */
	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
		dma_unmap_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
}

/* called with etnaviv_obj->lock held */
static int etnaviv_gem_shmem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct page **p = drm_gem_get_pages(&etnaviv_obj->base);

	if (IS_ERR(p)) {
		dev_err(dev->dev, "could not get pages: %ld\n", PTR_ERR(p));
		return PTR_ERR(p);
	}

	etnaviv_obj->pages = p;

	return 0;
}

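/*
 * Drop the backing storage: unmap the scatterlist from the device, free
 * the sg_table and hand the shmem pages back to the page cache, marking
 * them dirty (but not accessed) via drm_gem_put_pages().
 */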
static void put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	if (etnaviv_obj->sgt) {
		etnaviv_gem_scatterlist_unmap(etnaviv_obj);
		sg_free_table(etnaviv_obj->sgt);
		kfree(etnaviv_obj->sgt);
		etnaviv_obj->sgt = NULL;
	}
	if (etnaviv_obj->pages) {
		drm_gem_put_pages(&etnaviv_obj->base, etnaviv_obj->pages,
				  true, false);

		etnaviv_obj->pages = NULL;
	}
}

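/*
 * Lazily populate the object's page array and scatter/gather table via
 * the object's get_pages() op.  Must be called with etnaviv_obj->lock
 * held; the returned pages stay valid until put_pages() releases them.
 */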
struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	int ret;

	lockdep_assert_held(&etnaviv_obj->lock);

	if (!etnaviv_obj->pages) {
		ret = etnaviv_obj->ops->get_pages(etnaviv_obj);
		if (ret < 0)
			return ERR_PTR(ret);
	}

	if (!etnaviv_obj->sgt) {
		struct drm_device *dev = etnaviv_obj->base.dev;
		int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
		struct sg_table *sgt;

		sgt = drm_prime_pages_to_sg(etnaviv_obj->pages, npages);
		if (IS_ERR(sgt)) {
			dev_err(dev->dev, "failed to allocate sgt: %ld\n",
				PTR_ERR(sgt));
			return ERR_CAST(sgt);
		}

		etnaviv_obj->sgt = sgt;

		etnaviv_gem_scatter_map(etnaviv_obj);
	}

	return etnaviv_obj->pages;
}

void etnaviv_gem_put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	lockdep_assert_held(&etnaviv_obj->lock);
	/* when we start tracking the pin count, then do something here */
}

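/*
 * Set up a userspace mapping of the object: pick the page protection from
 * the BO cache flags (write-combined, uncached or cached) and, for cached
 * objects, redirect the VMA to the shmem file's own address_space.
 */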
static int etnaviv_gem_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
		struct vm_area_struct *vma)
{
	pgprot_t vm_page_prot;

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	vm_page_prot = vm_get_page_prot(vma->vm_flags);

	if (etnaviv_obj->flags & ETNA_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_page_prot);
	} else if (etnaviv_obj->flags & ETNA_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_page_prot);
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(etnaviv_obj->base.filp);
		vma->vm_pgoff = 0;
		vma->vm_file = etnaviv_obj->base.filp;

		vma->vm_page_prot = vm_page_prot;
	}

	return 0;
}

int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct etnaviv_gem_object *obj;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	obj = to_etnaviv_bo(vma->vm_private_data);
	return obj->ops->mmap(obj, vma);
}

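/*
 * Page fault handler: make sure the backing pages exist, then insert the
 * single faulting page into the VMA with vm_insert_page().
 */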
int etnaviv_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct page **pages, *page;
	pgoff_t pgoff;
	int ret;

	/*
	 * Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet. Note that vm_insert_page() is
	 * specifically coded to take care of this, so we don't have to.
	 */
	ret = mutex_lock_interruptible(&etnaviv_obj->lock);
	if (ret)
		goto out;

	/* make sure we have pages attached now */
	pages = etnaviv_gem_get_pages(etnaviv_obj);
	mutex_unlock(&etnaviv_obj->lock);

	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	page = pages[pgoff];

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
	     page_to_pfn(page), page_to_pfn(page) << PAGE_SHIFT);

	ret = vm_insert_page(vma, vmf->address, page);

out:
	switch (ret) {
	case -EAGAIN:
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset)
{
	int ret;

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		dev_err(obj->dev->dev, "could not allocate mmap offset\n");
	else
		*offset = drm_vma_node_offset_addr(&obj->vma_node);

	return ret;
}

static struct etnaviv_vram_mapping *
etnaviv_gem_get_vram_mapping(struct etnaviv_gem_object *obj,
			     struct etnaviv_iommu *mmu)
{
	struct etnaviv_vram_mapping *mapping;

	list_for_each_entry(mapping, &obj->vram_list, obj_node) {
		if (mapping->mmu == mmu)
			return mapping;
	}

	return NULL;
}

void etnaviv_gem_mapping_reference(struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_gem_object *etnaviv_obj = mapping->object;

	drm_gem_object_reference(&etnaviv_obj->base);

	mutex_lock(&etnaviv_obj->lock);
	WARN_ON(mapping->use == 0);
	mapping->use += 1;
	mutex_unlock(&etnaviv_obj->lock);
}

void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_gem_object *etnaviv_obj = mapping->object;

	mutex_lock(&etnaviv_obj->lock);
	WARN_ON(mapping->use == 0);
	mapping->use -= 1;
	mutex_unlock(&etnaviv_obj->lock);

	drm_gem_object_unreference_unlocked(&etnaviv_obj->base);
}

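/*
 * Find or create a GPU VM mapping of the object on the given GPU's MMU.
 * Re-uses an existing (possibly reaped) mapping when it can, otherwise it
 * allocates a new one and maps the object through etnaviv_iommu_map_gem().
 * Returns the mapping with its use count incremented and a reference held
 * on the GEM object, or an ERR_PTR() on failure.
 */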
struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
	struct drm_gem_object *obj, struct etnaviv_gpu *gpu)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct etnaviv_vram_mapping *mapping;
	struct page **pages;
	int ret = 0;

	mutex_lock(&etnaviv_obj->lock);
	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, gpu->mmu);
	if (mapping) {
		/*
		 * Holding the object lock prevents the use count changing
		 * beneath us. If the use count is zero, the MMU might be
		 * reaping this object, so take the lock and re-check that
		 * the MMU owns this mapping to close this race.
		 */
		if (mapping->use == 0) {
			mutex_lock(&gpu->mmu->lock);
			if (mapping->mmu == gpu->mmu)
				mapping->use += 1;
			else
				mapping = NULL;
			mutex_unlock(&gpu->mmu->lock);
			if (mapping)
				goto out;
		} else {
			mapping->use += 1;
			goto out;
		}
	}

	pages = etnaviv_gem_get_pages(etnaviv_obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out;
	}

	/*
	 * See if we have a reaped vram mapping we can re-use before
	 * allocating a fresh mapping.
	 */
	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, NULL);
	if (!mapping) {
		mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
		if (!mapping) {
			ret = -ENOMEM;
			goto out;
		}

		INIT_LIST_HEAD(&mapping->scan_node);
		mapping->object = etnaviv_obj;
	} else {
		list_del(&mapping->obj_node);
	}

	mapping->mmu = gpu->mmu;
	mapping->use = 1;

	ret = etnaviv_iommu_map_gem(gpu->mmu, etnaviv_obj, gpu->memory_base,
				    mapping);
	if (ret < 0)
		kfree(mapping);
	else
		list_add_tail(&mapping->obj_node, &etnaviv_obj->vram_list);

out:
	mutex_unlock(&etnaviv_obj->lock);

	if (ret)
		return ERR_PTR(ret);

	/* Take a reference on the object */
	drm_gem_object_reference(obj);
	return mapping;
}

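/*
 * Return a kernel virtual address for the object, creating the vmap
 * lazily on first use under the object lock.
 */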
void *etnaviv_gem_vmap(struct drm_gem_object *obj)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	if (etnaviv_obj->vaddr)
		return etnaviv_obj->vaddr;

	mutex_lock(&etnaviv_obj->lock);
	/*
	 * Need to check again, as we might have raced with another thread
	 * while waiting for the mutex.
	 */
	if (!etnaviv_obj->vaddr)
		etnaviv_obj->vaddr = etnaviv_obj->ops->vmap(etnaviv_obj);
	mutex_unlock(&etnaviv_obj->lock);

	return etnaviv_obj->vaddr;
}

static void *etnaviv_gem_vmap_impl(struct etnaviv_gem_object *obj)
{
	struct page **pages;

	lockdep_assert_held(&obj->lock);

	pages = etnaviv_gem_get_pages(obj);
	if (IS_ERR(pages))
		return NULL;

	return vmap(pages, obj->base.size >> PAGE_SHIFT,
		    VM_MAP, pgprot_writecombine(PAGE_KERNEL));
}

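/* Translate an ETNA_PREP_* access mask into the matching DMA direction. */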
static inline enum dma_data_direction etnaviv_op_to_dma_dir(u32 op)
{
	if (op & ETNA_PREP_READ)
		return DMA_FROM_DEVICE;
	else if (op & ETNA_PREP_WRITE)
		return DMA_TO_DEVICE;
	else
		return DMA_BIDIRECTIONAL;
}

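/*
 * Prepare the object for CPU access: wait for GPU work tracked in the
 * reservation object (unless ETNA_PREP_NOSYNC is set), then, for cached
 * BOs, sync the scatterlist for the CPU and remember the prep op so that
 * etnaviv_gem_cpu_fini() can sync it back for the device.
 */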
int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
		struct timespec *timeout)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct drm_device *dev = obj->dev;
	bool write = !!(op & ETNA_PREP_WRITE);
	unsigned long remain =
		op & ETNA_PREP_NOSYNC ? 0 : etnaviv_timeout_to_jiffies(timeout);
	long lret;

	lret = reservation_object_wait_timeout_rcu(etnaviv_obj->resv,
						   write, true, remain);
	if (lret < 0)
		return lret;
	else if (lret == 0)
		return remain == 0 ? -EBUSY : -ETIMEDOUT;

	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
		if (!etnaviv_obj->sgt) {
			void *ret;

			mutex_lock(&etnaviv_obj->lock);
			ret = etnaviv_gem_get_pages(etnaviv_obj);
			mutex_unlock(&etnaviv_obj->lock);
			if (IS_ERR(ret))
				return PTR_ERR(ret);
		}

		dma_sync_sg_for_cpu(dev->dev, etnaviv_obj->sgt->sgl,
				    etnaviv_obj->sgt->nents,
				    etnaviv_op_to_dma_dir(op));
		etnaviv_obj->last_cpu_prep_op = op;
	}

	return 0;
}

int etnaviv_gem_cpu_fini(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
		/* fini without a prep is almost certainly a userspace error */
		WARN_ON(etnaviv_obj->last_cpu_prep_op == 0);
		dma_sync_sg_for_device(dev->dev, etnaviv_obj->sgt->sgl,
			etnaviv_obj->sgt->nents,
			etnaviv_op_to_dma_dir(etnaviv_obj->last_cpu_prep_op));
		etnaviv_obj->last_cpu_prep_op = 0;
	}

	return 0;
}

int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
	struct timespec *timeout)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	return etnaviv_gpu_wait_obj_inactive(gpu, etnaviv_obj, timeout);
}

#ifdef CONFIG_DEBUG_FS
static void etnaviv_gem_describe_fence(struct dma_fence *fence,
	const char *type, struct seq_file *m)
{
	if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		seq_printf(m, "\t%9s: %s %s seq %u\n",
			   type,
			   fence->ops->get_driver_name(fence),
			   fence->ops->get_timeline_name(fence),
			   fence->seqno);
}

static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct reservation_object *robj = etnaviv_obj->resv;
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	unsigned long off = drm_vma_node_start(&obj->vma_node);

	seq_printf(m, "%08x: %c %2d (%2d) %08lx %p %zd\n",
			etnaviv_obj->flags, is_active(etnaviv_obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, etnaviv_obj->vaddr, obj->size);

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			etnaviv_gem_describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		etnaviv_gem_describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();
}

void etnaviv_gem_describe_objects(struct etnaviv_drm_private *priv,
	struct seq_file *m)
{
	struct etnaviv_gem_object *etnaviv_obj;
	int count = 0;
	size_t size = 0;

	mutex_lock(&priv->gem_lock);
	list_for_each_entry(etnaviv_obj, &priv->gem_list, gem_node) {
		struct drm_gem_object *obj = &etnaviv_obj->base;

		seq_puts(m, "   ");
		etnaviv_gem_describe(obj, m);
		count++;
		size += obj->size;
	}
	mutex_unlock(&priv->gem_lock);

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

static void etnaviv_gem_shmem_release(struct etnaviv_gem_object *etnaviv_obj)
{
	vunmap(etnaviv_obj->vaddr);
	put_pages(etnaviv_obj);
}

static const struct etnaviv_gem_ops etnaviv_gem_shmem_ops = {
	.get_pages = etnaviv_gem_shmem_get_pages,
	.release = etnaviv_gem_shmem_release,
	.vmap = etnaviv_gem_vmap_impl,
	.mmap = etnaviv_gem_mmap_obj,
};

void etnaviv_gem_free_object(struct drm_gem_object *obj)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct etnaviv_vram_mapping *mapping, *tmp;

	/* object should not be active */
	WARN_ON(is_active(etnaviv_obj));

	list_del(&etnaviv_obj->gem_node);

	list_for_each_entry_safe(mapping, tmp, &etnaviv_obj->vram_list,
				 obj_node) {
		struct etnaviv_iommu *mmu = mapping->mmu;

		WARN_ON(mapping->use);

		if (mmu)
			etnaviv_iommu_unmap_gem(mmu, mapping);

		list_del(&mapping->obj_node);
		kfree(mapping);
	}

	drm_gem_free_mmap_offset(obj);
	etnaviv_obj->ops->release(etnaviv_obj);
	if (etnaviv_obj->resv == &etnaviv_obj->_resv)
		reservation_object_fini(&etnaviv_obj->_resv);
	drm_gem_object_release(obj);

	kfree(etnaviv_obj);
}

int etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	mutex_lock(&priv->gem_lock);
	list_add_tail(&etnaviv_obj->gem_node, &priv->gem_list);
	mutex_unlock(&priv->gem_lock);

	return 0;
}

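/*
 * Common allocation path: validate the cache flags, allocate and
 * initialise the etnaviv GEM object, and set up its reservation object
 * (either the caller-supplied one or the embedded one).
 */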
static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags,
	struct reservation_object *robj, const struct etnaviv_gem_ops *ops,
	struct drm_gem_object **obj)
{
	struct etnaviv_gem_object *etnaviv_obj;
	unsigned sz = sizeof(*etnaviv_obj);
	bool valid = true;

	/* validate flags */
	switch (flags & ETNA_BO_CACHE_MASK) {
	case ETNA_BO_UNCACHED:
	case ETNA_BO_CACHED:
	case ETNA_BO_WC:
		break;
	default:
		valid = false;
	}

	if (!valid) {
		dev_err(dev->dev, "invalid cache flag: %x\n",
			(flags & ETNA_BO_CACHE_MASK));
		return -EINVAL;
	}

	etnaviv_obj = kzalloc(sz, GFP_KERNEL);
	if (!etnaviv_obj)
		return -ENOMEM;

	etnaviv_obj->flags = flags;
	etnaviv_obj->ops = ops;
	if (robj) {
		etnaviv_obj->resv = robj;
	} else {
		etnaviv_obj->resv = &etnaviv_obj->_resv;
		reservation_object_init(&etnaviv_obj->_resv);
	}

	mutex_init(&etnaviv_obj->lock);
	INIT_LIST_HEAD(&etnaviv_obj->vram_list);

	*obj = &etnaviv_obj->base;

	return 0;
}

static struct drm_gem_object *__etnaviv_gem_new(struct drm_device *dev,
		u32 size, u32 flags)
{
	struct drm_gem_object *obj = NULL;
	int ret;

	size = PAGE_ALIGN(size);

	ret = etnaviv_gem_new_impl(dev, size, flags, NULL,
				   &etnaviv_gem_shmem_ops, &obj);
	if (ret)
		goto fail;

	ret = drm_gem_object_init(dev, obj, size);
	if (ret == 0) {
		struct address_space *mapping;

		/*
		 * Our buffers are kept pinned, so allocating them
		 * from the MOVABLE zone is a really bad idea, and
		 * conflicts with CMA. See comments above new_inode()
		 * why this is required _and_ expected if you're
		 * going to pin these pages.
		 */
		mapping = obj->filp->f_mapping;
		mapping_set_gfp_mask(mapping, GFP_HIGHUSER);
	}

	if (ret)
		goto fail;

	return obj;

fail:
	drm_gem_object_unreference_unlocked(obj);
	return ERR_PTR(ret);
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		u32 size, u32 flags, u32 *handle)
{
	struct drm_gem_object *obj;
	int ret;

	obj = __etnaviv_gem_new(dev, size, flags);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = etnaviv_gem_obj_add(dev, obj);
	if (ret < 0) {
		drm_gem_object_unreference_unlocked(obj);
		return ret;
	}

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return ret;
}

struct drm_gem_object *etnaviv_gem_new(struct drm_device *dev,
		u32 size, u32 flags)
{
	struct drm_gem_object *obj;
	int ret;

	obj = __etnaviv_gem_new(dev, size, flags);
	if (IS_ERR(obj))
		return obj;

	ret = etnaviv_gem_obj_add(dev, obj);
	if (ret < 0) {
		drm_gem_object_unreference_unlocked(obj);
		return ERR_PTR(ret);
	}

	return obj;
}

int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
	struct reservation_object *robj, const struct etnaviv_gem_ops *ops,
	struct etnaviv_gem_object **res)
{
	struct drm_gem_object *obj;
	int ret;

	ret = etnaviv_gem_new_impl(dev, size, flags, robj, ops, &obj);
	if (ret)
		return ret;

	drm_gem_private_object_init(dev, obj, size);

	*res = to_etnaviv_bo(obj);

	return 0;
}

struct get_pages_work {
	struct work_struct work;
	struct mm_struct *mm;
	struct task_struct *task;
	struct etnaviv_gem_object *etnaviv_obj;
};

static struct page **etnaviv_gem_userptr_do_get_pages(
	struct etnaviv_gem_object *etnaviv_obj, struct mm_struct *mm, struct task_struct *task)
{
	int ret = 0, pinned, npages = etnaviv_obj->base.size >> PAGE_SHIFT;
	struct page **pvec;
	uintptr_t ptr;
	unsigned int flags = 0;

	pvec = drm_malloc_ab(npages, sizeof(struct page *));
	if (!pvec)
		return ERR_PTR(-ENOMEM);

	if (!etnaviv_obj->userptr.ro)
		flags |= FOLL_WRITE;

	pinned = 0;
	ptr = etnaviv_obj->userptr.ptr;

	down_read(&mm->mmap_sem);
	while (pinned < npages) {
		ret = get_user_pages_remote(task, mm, ptr, npages - pinned,
					    flags, pvec + pinned, NULL, NULL);
		if (ret < 0)
			break;

		ptr += ret * PAGE_SIZE;
		pinned += ret;
	}
	up_read(&mm->mmap_sem);

	if (ret < 0) {
		release_pages(pvec, pinned, 0);
		drm_free_large(pvec);
		return ERR_PTR(ret);
	}

	return pvec;
}

static void __etnaviv_gem_userptr_get_pages(struct work_struct *_work)
{
	struct get_pages_work *work = container_of(_work, typeof(*work), work);
	struct etnaviv_gem_object *etnaviv_obj = work->etnaviv_obj;
	struct page **pvec;

	pvec = etnaviv_gem_userptr_do_get_pages(etnaviv_obj, work->mm, work->task);

	mutex_lock(&etnaviv_obj->lock);
	if (IS_ERR(pvec)) {
		etnaviv_obj->userptr.work = ERR_CAST(pvec);
	} else {
		etnaviv_obj->userptr.work = NULL;
		etnaviv_obj->pages = pvec;
	}

	mutex_unlock(&etnaviv_obj->lock);
	drm_gem_object_unreference_unlocked(&etnaviv_obj->base);

	mmput(work->mm);
	put_task_struct(work->task);
	kfree(work);
}

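/*
 * Pin the pages backing a userptr object.  If the caller's mm matches the
 * mm the pointer belongs to, try the fast path via __get_user_pages_fast();
 * otherwise, or if that only partially succeeds, hand the pinning off to a
 * worker and return -EAGAIN so the caller retries once the work completes.
 */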
static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	struct page **pvec = NULL;
	struct get_pages_work *work;
	struct mm_struct *mm;
	int ret, pinned, npages = etnaviv_obj->base.size >> PAGE_SHIFT;

	if (etnaviv_obj->userptr.work) {
		if (IS_ERR(etnaviv_obj->userptr.work)) {
			ret = PTR_ERR(etnaviv_obj->userptr.work);
			etnaviv_obj->userptr.work = NULL;
		} else {
			ret = -EAGAIN;
		}
		return ret;
	}

	mm = get_task_mm(etnaviv_obj->userptr.task);
	pinned = 0;
	if (mm == current->mm) {
		pvec = drm_malloc_ab(npages, sizeof(struct page *));
		if (!pvec) {
			mmput(mm);
			return -ENOMEM;
		}

		pinned = __get_user_pages_fast(etnaviv_obj->userptr.ptr, npages,
					       !etnaviv_obj->userptr.ro, pvec);
		if (pinned < 0) {
			drm_free_large(pvec);
			mmput(mm);
			return pinned;
		}

		if (pinned == npages) {
			etnaviv_obj->pages = pvec;
			mmput(mm);
			return 0;
		}
	}

	release_pages(pvec, pinned, 0);
	drm_free_large(pvec);

	work = kmalloc(sizeof(*work), GFP_KERNEL);
	if (!work) {
		mmput(mm);
		return -ENOMEM;
	}

	get_task_struct(current);
	drm_gem_object_reference(&etnaviv_obj->base);

	work->mm = mm;
	work->task = current;
	work->etnaviv_obj = etnaviv_obj;

	etnaviv_obj->userptr.work = &work->work;
	INIT_WORK(&work->work, __etnaviv_gem_userptr_get_pages);

	etnaviv_queue_work(etnaviv_obj->base.dev, &work->work);

	return -EAGAIN;
}

static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj)
{
	if (etnaviv_obj->sgt) {
		etnaviv_gem_scatterlist_unmap(etnaviv_obj);
		sg_free_table(etnaviv_obj->sgt);
		kfree(etnaviv_obj->sgt);
	}
	if (etnaviv_obj->pages) {
		int npages = etnaviv_obj->base.size >> PAGE_SHIFT;

		release_pages(etnaviv_obj->pages, npages, 0);
		drm_free_large(etnaviv_obj->pages);
	}
	put_task_struct(etnaviv_obj->userptr.task);
}

static int etnaviv_gem_userptr_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
		struct vm_area_struct *vma)
{
	return -EINVAL;
}

static const struct etnaviv_gem_ops etnaviv_gem_userptr_ops = {
	.get_pages = etnaviv_gem_userptr_get_pages,
	.release = etnaviv_gem_userptr_release,
	.vmap = etnaviv_gem_vmap_impl,
	.mmap = etnaviv_gem_userptr_mmap_obj,
};

int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
	uintptr_t ptr, u32 size, u32 flags, u32 *handle)
{
	struct etnaviv_gem_object *etnaviv_obj;
	int ret;

	ret = etnaviv_gem_new_private(dev, size, ETNA_BO_CACHED, NULL,
				      &etnaviv_gem_userptr_ops, &etnaviv_obj);
	if (ret)
		return ret;

	etnaviv_obj->userptr.ptr = ptr;
	etnaviv_obj->userptr.task = current;
	etnaviv_obj->userptr.ro = !(flags & ETNA_USERPTR_WRITE);
	get_task_struct(current);

	ret = etnaviv_gem_obj_add(dev, &etnaviv_obj->base);
	if (ret)
		goto unreference;

	ret = drm_gem_handle_create(file, &etnaviv_obj->base, handle);
unreference:
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(&etnaviv_obj->base);

	return ret;
}