/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

static void msm_gem_vunmap_locked(struct drm_gem_object *obj);

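/* physical address of a VRAM-carveout backed object (used when there is no IOMMU): */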
static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}

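/* objects without a VRAM carveout node are backed by shmem pages: */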
static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	return !msm_obj->vram_node;
}

/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	spin_lock(&priv->vram.lock);
	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
	spin_unlock(&priv->vram.lock);
	if (ret) {
		kvfree(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = phys_to_page(paddr);
		paddr += PAGE_SIZE;
	}

	return p;
}

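/* lazily allocate the backing pages (shmem or VRAM carveout) and build the
 * sg table on first use:
 */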
static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			dev_err(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			dev_err(dev->dev, "failed to allocate sgt\n");
			return ERR_CAST(msm_obj->sgt);
		}

		msm_obj->pages = p;

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_map_sg(dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	}

	return msm_obj->pages;
}

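/* release a VRAM carveout allocation: */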
static void put_pages_vram(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	spin_lock(&priv->vram.lock);
	drm_mm_remove_node(msm_obj->vram_node);
	spin_unlock(&priv->vram.lock);

	kvfree(msm_obj->pages);
}

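/* release the backing pages and sg table: */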
static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		/* For non-cached buffers, undo the DMA mapping that was
		 * set up in get_pages(), since display controller, GPU,
		 * etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
		sg_free_table(msm_obj->sgt);
		kfree(msm_obj->sgt);

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else
			put_pages_vram(obj);

		msm_obj->pages = NULL;
	}
}

struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **p;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return ERR_PTR(-EBUSY);
	}

	p = get_pages(obj);
	mutex_unlock(&msm_obj->lock);
	return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
	/* when we start tracking the pin count, then do something here */
}

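/* set up the vma mapping attributes according to the object's caching flags: */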
int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (msm_obj->flags & MSM_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(obj->filp);
		vma->vm_pgoff = 0;
		vma->vm_file = obj->filp;

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return msm_gem_mmap_obj(vma->vm_private_data, vma);
}

int msm_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int ret;

	/*
	 * vm_ops.open/drm_gem_mmap_obj and close get and put
	 * a reference on obj. So, we don't need to hold one here.
	 */
	ret = mutex_lock_interruptible(&msm_obj->lock);
	if (ret)
		goto out;

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return VM_FAULT_SIGBUS;
	}

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	ret = vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));

out_unlock:
	mutex_unlock(&msm_obj->lock);
out:
	switch (ret) {
	case -EAGAIN:
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);
	offset = mmap_offset(obj);
	mutex_unlock(&msm_obj->lock);
	return offset;
}

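/* add a vma (mapping into an address space) to the object's vma list;
 * caller must hold msm_obj->lock:
 */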
static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
	if (!vma)
		return ERR_PTR(-ENOMEM);

	vma->aspace = aspace;

	list_add_tail(&vma->list, &msm_obj->vmas);

	return vma;
}

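/* look up the vma for a given address space, if any; caller must hold msm_obj->lock: */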
static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace == aspace)
			return vma;
	}

	return NULL;
}

static void del_vma(struct msm_gem_vma *vma)
{
	if (!vma)
		return;

	list_del(&vma->list);
	kfree(vma);
}

/* Called with msm_obj->lock locked */
static void
put_iova(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma, *tmp;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
		msm_gem_unmap_vma(vma->aspace, vma, msm_obj->sgt);
		del_vma(vma);
	}
}

/* get iova, taking a reference. Should have a matching put */
int msm_gem_get_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;
	int ret = 0;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return -EBUSY;
	}

	vma = lookup_vma(obj, aspace);

	if (!vma) {
		struct page **pages;

		vma = add_vma(obj, aspace);
		if (IS_ERR(vma)) {
			/* don't return with msm_obj->lock still held: */
			mutex_unlock(&msm_obj->lock);
			return PTR_ERR(vma);
		}

		pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		ret = msm_gem_map_vma(aspace, vma, msm_obj->sgt,
				obj->size >> PAGE_SHIFT);
		if (ret)
			goto fail;
	}

	*iova = vma->iova;

	mutex_unlock(&msm_obj->lock);
	return 0;

fail:
	del_vma(vma);

	mutex_unlock(&msm_obj->lock);
	return ret;
}

/* get iova without taking a reference, used in places where you have
 * already done a 'msm_gem_get_iova()'.
 */
uint64_t msm_gem_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	mutex_lock(&msm_obj->lock);
	vma = lookup_vma(obj, aspace);
	mutex_unlock(&msm_obj->lock);
	WARN_ON(!vma);

	return vma ? vma->iova : 0;
}

void msm_gem_put_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	// XXX TODO ..
	// NOTE: probably don't need a _locked() version.. we wouldn't
	// normally unmap here, but instead just mark that it could be
	// unmapped (if the iova refcnt drops to zero), but then later
	// if another _get_iova_locked() fails we can start unmapping
	// things that are no longer needed..
}

int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

fail:
	return ret;
}

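/* map the object into kernel virtual address space (write-combined), taking
 * a vmap reference that must be dropped with msm_gem_put_vaddr():
 */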
void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return ERR_PTR(-EBUSY);
	}

	/* increment vmap_count *before* vmap() call, so shrinker can
	 * check vmap_count (is_vunmapable()) outside of msm_obj->lock.
	 * This guarantees that we won't try to msm_gem_vunmap() this
	 * same object from within the vmap() call (while we already
	 * hold msm_obj->lock)
	 */
	msm_obj->vmap_count++;

	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
		if (msm_obj->vaddr == NULL) {
			ret = -ENOMEM;
			goto fail;
		}
	}

	mutex_unlock(&msm_obj->lock);
	return msm_obj->vaddr;

fail:
	msm_obj->vmap_count--;
	mutex_unlock(&msm_obj->lock);
	return ERR_PTR(ret);
}

void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);
	WARN_ON(msm_obj->vmap_count < 1);
	msm_obj->vmap_count--;
	mutex_unlock(&msm_obj->lock);
}

/* Update madvise status, returns true if not purged, else
 * false or -errno.
 */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	madv = msm_obj->madv;

	mutex_unlock(&msm_obj->lock);

	return (madv != __MSM_MADV_PURGED);
}

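/* free the backing store of a purgeable object, returning its memory to the
 * system:
 */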
void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
	WARN_ON(!is_purgeable(msm_obj));
	WARN_ON(obj->import_attach);

	mutex_lock_nested(&msm_obj->lock, subclass);

	put_iova(obj);

	msm_gem_vunmap_locked(obj);

	put_pages(obj);

	msm_obj->madv = __MSM_MADV_PURGED;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * possible back to the system, as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
			0, (loff_t)-1);

	mutex_unlock(&msm_obj->lock);
}

static void msm_gem_vunmap_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
		return;

	vunmap(msm_obj->vaddr);
	msm_obj->vaddr = NULL;
}

void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock_nested(&msm_obj->lock, subclass);
	msm_gem_vunmap_locked(obj);
	mutex_unlock(&msm_obj->lock);
}

/* must be called before _move_to_active().. */
int msm_gem_sync_object(struct drm_gem_object *obj,
		struct msm_fence_context *fctx, bool exclusive)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	int i, ret;

	if (!exclusive) {
		/* NOTE: _reserve_shared() must happen before _add_shared_fence(),
		 * which makes this a slightly strange place to call it. OTOH this
		 * is a convenient can-fail point to hook it in. (And similar to
		 * how etnaviv and nouveau handle this.)
		 */
		ret = reservation_object_reserve_shared(msm_obj->resv);
		if (ret)
			return ret;
	}

	fobj = reservation_object_get_list(msm_obj->resv);
	if (!fobj || (fobj->shared_count == 0)) {
		fence = reservation_object_get_excl(msm_obj->resv);
		/* don't need to wait on our own fences, since ring is fifo */
		if (fence && (fence->context != fctx->context)) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	if (!exclusive || !fobj)
		return 0;

	for (i = 0; i < fobj->shared_count; i++) {
		fence = rcu_dereference_protected(fobj->shared[i],
				reservation_object_held(msm_obj->resv));
		if (fence->context != fctx->context) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	return 0;
}

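/* mark the object active on a GPU and attach the fence that will signal when
 * the GPU is done with it:
 */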
void msm_gem_move_to_active(struct drm_gem_object *obj,
		struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
	msm_obj->gpu = gpu;
	if (exclusive)
		reservation_object_add_excl_fence(msm_obj->resv, fence);
	else
		reservation_object_add_shared_fence(msm_obj->resv, fence);
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &gpu->active_list);
}

void msm_gem_move_to_inactive(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	msm_obj->gpu = NULL;
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
}

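/* wait for any pending GPU access to finish before CPU access (just a poll
 * when MSM_PREP_NOSYNC is set):
 */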
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	bool write = !!(op & MSM_PREP_WRITE);
	unsigned long remain =
		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
	long ret;

	ret = reservation_object_wait_timeout_rcu(msm_obj->resv, write,
			true, remain);
	if (ret == 0)
		return remain == 0 ? -EBUSY : -ETIMEDOUT;
	else if (ret < 0)
		return ret;

	/* TODO cache maintenance */

	return 0;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}

#ifdef CONFIG_DEBUG_FS
static void describe_fence(struct dma_fence *fence, const char *type,
		struct seq_file *m)
{
	if (!dma_fence_is_signaled(fence))
		seq_printf(m, "\t%9s: %s %s seq %u\n", type,
				fence->ops->get_driver_name(fence),
				fence->ops->get_timeline_name(fence),
				fence->seqno);
}

void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct reservation_object *robj = msm_obj->resv;
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	struct msm_gem_vma *vma;
	uint64_t off = drm_vma_node_start(&obj->vma_node);
	const char *madv;

	mutex_lock(&msm_obj->lock);

	switch (msm_obj->madv) {
	case __MSM_MADV_PURGED:
		madv = " purged";
		break;
	case MSM_MADV_DONTNEED:
		madv = " purgeable";
		break;
	case MSM_MADV_WILLNEED:
	default:
		madv = "";
		break;
	}

	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p\t",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, msm_obj->vaddr);

	/* FIXME: we need to print the address space here too */
	list_for_each_entry(vma, &msm_obj->vmas, list)
		seq_printf(m, " %08llx", vma->iova);

	seq_printf(m, " %zu%s\n", obj->size, madv);

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();

	mutex_unlock(&msm_obj->lock);
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_object *msm_obj;
	int count = 0;
	size_t size = 0;

	list_for_each_entry(msm_obj, list, mm_list) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_printf(m, " ");
		msm_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

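/* final teardown when the last reference is dropped: release iovas, kernel
 * mappings and backing pages; called with struct_mutex held:
 */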
void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* object should not be on active list: */
	WARN_ON(is_active(msm_obj));

	list_del(&msm_obj->mm_list);

	mutex_lock(&msm_obj->lock);

	put_iova(obj);

	if (obj->import_attach) {
		if (msm_obj->vaddr)
			dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		if (msm_obj->pages)
			kvfree(msm_obj->pages);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		msm_gem_vunmap_locked(obj);
		put_pages(obj);
	}

	if (msm_obj->resv == &msm_obj->_resv)
		reservation_object_fini(msm_obj->resv);

	drm_gem_object_release(obj);

	mutex_unlock(&msm_obj->lock);
	kfree(msm_obj);
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle)
{
	struct drm_gem_object *obj;
	int ret;

	obj = msm_gem_new(dev, size, flags);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return ret;
}

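/* common initialization shared by msm_gem_new() and msm_gem_import():
 * validate the cache flags and allocate/initialize the msm_gem_object:
 */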
static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct reservation_object *resv,
		struct drm_gem_object **obj,
		bool struct_mutex_locked)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	default:
		dev_err(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	mutex_init(&msm_obj->lock);

	msm_obj->flags = flags;
	msm_obj->madv = MSM_MADV_WILLNEED;

	if (resv) {
		msm_obj->resv = resv;
	} else {
		msm_obj->resv = &msm_obj->_resv;
		reservation_object_init(msm_obj->resv);
	}

	INIT_LIST_HEAD(&msm_obj->submit_entry);
	INIT_LIST_HEAD(&msm_obj->vmas);

	if (struct_mutex_locked) {
		WARN_ON(!mutex_is_locked(&dev->struct_mutex));
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
	} else {
		mutex_lock(&dev->struct_mutex);
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
		mutex_unlock(&dev->struct_mutex);
	}

	*obj = &msm_obj->base;

	return 0;
}

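/* allocate a new GEM object, backed by shmem pages or, when no IOMMU is
 * present, by the VRAM carveout:
 */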
static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags, bool struct_mutex_locked)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_gem_object *obj = NULL;
	bool use_vram = false;
	int ret;

	size = PAGE_ALIGN(size);

	if (!iommu_present(&platform_bus_type))
		use_vram = true;
	else if ((flags & MSM_BO_STOLEN) && priv->vram.size)
		use_vram = true;

	if (WARN_ON(use_vram && !priv->vram.size))
		return ERR_PTR(-EINVAL);

	/* Disallow zero sized objects as they make the underlying
	 * infrastructure grumpy
	 */
	if (size == 0)
		return ERR_PTR(-EINVAL);

	ret = msm_gem_new_impl(dev, size, flags, NULL, &obj, struct_mutex_locked);
	if (ret)
		goto fail;

	if (use_vram) {
		struct msm_gem_vma *vma;
		struct page **pages;

		vma = add_vma(obj, NULL);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto fail;
		}

		to_msm_bo(obj)->vram_node = &vma->node;

		drm_gem_private_object_init(dev, obj, size);

		pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		vma->iova = physaddr(obj);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
	}

	return obj;

fail:
	drm_gem_object_unreference_unlocked(obj);
	return ERR_PTR(ret);
}

struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, true);
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, false);
}

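/* import a dma-buf: wrap the sg_table provided by the exporter in a GEM
 * object rather than allocating new backing pages:
 */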
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	uint32_t size;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!iommu_present(&platform_bus_type)) {
		dev_err(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(dmabuf->size);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj, false);
	if (ret)
		goto fail;

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	mutex_lock(&msm_obj->lock);
	msm_obj->sgt = sgt;
	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!msm_obj->pages) {
		mutex_unlock(&msm_obj->lock);
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
	if (ret) {
		mutex_unlock(&msm_obj->lock);
		goto fail;
	}

	mutex_unlock(&msm_obj->lock);
	return obj;

fail:
	drm_gem_object_unreference_unlocked(obj);
	return ERR_PTR(ret);
}