/*
 * drivers/gpu/drm/omapdrm/omap_gem.c
 *
 * Copyright (C) 2011 Texas Instruments
 * Author: Rob Clark <rob.clark@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/shmem_fs.h>
#include <linux/spinlock.h>

#include <drm/drm_vma_manager.h>

#include "omap_drv.h"
#include "omap_dmm_tiler.h"

/*
 * GEM buffer object implementation.
 */

/* note: we use upper 8 bits of flags for driver-internal flags: */
#define OMAP_BO_DMA		0x01000000	/* actually is physically contiguous */
#define OMAP_BO_EXT_SYNC	0x02000000	/* externally allocated sync object */
#define OMAP_BO_EXT_MEM		0x04000000	/* externally allocated memory */
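
/*
 * These driver-internal values share the uint32_t flags field with the
 * OMAP_BO_* flags that userspace passes in, so flag bits coming from
 * userspace must not be allowed to set them. A minimal sketch of the idea,
 * using only the user-visible flags referenced in this file (the exact mask
 * applied at the ioctl boundary may differ):
 *
 *	flags &= OMAP_BO_SCANOUT | OMAP_BO_CACHE_MASK | OMAP_BO_TILED;
 */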

struct omap_gem_object {
	struct drm_gem_object base;

	struct list_head mm_list;

	uint32_t flags;

	/** width/height for tiled formats (rounded up to slot boundaries) */
	uint16_t width, height;

	/** roll applied when mapping to DMM */
	uint32_t roll;

	/**
	 * If buffer is allocated physically contiguous, the OMAP_BO_DMA flag
	 * is set and the paddr is valid. Also if the buffer is remapped in
	 * TILER and paddr_cnt > 0, then paddr is valid. But if you are using
	 * the physical address and OMAP_BO_DMA is not set, then you should
	 * be going thru omap_gem_{get,put}_paddr() to ensure the mapping is
	 * not removed from under your feet.
	 *
	 * Note that OMAP_BO_SCANOUT is a hint from userspace that a DMA capable
	 * buffer is requested, but doesn't mean that it is. Use the
	 * OMAP_BO_DMA flag to determine if the buffer has a DMA capable
	 * physical address.
	 */
	dma_addr_t paddr;

	/**
	 * # of users of paddr
	 */
	uint32_t paddr_cnt;

	/**
	 * tiler block used when buffer is remapped in DMM/TILER.
	 */
	struct tiler_block *block;

	/**
	 * Array of backing pages, if allocated. Note that pages are never
	 * allocated for buffers originally allocated from contiguous memory.
	 */
	struct page **pages;

	/** addresses corresponding to pages in above array */
	dma_addr_t *addrs;

	/**
	 * Virtual address, if mapped.
	 */
	void *vaddr;

	/**
	 * sync-object allocated on demand (if needed)
	 *
	 * Per-buffer sync-object for tracking pending and completed hw/dma
	 * read and write operations. The layout in memory is dictated by
	 * the SGX firmware, which uses this information to stall the command
	 * stream if a surface is not ready yet.
	 *
	 * Note that when buffer is used by SGX, the sync-object needs to be
	 * allocated from a special heap of sync-objects. This way many sync
	 * objects can be packed in a page, and not waste GPU virtual address
	 * space. Because of this we have to have a omap_gem_set_sync_object()
	 * API to allow replacement of the syncobj after it has (potentially)
	 * already been allocated. A bit ugly but I haven't thought of a
	 * better alternative.
	 */
	struct {
		uint32_t write_pending;
		uint32_t write_complete;
		uint32_t read_pending;
		uint32_t read_complete;
	} *sync;
};

#define to_omap_bo(x) container_of(x, struct omap_gem_object, base)

/* To deal with userspace mmap'ings of 2d tiled buffers, which (a) are
 * not necessarily pinned in TILER all the time, and (b) when they are
 * they are not necessarily page aligned, we reserve one or more small
 * regions in each of the 2d containers to use as a user-GART where we
 * can create a second page-aligned mapping of parts of the buffer
 * being accessed from userspace.
 *
 * Note that we could optimize slightly when we know that multiple
 * tiler containers are backed by the same PAT.. but I'll leave that
 * for later..
 */
#define NUM_USERGART_ENTRIES 2
struct omap_drm_usergart_entry {
	struct tiler_block *block;	/* the reserved tiler block */
	dma_addr_t paddr;
	struct drm_gem_object *obj;	/* the current pinned obj */
	pgoff_t obj_pgoff;		/* page offset of obj currently
					   mapped in */
};

struct omap_drm_usergart {
	struct omap_drm_usergart_entry entry[NUM_USERGART_ENTRIES];
	int height;		/* height in rows */
	int height_shift;	/* ilog2(height in rows) */
	int slot_shift;		/* ilog2(width per slot) */
	int stride_pfn;		/* stride in pages */
	int last;		/* index of last used entry */
};

/* -----------------------------------------------------------------------------
 * Helpers
 */

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;
	size_t size;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* Make it mmapable */
	size = omap_gem_mmap_size(obj);
	ret = drm_gem_create_mmap_offset_size(obj, size);
	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

/* GEM objects can either be allocated from contiguous memory (in which
 * case obj->filp==NULL), or w/ shmem backing (obj->filp!=NULL). But non
 * contiguous buffers can be remapped in TILER/DMM if they need to be
 * contiguous... but we don't do this all the time to reduce pressure
 * on TILER/DMM space when we know at allocation time that the buffer
 * will need to be scanned out.
 */
static inline bool is_shmem(struct drm_gem_object *obj)
{
	return obj->filp != NULL;
}

/* -----------------------------------------------------------------------------
 * Eviction
 */

static void evict_entry(struct drm_gem_object *obj,
		enum tiler_fmt fmt, struct omap_drm_usergart_entry *entry)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct omap_drm_private *priv = obj->dev->dev_private;
	int n = priv->usergart[fmt].height;
	size_t size = PAGE_SIZE * n;
	loff_t off = mmap_offset(obj) +
			(entry->obj_pgoff << PAGE_SHIFT);
	const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);

	if (m > 1) {
		int i;
		/* if stride > PAGE_SIZE then sparse mapping: */
		for (i = n; i > 0; i--) {
			unmap_mapping_range(obj->dev->anon_inode->i_mapping,
					    off, PAGE_SIZE, 1);
			off += PAGE_SIZE * m;
		}
	} else {
		unmap_mapping_range(obj->dev->anon_inode->i_mapping,
				    off, size, 1);
	}

	entry->obj = NULL;
}

/* Evict a buffer from usergart, if it is mapped there */
static void evict(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct omap_drm_private *priv = obj->dev->dev_private;

	if (omap_obj->flags & OMAP_BO_TILED) {
		enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
		int i;

		if (!priv->usergart)
			return;

		for (i = 0; i < NUM_USERGART_ENTRIES; i++) {
			struct omap_drm_usergart_entry *entry =
				&priv->usergart[fmt].entry[i];

			if (entry->obj == obj)
				evict_entry(obj, fmt, entry);
		}
	}
}

/* -----------------------------------------------------------------------------
 * Page Management
 */

/** ensure backing pages are allocated */
static int omap_gem_attach_pages(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct page **pages;
	int npages = obj->size >> PAGE_SHIFT;
	int i, ret;
	dma_addr_t *addrs;

	WARN_ON(omap_obj->pages);

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages));
		return PTR_ERR(pages);
	}

	/* for non-cached buffers, ensure the new pages are clean because
	 * DSS, GPU, etc. are not cache coherent:
	 */
	if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
		addrs = kmalloc(npages * sizeof(*addrs), GFP_KERNEL);
		if (!addrs) {
			ret = -ENOMEM;
			goto free_pages;
		}

		for (i = 0; i < npages; i++) {
			addrs[i] = dma_map_page(dev->dev, pages[i],
					0, PAGE_SIZE, DMA_BIDIRECTIONAL);
		}
	} else {
		addrs = kzalloc(npages * sizeof(*addrs), GFP_KERNEL);
		if (!addrs) {
			ret = -ENOMEM;
			goto free_pages;
		}
	}

	omap_obj->addrs = addrs;
	omap_obj->pages = pages;

	return 0;

free_pages:
	drm_gem_put_pages(obj, pages, true, false);

	return ret;
}

/* acquire pages when needed (for example, for DMA where a physically
 * contiguous buffer is not required)
 */
static int get_pages(struct drm_gem_object *obj, struct page ***pages)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	if (is_shmem(obj) && !omap_obj->pages) {
		ret = omap_gem_attach_pages(obj);
		if (ret) {
			dev_err(obj->dev->dev, "could not attach pages\n");
			return ret;
		}
	}

	/* TODO: even phys-contig.. we should have a list of pages? */
	*pages = omap_obj->pages;

	return 0;
}

/** release backing pages */
static void omap_gem_detach_pages(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	/* for non-cached buffers, ensure the new pages are clean because
	 * DSS, GPU, etc. are not cache coherent:
	 */
	if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
		int i, npages = obj->size >> PAGE_SHIFT;
		for (i = 0; i < npages; i++) {
			dma_unmap_page(obj->dev->dev, omap_obj->addrs[i],
					PAGE_SIZE, DMA_BIDIRECTIONAL);
		}
	}

	kfree(omap_obj->addrs);
	omap_obj->addrs = NULL;

	drm_gem_put_pages(obj, omap_obj->pages, true, false);
	omap_obj->pages = NULL;
}

/* get buffer flags */
uint32_t omap_gem_flags(struct drm_gem_object *obj)
{
	return to_omap_bo(obj)->flags;
}

uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	mutex_lock(&obj->dev->struct_mutex);
	offset = mmap_offset(obj);
	mutex_unlock(&obj->dev->struct_mutex);
	return offset;
}

/** get mmap size */
size_t omap_gem_mmap_size(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	size_t size = obj->size;

	if (omap_obj->flags & OMAP_BO_TILED) {
		/* for tiled buffers, the virtual size has stride rounded up
		 * to 4kb.. (to hide the fact that row n+1 might start 16kb or
		 * 32kb later!). But we don't back the entire buffer with
		 * pages, only the valid picture part.. so need to adjust for
		 * this in the size used to mmap and generate mmap offset
		 */
		size = tiler_vsize(gem2fmt(omap_obj->flags),
				omap_obj->width, omap_obj->height);
	}

	return size;
}

/* get tiled size, returns -EINVAL if not tiled buffer */
int omap_gem_tiled_size(struct drm_gem_object *obj, uint16_t *w, uint16_t *h)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	if (omap_obj->flags & OMAP_BO_TILED) {
		*w = omap_obj->width;
		*h = omap_obj->height;
		return 0;
	}
	return -EINVAL;
}

/* -----------------------------------------------------------------------------
 * Fault Handling
 */

/* Normal handling for the case of faulting in non-tiled buffers */
static int fault_1d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	unsigned long pfn;
	pgoff_t pgoff;

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	if (omap_obj->pages) {
		omap_gem_cpu_sync(obj, pgoff);
		pfn = page_to_pfn(omap_obj->pages[pgoff]);
	} else {
		BUG_ON(!(omap_obj->flags & OMAP_BO_DMA));
		pfn = (omap_obj->paddr >> PAGE_SHIFT) + pgoff;
	}

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);

	return vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);
}

/* Special handling for the case of faulting in 2d tiled buffers */
static int fault_2d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct omap_drm_private *priv = obj->dev->dev_private;
	struct omap_drm_usergart_entry *entry;
	enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
	struct page *pages[64];  /* XXX is this too much to have on stack? */
	unsigned long pfn;
	pgoff_t pgoff, base_pgoff;
	void __user *vaddr;
	int i, ret, slots;

	/*
	 * Note the height of the slot is also equal to the number of pages
	 * that need to be mapped in to fill 4kb wide CPU page. If the slot
	 * height is 64, then 64 pages fill a 4kb wide by 64 row region.
	 */
	const int n = priv->usergart[fmt].height;
	const int n_shift = priv->usergart[fmt].height_shift;

	/*
	 * If buffer width in bytes > PAGE_SIZE then the virtual stride is
	 * rounded up to the next multiple of PAGE_SIZE.. this needs to be
	 * taken into account in some of the math, so figure out virtual
	 * stride in pages
	 */
	const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	/*
	 * Actual address we start mapping at is rounded down to previous slot
	 * boundary in the y direction:
	 */
	base_pgoff = round_down(pgoff, m << n_shift);

	/* figure out buffer width in slots */
	slots = omap_obj->width >> priv->usergart[fmt].slot_shift;

	vaddr = vmf->virtual_address - ((pgoff - base_pgoff) << PAGE_SHIFT);

	entry = &priv->usergart[fmt].entry[priv->usergart[fmt].last];

	/* evict previous buffer using this usergart entry, if any: */
	if (entry->obj)
		evict_entry(entry->obj, fmt, entry);

	entry->obj = obj;
	entry->obj_pgoff = base_pgoff;

	/* now convert base_pgoff to phys offset from virt offset: */
	base_pgoff = (base_pgoff >> n_shift) * slots;

	/* for wider-than 4k.. figure out which part of the slot-row we want: */
	if (m > 1) {
		int off = pgoff % m;
		entry->obj_pgoff += off;
		base_pgoff /= m;
		slots = min(slots - (off << n_shift), n);
		base_pgoff += off << n_shift;
		vaddr += off << PAGE_SHIFT;
	}

	/*
	 * Map in pages. Beyond the valid pixel part of the buffer, we set
	 * pages[i] to NULL to get a dummy page mapped in.. if someone
	 * reads/writes it they will get random/undefined content, but at
	 * least it won't be corrupting whatever other random page used to
	 * be mapped in, or other undefined behavior.
	 */
	memcpy(pages, &omap_obj->pages[base_pgoff],
			sizeof(struct page *) * slots);
	memset(pages + slots, 0,
			sizeof(struct page *) * (n - slots));

	ret = tiler_pin(entry->block, pages, ARRAY_SIZE(pages), 0, true);
	if (ret) {
		dev_err(obj->dev->dev, "failed to pin: %d\n", ret);
		return ret;
	}

	pfn = entry->paddr >> PAGE_SHIFT;

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);

	for (i = n; i > 0; i--) {
		vm_insert_mixed(vma, (unsigned long)vaddr, pfn);
		pfn += priv->usergart[fmt].stride_pfn;
		vaddr += PAGE_SIZE * m;
	}

	/* simple round-robin: */
	priv->usergart[fmt].last = (priv->usergart[fmt].last + 1)
				% NUM_USERGART_ENTRIES;

	return 0;
}

/**
 * omap_gem_fault - pagefault handler for GEM objects
 * @vma: the VMA of the GEM object
 * @vmf: fault detail
 *
 * Invoked when a fault occurs on an mmap of a GEM managed area. GEM
 * does most of the work for us including the actual map/unmap calls
 * but we need to do the actual page work.
 *
 * The VMA was set up by GEM. In doing so it also ensured that the
 * vma->vm_private_data points to the GEM object that is backing this
 * mapping.
 */
int omap_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct drm_device *dev = obj->dev;
	struct page **pages;
	int ret;

	/* Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet
	 */
	mutex_lock(&dev->struct_mutex);

	/* if a shmem backed object, make sure we have pages attached now */
	ret = get_pages(obj, &pages);
	if (ret)
		goto fail;

	/* where should we do corresponding put_pages().. we are mapping
	 * the original page, rather than thru a GART, so we can't rely
	 * on eviction to trigger this. But munmap() or all mappings should
	 * probably trigger put_pages()?
	 */

	if (omap_obj->flags & OMAP_BO_TILED)
		ret = fault_2d(obj, vma, vmf);
	else
		ret = fault_1d(obj, vma, vmf);


fail:
	mutex_unlock(&dev->struct_mutex);
	switch (ret) {
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

/** We override mainly to fix up some of the vm mapping flags.. */
int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return omap_gem_mmap_obj(vma->vm_private_data, vma);
}

int omap_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (omap_obj->flags & OMAP_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (omap_obj->flags & OMAP_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * We do have some private objects, at least for scanout buffers
		 * on hardware without DMM/TILER. But these are allocated write-
		 * combine
		 */
		if (WARN_ON(!obj->filp))
			return -EINVAL;

		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		vma->vm_pgoff = 0;
		vma->vm_file = get_file(obj->filp);

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

/* -----------------------------------------------------------------------------
 * Dumb Buffers
 */

/**
 * omap_gem_dumb_create - create a dumb buffer
 * @file: our client file
 * @dev: our device
 * @args: the requested arguments copied from userspace
 *
 * Allocate a buffer suitable for use for a frame buffer of the
 * form described by user space. Give userspace a handle by which
 * to reference it.
 */
int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	union omap_gem_size gsize;

	args->pitch = align_pitch(0, args->width, args->bpp);
	args->size = PAGE_ALIGN(args->pitch * args->height);

	gsize = (union omap_gem_size){
		.bytes = args->size,
	};

	return omap_gem_new_handle(dev, file, gsize,
			OMAP_BO_SCANOUT | OMAP_BO_WC, &args->handle);
}

/**
 * omap_gem_dumb_map_offset - buffer mapping for dumb interface
 * @file: our drm client file
 * @dev: drm device
 * @handle: GEM handle to the object (from dumb_create)
 * @offset: returned fake mmap offset for the buffer
 *
 * Do the necessary setup to allow the mapping of the frame buffer
 * into user memory. We don't have to do much here at the moment.
 */
int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(dev, file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = omap_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

fail:
	return ret;
}

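/*
 * For reference, a minimal userspace sketch of how the two entry points above
 * are reached through the generic DRM dumb-buffer ioctls (illustrative only,
 * error handling omitted; assumes an already-opened DRM file descriptor fd):
 *
 *	struct drm_mode_create_dumb create = {
 *		.width = 640, .height = 480, .bpp = 32,
 *	};
 *	struct drm_mode_map_dumb map = { 0 };
 *	void *ptr;
 *
 *	ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);	// -> omap_gem_dumb_create()
 *	map.handle = create.handle;
 *	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);	// -> omap_gem_dumb_map_offset()
 *	ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, map.offset);			// faults land in omap_gem_fault()
 */
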
#ifdef CONFIG_DRM_FBDEV_EMULATION
/* Set scrolling position. This allows us to implement fast scrolling
 * for console.
 *
 * Call only from non-atomic contexts.
 */
int omap_gem_roll(struct drm_gem_object *obj, uint32_t roll)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	uint32_t npages = obj->size >> PAGE_SHIFT;
	int ret = 0;

	if (roll > npages) {
		dev_err(obj->dev->dev, "invalid roll: %d\n", roll);
		return -EINVAL;
	}

	omap_obj->roll = roll;

	mutex_lock(&obj->dev->struct_mutex);

	/* if we aren't mapped yet, we don't need to do anything */
	if (omap_obj->block) {
		struct page **pages;
		ret = get_pages(obj, &pages);
		if (ret)
			goto fail;
		ret = tiler_pin(omap_obj->block, pages, npages, roll, true);
		if (ret)
			dev_err(obj->dev->dev, "could not repin: %d\n", ret);
	}

fail:
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}
#endif

/* -----------------------------------------------------------------------------
 * Memory Management & DMA Sync
 */

/**
 * shmem buffers that are mapped cached can simulate coherency via using
 * page faulting to keep track of dirty pages
 */
static inline bool is_cached_coherent(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	return is_shmem(obj) &&
		((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED);
}

/* Sync the buffer for CPU access.. note pages should already be
 * attached, ie. omap_gem_get_pages()
 */
void omap_gem_cpu_sync(struct drm_gem_object *obj, int pgoff)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (is_cached_coherent(obj) && omap_obj->addrs[pgoff]) {
		dma_unmap_page(dev->dev, omap_obj->addrs[pgoff],
				PAGE_SIZE, DMA_BIDIRECTIONAL);
		omap_obj->addrs[pgoff] = 0;
	}
}

/* sync the buffer for DMA access */
void omap_gem_dma_sync(struct drm_gem_object *obj,
		enum dma_data_direction dir)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (is_cached_coherent(obj)) {
		int i, npages = obj->size >> PAGE_SHIFT;
		struct page **pages = omap_obj->pages;
		bool dirty = false;

		for (i = 0; i < npages; i++) {
			if (!omap_obj->addrs[i]) {
				omap_obj->addrs[i] = dma_map_page(dev->dev, pages[i], 0,
						PAGE_SIZE, DMA_BIDIRECTIONAL);
				dirty = true;
			}
		}

		if (dirty) {
			unmap_mapping_range(obj->filp->f_mapping, 0,
					omap_gem_mmap_size(obj), 1);
		}
	}
}

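/*
 * Rough sketch of how the two helpers above pair up for CACHED shmem buffers:
 * omap_gem_dma_sync() (re)maps any page the CPU has touched since the last
 * sync (addrs[i] == 0) so its cache lines get cleaned, and then zaps the
 * userspace mapping so the next CPU access faults; when that fault comes in
 * through omap_gem_fault()/fault_1d(), omap_gem_cpu_sync() unmaps the page
 * again and clears addrs[pgoff], marking it as CPU-dirty for the following
 * omap_gem_dma_sync(). This is the "simulate coherency via page faulting"
 * scheme mentioned in is_cached_coherent().
 */
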
/* Get physical address for DMA.. if 'remap' is true, and the buffer is not
 * already contiguous, remap it to pin in physically contiguous memory.. (ie.
 * map in TILER)
 */
int omap_gem_get_paddr(struct drm_gem_object *obj,
		dma_addr_t *paddr, bool remap)
{
	struct omap_drm_private *priv = obj->dev->dev_private;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	mutex_lock(&obj->dev->struct_mutex);

	if (remap && is_shmem(obj) && priv->has_dmm) {
		if (omap_obj->paddr_cnt == 0) {
			struct page **pages;
			uint32_t npages = obj->size >> PAGE_SHIFT;
			enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
			struct tiler_block *block;

			BUG_ON(omap_obj->block);

			ret = get_pages(obj, &pages);
			if (ret)
				goto fail;

			if (omap_obj->flags & OMAP_BO_TILED) {
				block = tiler_reserve_2d(fmt,
						omap_obj->width,
						omap_obj->height, 0);
			} else {
				block = tiler_reserve_1d(obj->size);
			}

			if (IS_ERR(block)) {
				ret = PTR_ERR(block);
				dev_err(obj->dev->dev,
					"could not remap: %d (%d)\n", ret, fmt);
				goto fail;
			}

			/* TODO: enable async refill.. */
			ret = tiler_pin(block, pages, npages,
					omap_obj->roll, true);
			if (ret) {
				tiler_release(block);
				dev_err(obj->dev->dev,
						"could not pin: %d\n", ret);
				goto fail;
			}

			omap_obj->paddr = tiler_ssptr(block);
			omap_obj->block = block;

			DBG("got paddr: %pad", &omap_obj->paddr);
		}

		omap_obj->paddr_cnt++;

		*paddr = omap_obj->paddr;
	} else if (omap_obj->flags & OMAP_BO_DMA) {
		*paddr = omap_obj->paddr;
	} else {
		ret = -EINVAL;
		goto fail;
	}

fail:
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}

/* Release physical address, when DMA is no longer being performed.. this
 * could potentially unpin and unmap buffers from TILER
 */
void omap_gem_put_paddr(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret;

	mutex_lock(&obj->dev->struct_mutex);
	if (omap_obj->paddr_cnt > 0) {
		omap_obj->paddr_cnt--;
		if (omap_obj->paddr_cnt == 0) {
			ret = tiler_unpin(omap_obj->block);
			if (ret) {
				dev_err(obj->dev->dev,
					"could not unpin pages: %d\n", ret);
			}
			ret = tiler_release(omap_obj->block);
			if (ret) {
				dev_err(obj->dev->dev,
					"could not release unmap: %d\n", ret);
			}
			omap_obj->paddr = 0;
			omap_obj->block = NULL;
		}
	}

	mutex_unlock(&obj->dev->struct_mutex);
}

/* Get rotated scanout address (only valid if already pinned), at the
 * specified orientation and x,y offset from top-left corner of buffer
 * (only valid for tiled 2d buffers)
 */
int omap_gem_rotated_paddr(struct drm_gem_object *obj, uint32_t orient,
		int x, int y, dma_addr_t *paddr)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = -EINVAL;

	mutex_lock(&obj->dev->struct_mutex);
	if ((omap_obj->paddr_cnt > 0) && omap_obj->block &&
			(omap_obj->flags & OMAP_BO_TILED)) {
		*paddr = tiler_tsptr(omap_obj->block, orient, x, y);
		ret = 0;
	}
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

/* Get tiler stride for the buffer (only valid for 2d tiled buffers) */
int omap_gem_tiled_stride(struct drm_gem_object *obj, uint32_t orient)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = -EINVAL;
	if (omap_obj->flags & OMAP_BO_TILED)
		ret = tiler_stride(gem2fmt(omap_obj->flags), orient);
	return ret;
}

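/*
 * Typical in-driver pairing of the pinning API above, as a hedged sketch
 * (illustrative only; real callers such as the scanout/framebuffer code add
 * their own error handling and lifetime management):
 *
 *	dma_addr_t paddr;
 *	int ret;
 *
 *	ret = omap_gem_get_paddr(obj, &paddr, true);	// pin, remapping via TILER if needed
 *	if (ret)
 *		return ret;
 *	// ... program DSS/DMA with paddr ...
 *	omap_gem_put_paddr(obj);			// unpin once the hw is done with it
 */
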
/* if !remap, and we don't have pages backing, then fail, rather than
 * increasing the pin count (which we don't really do yet anyways,
 * because we don't support swapping pages back out). And 'remap'
 * might not be quite the right name, but I wanted to keep it working
 * similarly to omap_gem_get_paddr(). Note though that mutex is not
 * acquired if !remap (because this can be called in atomic ctxt),
 * but probably omap_gem_get_paddr() should be changed to work in the
 * same way. If !remap, a matching omap_gem_put_pages() call is not
 * required (and should not be made).
 */
int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
		bool remap)
{
	int ret;
	if (!remap) {
		struct omap_gem_object *omap_obj = to_omap_bo(obj);
		if (!omap_obj->pages)
			return -ENOMEM;
		*pages = omap_obj->pages;
		return 0;
	}
	mutex_lock(&obj->dev->struct_mutex);
	ret = get_pages(obj, pages);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

/* release pages when DMA no longer being performed */
int omap_gem_put_pages(struct drm_gem_object *obj)
{
	/* do something here if we dynamically attach/detach pages.. at
	 * least they would no longer need to be pinned if everyone has
	 * released the pages..
	 */
	return 0;
}

#ifdef CONFIG_DRM_FBDEV_EMULATION
/* Get kernel virtual address for CPU access.. this more or less only
 * exists for omap_fbdev. This should be called with struct_mutex
 * held.
 */
void *omap_gem_vaddr(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
	if (!omap_obj->vaddr) {
		struct page **pages;
		int ret = get_pages(obj, &pages);
		if (ret)
			return ERR_PTR(ret);
		omap_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
	}
	return omap_obj->vaddr;
}
#endif

/* -----------------------------------------------------------------------------
 * Power Management
 */

#ifdef CONFIG_PM
/* re-pin objects in DMM in resume path: */
int omap_gem_resume(struct device *dev)
{
	struct drm_device *drm_dev = dev_get_drvdata(dev);
	struct omap_drm_private *priv = drm_dev->dev_private;
	struct omap_gem_object *omap_obj;
	int ret = 0;

	list_for_each_entry(omap_obj, &priv->obj_list, mm_list) {
		if (omap_obj->block) {
			struct drm_gem_object *obj = &omap_obj->base;
			uint32_t npages = obj->size >> PAGE_SHIFT;
			WARN_ON(!omap_obj->pages);  /* this can't happen */
			ret = tiler_pin(omap_obj->block,
					omap_obj->pages, npages,
					omap_obj->roll, true);
			if (ret) {
				dev_err(dev, "could not repin: %d\n", ret);
				return ret;
			}
		}
	}

	return 0;
}
#endif

/* -----------------------------------------------------------------------------
 * DebugFS
 */

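/*
 * omap_gem_describe() below prints one line per object; following the format
 * string, the columns are: flags, GEM name, refcount, mmap offset, paddr,
 * paddr_cnt, kernel vaddr and roll, followed either by the tiled dimensions
 * (plus the reserved TILER area when pinned) or by the object size for
 * non-tiled buffers.
 */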
#ifdef CONFIG_DEBUG_FS
void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	uint64_t off;

	off = drm_vma_node_start(&obj->vma_node);

	seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d",
			omap_obj->flags, obj->name, obj->refcount.refcount.counter,
			off, &omap_obj->paddr, omap_obj->paddr_cnt,
			omap_obj->vaddr, omap_obj->roll);

	if (omap_obj->flags & OMAP_BO_TILED) {
		seq_printf(m, " %dx%d", omap_obj->width, omap_obj->height);
		if (omap_obj->block) {
			struct tcm_area *area = &omap_obj->block->area;
			seq_printf(m, " (%dx%d, %dx%d)",
					area->p0.x, area->p0.y,
					area->p1.x, area->p1.y);
		}
	} else {
		seq_printf(m, " %d", obj->size);
	}

	seq_printf(m, "\n");
}

void omap_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct omap_gem_object *omap_obj;
	int count = 0;
	size_t size = 0;

	list_for_each_entry(omap_obj, list, mm_list) {
		struct drm_gem_object *obj = &omap_obj->base;
		seq_printf(m, " ");
		omap_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

/* -----------------------------------------------------------------------------
 * Buffer Synchronization
 */

static DEFINE_SPINLOCK(sync_lock);

struct omap_gem_sync_waiter {
	struct list_head list;
	struct omap_gem_object *omap_obj;
	enum omap_gem_op op;
	uint32_t read_target, write_target;
	/* notify called w/ sync_lock held */
	void (*notify)(void *arg);
	void *arg;
};

/* list of omap_gem_sync_waiter.. the notify fxn gets called back when
 * the read and/or write target count is achieved which can call a user
 * callback (ex. to kick 3d and/or 2d), wakeup blocked task (prep for
 * cpu access), etc.
 */
static LIST_HEAD(waiters);

static inline bool is_waiting(struct omap_gem_sync_waiter *waiter)
{
	struct omap_gem_object *omap_obj = waiter->omap_obj;
	if ((waiter->op & OMAP_GEM_READ) &&
			(omap_obj->sync->write_complete < waiter->write_target))
		return true;
	if ((waiter->op & OMAP_GEM_WRITE) &&
			(omap_obj->sync->read_complete < waiter->read_target))
		return true;
	return false;
}

/* macro for sync debug.. */
#define SYNCDBG 0
#define SYNC(fmt, ...) do { if (SYNCDBG) \
		printk(KERN_ERR "%s:%d: "fmt"\n", \
				__func__, __LINE__, ##__VA_ARGS__); \
	} while (0)


static void sync_op_update(void)
{
	struct omap_gem_sync_waiter *waiter, *n;
	list_for_each_entry_safe(waiter, n, &waiters, list) {
		if (!is_waiting(waiter)) {
			list_del(&waiter->list);
			SYNC("notify: %p", waiter);
			waiter->notify(waiter->arg);
			kfree(waiter);
		}
	}
}

static inline int sync_op(struct drm_gem_object *obj,
		enum omap_gem_op op, bool start)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	spin_lock(&sync_lock);

	if (!omap_obj->sync) {
		omap_obj->sync = kzalloc(sizeof(*omap_obj->sync), GFP_ATOMIC);
		if (!omap_obj->sync) {
			ret = -ENOMEM;
			goto unlock;
		}
	}

	if (start) {
		if (op & OMAP_GEM_READ)
			omap_obj->sync->read_pending++;
		if (op & OMAP_GEM_WRITE)
			omap_obj->sync->write_pending++;
	} else {
		if (op & OMAP_GEM_READ)
			omap_obj->sync->read_complete++;
		if (op & OMAP_GEM_WRITE)
			omap_obj->sync->write_complete++;
		sync_op_update();
	}

unlock:
	spin_unlock(&sync_lock);

	return ret;
}

/* it is a bit lame to handle updates in this sort of polling way, but
 * in case of PVR, the GPU can directly update read/write complete
 * values, and not really tell us which ones it updated.. this also
 * means that sync_lock is not quite sufficient. So we'll need to
 * do something a bit better when it comes time to add support for
 * separate 2d hw..
 */
void omap_gem_op_update(void)
{
	spin_lock(&sync_lock);
	sync_op_update();
	spin_unlock(&sync_lock);
}

/* mark the start of read and/or write operation */
int omap_gem_op_start(struct drm_gem_object *obj, enum omap_gem_op op)
{
	return sync_op(obj, op, true);
}

int omap_gem_op_finish(struct drm_gem_object *obj, enum omap_gem_op op)
{
	return sync_op(obj, op, false);
}

static DECLARE_WAIT_QUEUE_HEAD(sync_event);

static void sync_notify(void *arg)
{
	struct task_struct **waiter_task = arg;
	*waiter_task = NULL;
	wake_up_all(&sync_event);
}

int omap_gem_op_sync(struct drm_gem_object *obj, enum omap_gem_op op)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;
	if (omap_obj->sync) {
		struct task_struct *waiter_task = current;
		struct omap_gem_sync_waiter *waiter =
				kzalloc(sizeof(*waiter), GFP_KERNEL);

		if (!waiter)
			return -ENOMEM;

		waiter->omap_obj = omap_obj;
		waiter->op = op;
		waiter->read_target = omap_obj->sync->read_pending;
		waiter->write_target = omap_obj->sync->write_pending;
		waiter->notify = sync_notify;
		waiter->arg = &waiter_task;

		spin_lock(&sync_lock);
		if (is_waiting(waiter)) {
			SYNC("waited: %p", waiter);
			list_add_tail(&waiter->list, &waiters);
			spin_unlock(&sync_lock);
			ret = wait_event_interruptible(sync_event,
					(waiter_task == NULL));
			spin_lock(&sync_lock);
			if (waiter_task) {
				SYNC("interrupted: %p", waiter);
				/* we were interrupted */
				list_del(&waiter->list);
				waiter_task = NULL;
			} else {
				/* freed in sync_op_update() */
				waiter = NULL;
			}
		}
		spin_unlock(&sync_lock);
		kfree(waiter);
	}
	return ret;
}

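/*
 * Sketch of how the op tracking above is meant to be used around CPU access
 * (illustrative only; the CPU_PREP/CPU_FINI ioctl handlers in omap_drv.c
 * follow roughly this pattern):
 *
 *	// before the CPU reads the buffer:
 *	ret = omap_gem_op_sync(obj, OMAP_GEM_READ);	// wait for pending writers
 *	if (!ret)
 *		ret = omap_gem_op_start(obj, OMAP_GEM_READ);
 *	// ... CPU accesses the buffer ...
 *	omap_gem_op_finish(obj, OMAP_GEM_READ);
 */
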
/* call fxn(arg), either synchronously or asynchronously if the op
 * is currently blocked.. fxn() can be called from any context
 *
 * (TODO for now fxn is called back from whichever context calls
 * omap_gem_op_update().. but this could be better defined later
 * if needed)
 *
 * TODO more code in common w/ _sync()..
 */
int omap_gem_op_async(struct drm_gem_object *obj, enum omap_gem_op op,
		void (*fxn)(void *arg), void *arg)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	if (omap_obj->sync) {
		struct omap_gem_sync_waiter *waiter =
				kzalloc(sizeof(*waiter), GFP_ATOMIC);

		if (!waiter)
			return -ENOMEM;

		waiter->omap_obj = omap_obj;
		waiter->op = op;
		waiter->read_target = omap_obj->sync->read_pending;
		waiter->write_target = omap_obj->sync->write_pending;
		waiter->notify = fxn;
		waiter->arg = arg;

		spin_lock(&sync_lock);
		if (is_waiting(waiter)) {
			SYNC("waited: %p", waiter);
			list_add_tail(&waiter->list, &waiters);
			spin_unlock(&sync_lock);
			return 0;
		}

		spin_unlock(&sync_lock);

		kfree(waiter);
	}

	/* no waiting.. */
	fxn(arg);

	return 0;
}

/* special API so PVR can update the buffer to use a sync-object allocated
 * from its sync-obj heap. Only used for a newly allocated (from PVR's
 * perspective) sync-object, so we overwrite the new syncobj w/ values
 * from the already allocated syncobj (if there is one)
 */
int omap_gem_set_sync_object(struct drm_gem_object *obj, void *syncobj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	spin_lock(&sync_lock);

	if ((omap_obj->flags & OMAP_BO_EXT_SYNC) && !syncobj) {
		/* clearing a previously set syncobj */
		syncobj = kmemdup(omap_obj->sync, sizeof(*omap_obj->sync),
				  GFP_ATOMIC);
		if (!syncobj) {
			ret = -ENOMEM;
			goto unlock;
		}
		omap_obj->flags &= ~OMAP_BO_EXT_SYNC;
		omap_obj->sync = syncobj;
	} else if (syncobj && !(omap_obj->flags & OMAP_BO_EXT_SYNC)) {
		/* replacing an existing syncobj */
		if (omap_obj->sync) {
			memcpy(syncobj, omap_obj->sync, sizeof(*omap_obj->sync));
			kfree(omap_obj->sync);
		}
		omap_obj->flags |= OMAP_BO_EXT_SYNC;
		omap_obj->sync = syncobj;
	}

unlock:
	spin_unlock(&sync_lock);
	return ret;
}

/* -----------------------------------------------------------------------------
 * Constructor & Destructor
 */

/* don't call directly.. called from GEM core when it is time to actually
 * free the object..
 */
void omap_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	evict(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	spin_lock(&priv->list_lock);
	list_del(&omap_obj->mm_list);
	spin_unlock(&priv->list_lock);

	drm_gem_free_mmap_offset(obj);

	/* this means the object is still pinned.. which really should
	 * not happen. I think..
	 */
	WARN_ON(omap_obj->paddr_cnt > 0);

	/* don't free externally allocated backing memory */
	if (!(omap_obj->flags & OMAP_BO_EXT_MEM)) {
		if (omap_obj->pages)
			omap_gem_detach_pages(obj);

		if (!is_shmem(obj)) {
			dma_free_writecombine(dev->dev, obj->size,
					omap_obj->vaddr, omap_obj->paddr);
		} else if (omap_obj->vaddr) {
			vunmap(omap_obj->vaddr);
		}
	}

	/* don't free externally allocated syncobj */
	if (!(omap_obj->flags & OMAP_BO_EXT_SYNC))
		kfree(omap_obj->sync);

	drm_gem_object_release(obj);

	kfree(obj);
}

/* GEM buffer object constructor */
struct drm_gem_object *omap_gem_new(struct drm_device *dev,
		union omap_gem_size gsize, uint32_t flags)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj;
	struct drm_gem_object *obj = NULL;
	struct address_space *mapping;
	size_t size;
	int ret;

	if (flags & OMAP_BO_TILED) {
		if (!priv->usergart) {
			dev_err(dev->dev, "Tiled buffers require DMM\n");
			goto fail;
		}

		/* tiled buffers are always shmem paged backed.. when they are
		 * scanned out, they are remapped into DMM/TILER
		 */
		flags &= ~OMAP_BO_SCANOUT;

		/* currently don't allow cached buffers.. there is some caching
		 * stuff that needs to be handled better
		 */
		flags &= ~(OMAP_BO_CACHED|OMAP_BO_WC|OMAP_BO_UNCACHED);
		flags |= tiler_get_cpu_cache_flags();

		/* align dimensions to slot boundaries... */
		tiler_align(gem2fmt(flags),
				&gsize.tiled.width, &gsize.tiled.height);

		/* ...and calculate size based on aligned dimensions */
		size = tiler_size(gem2fmt(flags),
				gsize.tiled.width, gsize.tiled.height);
	} else {
		size = PAGE_ALIGN(gsize.bytes);
	}

	omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL);
	if (!omap_obj)
		return NULL;

	obj = &omap_obj->base;

	if ((flags & OMAP_BO_SCANOUT) && !priv->has_dmm) {
		/* attempt to allocate contiguous memory if we don't
		 * have DMM for remapping discontiguous buffers
		 */
		omap_obj->vaddr = dma_alloc_writecombine(dev->dev, size,
				&omap_obj->paddr, GFP_KERNEL);
		if (!omap_obj->vaddr) {
			kfree(omap_obj);

			return NULL;
		}

		flags |= OMAP_BO_DMA;
	}

	spin_lock(&priv->list_lock);
	list_add(&omap_obj->mm_list, &priv->obj_list);
	spin_unlock(&priv->list_lock);

	omap_obj->flags = flags;

	if (flags & OMAP_BO_TILED) {
		omap_obj->width = gsize.tiled.width;
		omap_obj->height = gsize.tiled.height;
	}

	if (flags & (OMAP_BO_DMA|OMAP_BO_EXT_MEM)) {
		drm_gem_private_object_init(dev, obj, size);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;

		mapping = file_inode(obj->filp)->i_mapping;
		mapping_set_gfp_mask(mapping, GFP_USER | __GFP_DMA32);
	}

	return obj;

fail:
	if (obj)
		omap_gem_free_object(obj);

	return NULL;
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		union omap_gem_size gsize, uint32_t flags, uint32_t *handle)
{
	struct drm_gem_object *obj;
	int ret;

	obj = omap_gem_new(dev, gsize, flags);
	if (!obj)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, obj, handle);
	if (ret) {
		drm_gem_object_release(obj);
		kfree(obj); /* TODO isn't there a dtor to call? just copying i915 */
		return ret;
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return 0;
}

/* -----------------------------------------------------------------------------
 * Init & Cleanup
 */

/* If DMM is used, we need to set some stuff up.. */
void omap_gem_init(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_drm_usergart *usergart;
	const enum tiler_fmt fmts[] = {
			TILFMT_8BIT, TILFMT_16BIT, TILFMT_32BIT
	};
	int i, j;

	if (!dmm_is_available()) {
		/* DMM only supported on OMAP4 and later, so this isn't fatal */
		dev_warn(dev->dev, "DMM not available, disable DMM support\n");
		return;
	}

	usergart = kcalloc(3, sizeof(*usergart), GFP_KERNEL);
	if (!usergart)
		return;

	/* reserve 4k aligned/wide regions for userspace mappings: */
	for (i = 0; i < ARRAY_SIZE(fmts); i++) {
		uint16_t h = 1, w = PAGE_SIZE >> i;
		tiler_align(fmts[i], &w, &h);
		/* note: since each region is 1 4kb page wide, and minimum
		 * number of rows, the height ends up being the same as the
		 * # of pages in the region
		 */
		usergart[i].height = h;
		usergart[i].height_shift = ilog2(h);
		usergart[i].stride_pfn = tiler_stride(fmts[i], 0) >> PAGE_SHIFT;
		usergart[i].slot_shift = ilog2((PAGE_SIZE / h) >> i);
		for (j = 0; j < NUM_USERGART_ENTRIES; j++) {
			struct omap_drm_usergart_entry *entry;
			struct tiler_block *block;

			entry = &usergart[i].entry[j];
			block = tiler_reserve_2d(fmts[i], w, h, PAGE_SIZE);
			if (IS_ERR(block)) {
				dev_err(dev->dev,
						"reserve failed: %d, %d, %ld\n",
						i, j, PTR_ERR(block));
				return;
			}
			entry->paddr = tiler_ssptr(block);
			entry->block = block;

			DBG("%d:%d: %dx%d: paddr=%pad stride=%d", i, j, w, h,
					&entry->paddr,
					usergart[i].stride_pfn << PAGE_SHIFT);
		}
	}

	priv->usergart = usergart;
	priv->has_dmm = true;
}

void omap_gem_deinit(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;

	/* I believe we can rely on there being no more outstanding GEM
	 * objects which could depend on usergart/dmm at this point.
	 */
	kfree(priv->usergart);
}