/*
 * drivers/staging/omapdrm/omap_gem.c
 *
 * Copyright (C) 2011 Texas Instruments
 * Author: Rob Clark <rob.clark@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/spinlock.h>
#include <linux/shmem_fs.h>

#include "omap_drv.h"
#include "omap_dmm_tiler.h"
/* remove these once drm core helpers are merged */
struct page **_drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
void _drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
		bool dirty, bool accessed);
int _drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size);
/*
 * GEM buffer object implementation.
 */

#define to_omap_bo(x) container_of(x, struct omap_gem_object, base)
/* note: we use upper 8 bits of flags for driver-internal flags: */
#define OMAP_BO_DMA		0x01000000	/* actually is physically contiguous */
#define OMAP_BO_EXT_SYNC	0x02000000	/* externally allocated sync object */
#define OMAP_BO_EXT_MEM		0x04000000	/* externally allocated memory */
struct omap_gem_object {
	struct drm_gem_object base;

	uint32_t flags;

	/** width/height for tiled formats (rounded up to slot boundaries) */
	uint16_t width, height;

	/** roll applied when mapping to DMM */
	uint32_t roll;

	/**
	 * If buffer is allocated physically contiguous, the OMAP_BO_DMA flag
	 * is set and the paddr is valid.  Also if the buffer is remapped in
	 * TILER and paddr_cnt > 0, then paddr is valid.  But if you are using
	 * the physical address and OMAP_BO_DMA is not set, then you should
	 * be going thru omap_gem_{get,put}_paddr() to ensure the mapping is
	 * not removed from under your feet.
	 *
	 * Note that OMAP_BO_SCANOUT is a hint from userspace that DMA capable
	 * buffer is requested, but doesn't mean that it is.  Use the
	 * OMAP_BO_DMA flag to determine if the buffer has a DMA capable
	 * physical address.
	 */
	dma_addr_t paddr;

	/** count of users of paddr */
	uint32_t paddr_cnt;

	/**
	 * tiler block used when buffer is remapped in DMM/TILER.
	 */
	struct tiler_block *block;

	/**
	 * Array of backing pages, if allocated.  Note that pages are never
	 * allocated for buffers originally allocated from contiguous memory.
	 */
	struct page **pages;

	/** addresses corresponding to pages in above array */
	dma_addr_t *addrs;

	/**
	 * Virtual address, if mapped.
	 */
	void *vaddr;

	/**
	 * sync-object allocated on demand (if needed)
	 *
	 * Per-buffer sync-object for tracking pending and completed hw/dma
	 * read and write operations.  The layout in memory is dictated by
	 * the SGX firmware, which uses this information to stall the command
	 * stream if a surface is not ready yet.
	 *
	 * Note that when buffer is used by SGX, the sync-object needs to be
	 * allocated from a special heap of sync-objects.  This way many sync
	 * objects can be packed in a page, and not waste GPU virtual address
	 * space.  Because of this we have to have an omap_gem_set_sync_object()
	 * API to allow replacement of the syncobj after it has (potentially)
	 * already been allocated.  A bit ugly but I haven't thought of a
	 * better alternative.
	 */
	struct {
		uint32_t write_pending;
		uint32_t write_complete;
		uint32_t read_pending;
		uint32_t read_complete;
	} *sync;
};
/* To deal with userspace mmap'ings of 2d tiled buffers, which (a) are
 * not necessarily pinned in TILER all the time, and (b) when they are
 * they are not necessarily page aligned, we reserve one or more small
 * regions in each of the 2d containers to use as a user-GART where we
 * can create a second page-aligned mapping of parts of the buffer
 * being accessed from userspace.
 *
 * Note that we could optimize slightly when we know that multiple
 * tiler containers are backed by the same PAT.. but I'll leave that
 * for later..
 */
#define NUM_USERGART_ENTRIES 2
struct usergart_entry {
	struct tiler_block *block;	/* the reserved tiler block */
	dma_addr_t paddr;
	struct drm_gem_object *obj;	/* the current pinned obj */
	pgoff_t obj_pgoff;		/* page offset of obj currently
					   mapped in */
};
static struct usergart {
	struct usergart_entry entry[NUM_USERGART_ENTRIES];
	int height;		/* height in rows */
	int height_shift;	/* ilog2(height in rows) */
	int slot_shift;		/* ilog2(width per slot) */
	int stride_pfn;		/* stride in pages */
	int last;		/* index of last used entry */
} *usergart;
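
/* Illustrative sketch (not part of the original driver): each entry in
 * a container's usergart is one reserved 4kb-wide tiler block, handed
 * out round-robin, so at most NUM_USERGART_ENTRIES windows of a tiled
 * buffer can be faulted in at once before the oldest is evicted:
 *
 *	struct usergart_entry *e = &usergart[fmt].entry[usergart[fmt].last];
 *	if (e->obj)
 *		evict_entry(e->obj, fmt, e);	// steal the oldest window
 *	usergart[fmt].last = (usergart[fmt].last + 1) % NUM_USERGART_ENTRIES;
 */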
static void evict_entry(struct drm_gem_object *obj,
		enum tiler_fmt fmt, struct usergart_entry *entry)
{
	if (obj->dev->dev_mapping) {
		size_t size = PAGE_SIZE * usergart[fmt].height;
		loff_t off = omap_gem_mmap_offset(obj) +
				(entry->obj_pgoff << PAGE_SHIFT);
		unmap_mapping_range(obj->dev->dev_mapping, off, size, 1);
	}

	entry->obj = NULL;
}
/* Evict a buffer from usergart, if it is mapped there */
static void evict(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (omap_obj->flags & OMAP_BO_TILED) {
		enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
		int i;

		if (!usergart)
			return;

		for (i = 0; i < NUM_USERGART_ENTRIES; i++) {
			struct usergart_entry *entry = &usergart[fmt].entry[i];
			if (entry->obj == obj)
				evict_entry(obj, fmt, entry);
		}
	}
}
/* GEM objects can either be allocated from contiguous memory (in which
 * case obj->filp==NULL), or w/ shmem backing (obj->filp!=NULL).  But non
 * contiguous buffers can be remapped in TILER/DMM if they need to be
 * contiguous... but we don't do this all the time to reduce pressure
 * on TILER/DMM space when we know at allocation time that the buffer
 * will need to be scanned out.
 */
static inline bool is_shmem(struct drm_gem_object *obj)
{
	return obj->filp != NULL;
}
static int get_pages(struct drm_gem_object *obj, struct page ***pages);

static DEFINE_SPINLOCK(sync_lock);
/** ensure backing pages are allocated */
static int omap_gem_attach_pages(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct page **pages;

	WARN_ON(omap_obj->pages);

	/* TODO: __GFP_DMA32 .. but somehow GFP_HIGHMEM is coming from the
	 * mapping_gfp_mask(mapping) which conflicts w/ GFP_DMA32.. probably
	 * we actually want CMA memory for it all anyways..
	 */
	pages = _drm_gem_get_pages(obj, GFP_KERNEL);
	if (IS_ERR(pages)) {
		dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages));
		return PTR_ERR(pages);
	}

	/* for non-cached buffers, ensure the new pages are clean because
	 * DSS, GPU, etc. are not cache coherent:
	 */
	if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
		int i, npages = obj->size >> PAGE_SHIFT;
		dma_addr_t *addrs = kmalloc(npages * sizeof(*addrs), GFP_KERNEL);
		for (i = 0; i < npages; i++) {
			addrs[i] = dma_map_page(obj->dev->dev, pages[i],
					0, PAGE_SIZE, DMA_BIDIRECTIONAL);
		}
		omap_obj->addrs = addrs;
	}

	omap_obj->pages = pages;
	return 0;
}
/** release backing pages */
static void omap_gem_detach_pages(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	/* for non-cached buffers, the pages were mapped for DMA when
	 * attached, because DSS, GPU, etc. are not cache coherent:
	 */
	if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
		int i, npages = obj->size >> PAGE_SHIFT;
		for (i = 0; i < npages; i++) {
			dma_unmap_page(obj->dev->dev, omap_obj->addrs[i],
					PAGE_SIZE, DMA_BIDIRECTIONAL);
		}
		kfree(omap_obj->addrs);
		omap_obj->addrs = NULL;
	}

	_drm_gem_put_pages(obj, omap_obj->pages, true, false);
	omap_obj->pages = NULL;
}
/** get mmap offset */
uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj)
{
	if (!obj->map_list.map) {
		/* Make it mmapable */
		size_t size = omap_gem_mmap_size(obj);
		int ret = _drm_gem_create_mmap_offset_size(obj, size);

		if (ret) {
			dev_err(obj->dev->dev, "could not allocate mmap offset\n");
			return 0;
		}
	}

	return (uint64_t)obj->map_list.hash.key << PAGE_SHIFT;
}
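
/* Usage sketch (assumed caller, not from this file): the value returned
 * here is the fake offset that userspace passes to mmap() on the drm fd
 * to map this buffer:
 *
 *	uint64_t off = omap_gem_mmap_offset(obj);
 *	// userspace: mmap(NULL, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, off)
 */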
/** get mmap size */
size_t omap_gem_mmap_size(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	size_t size = obj->size;

	if (omap_obj->flags & OMAP_BO_TILED) {
		/* for tiled buffers, the virtual size has stride rounded up
		 * to 4kb.. (to hide the fact that row n+1 might start 16kb or
		 * 32kb later!).  But we don't back the entire buffer with
		 * pages, only the valid picture part.. so need to adjust for
		 * this in the size used to mmap and generate mmap offset
		 */
		size = tiler_vsize(gem2fmt(omap_obj->flags),
				omap_obj->width, omap_obj->height);
	}

	return size;
}
/* Normal handling for the case of faulting in non-tiled buffers */
static int fault_1d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	unsigned long pfn;
	pgoff_t pgoff;

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	if (omap_obj->pages) {
		pfn = page_to_pfn(omap_obj->pages[pgoff]);
	} else {
		BUG_ON(!(omap_obj->flags & OMAP_BO_DMA));
		pfn = (omap_obj->paddr >> PAGE_SHIFT) + pgoff;
	}

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);

	return vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);
}
/* Special handling for the case of faulting in 2d tiled buffers */
static int fault_2d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct usergart_entry *entry;
	enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
	struct page *pages[64];  /* XXX is this too much to have on stack? */
	unsigned long pfn;
	pgoff_t pgoff, base_pgoff;
	void __user *vaddr;
	int i, ret, slots;

	/* TODO: this fxn might need a bit tweaking to deal w/ tiled buffers
	 * that are wider than 4kb
	 */

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	/* actual address we start mapping at is rounded down to previous slot
	 * boundary in the y direction:
	 */
	base_pgoff = round_down(pgoff, usergart[fmt].height);
	vaddr = vmf->virtual_address - ((pgoff - base_pgoff) << PAGE_SHIFT);
	entry = &usergart[fmt].entry[usergart[fmt].last];

	slots = omap_obj->width >> usergart[fmt].slot_shift;

	/* evict previous buffer using this usergart entry, if any: */
	if (entry->obj)
		evict_entry(entry->obj, fmt, entry);

	entry->obj = obj;
	entry->obj_pgoff = base_pgoff;

	/* now convert base_pgoff to phys offset from virt offset:
	 */
	base_pgoff = (base_pgoff >> usergart[fmt].height_shift) * slots;

	/* map in pages.  Note the height of the slot is also equal to the
	 * number of pages that need to be mapped in to fill 4kb wide CPU page.
	 * If the height is 64, then 64 pages fill a 4kb wide by 64 row region.
	 * Beyond the valid pixel part of the buffer, we set pages[i] to NULL to
	 * get a dummy page mapped in.. if someone reads/writes it they will get
	 * random/undefined content, but at least it won't be corrupting
	 * whatever other random page used to be mapped in, or other undefined
	 * behavior.
	 */
	memcpy(pages, &omap_obj->pages[base_pgoff],
			sizeof(struct page *) * slots);
	memset(pages + slots, 0,
			sizeof(struct page *) * (usergart[fmt].height - slots));

	ret = tiler_pin(entry->block, pages, ARRAY_SIZE(pages), 0, true);
	if (ret) {
		dev_err(obj->dev->dev, "failed to pin: %d\n", ret);
		return ret;
	}

	i = usergart[fmt].height;
	pfn = entry->paddr >> PAGE_SHIFT;

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);

	while (i--) {
		vm_insert_mixed(vma, (unsigned long)vaddr, pfn);
		pfn += usergart[fmt].stride_pfn;
		vaddr += PAGE_SIZE;
	}

	/* simple round-robin: */
	usergart[fmt].last = (usergart[fmt].last + 1) % NUM_USERGART_ENTRIES;

	return 0;
}
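
/* Worked example (hypothetical numbers): with usergart[fmt].height = 64
 * (height_shift = 6) and a fault at pgoff = 100, the mapping starts at
 * base_pgoff = round_down(100, 64) = 64; with slots = 8 the physical
 * page offset becomes (64 >> 6) * 8 = 8, and 64 pages are then pinned
 * and inserted one per row, stride_pfn apart.
 */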
/**
 * omap_gem_fault	-	pagefault handler for GEM objects
 * @vma: the VMA of the GEM object
 * @vmf: fault detail
 *
 * Invoked when a fault occurs on an mmap of a GEM managed area. GEM
 * does most of the work for us including the actual map/unmap calls
 * but we need to do the actual page work.
 *
 * The VMA was set up by GEM. In doing so it also ensured that the
 * vma->vm_private_data points to the GEM object that is backing this
 * mapping.
 */
int omap_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct drm_device *dev = obj->dev;
	struct page **pages;
	int ret;

	/* Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet
	 */
	mutex_lock(&dev->struct_mutex);

	/* if a shmem backed object, make sure we have pages attached now */
	ret = get_pages(obj, &pages);
	if (ret)
		goto fail;

	/* where should we do corresponding put_pages().. we are mapping
	 * the original page, rather than thru a GART, so we can't rely
	 * on eviction to trigger this.  But munmap() or all mappings should
	 * probably trigger put_pages()?
	 */

	if (omap_obj->flags & OMAP_BO_TILED)
		ret = fault_2d(obj, vma, vmf);
	else
		ret = fault_1d(obj, vma, vmf);

fail:
	mutex_unlock(&dev->struct_mutex);
	switch (ret) {
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}
/** We override mainly to fix up some of the vm mapping flags.. */
int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct omap_gem_object *omap_obj;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	/* after drm_gem_mmap(), it is safe to access the obj */
	omap_obj = to_omap_bo(vma->vm_private_data);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (omap_obj->flags & OMAP_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (omap_obj->flags & OMAP_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return ret;
}
/**
 * omap_gem_dumb_create	-	create a dumb buffer
 * @file: our client file
 * @dev: our device
 * @args: the requested arguments copied from userspace
 *
 * Allocate a buffer suitable for use for a frame buffer of the
 * form described by user space. Give userspace a handle by which
 * to reference it.
 */
int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	union omap_gem_size gsize;

	/* in case someone tries to feed us a completely bogus stride: */
	args->pitch = align_pitch(args->pitch, args->width, args->bpp);
	args->size = PAGE_ALIGN(args->pitch * args->height);

	gsize = (union omap_gem_size){
		.bytes = args->size,
	};

	return omap_gem_new_handle(dev, file, gsize,
			OMAP_BO_SCANOUT | OMAP_BO_WC, &args->handle);
}
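
/* Worked example (hypothetical args, assuming align_pitch() adds no
 * extra alignment): width = 640, bpp = 32 gives pitch = 640 * 4 = 2560
 * bytes, and with height = 480:
 *
 *	args->size = PAGE_ALIGN(2560 * 480) = 1228800 bytes (300 pages)
 */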
/**
 * omap_gem_dumb_destroy	-	destroy a dumb buffer
 * @file: our client file
 * @dev: our DRM device
 * @handle: the object handle
 *
 * Destroy a handle that was created via omap_gem_dumb_create.
 */
int omap_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
		uint32_t handle)
{
	/* No special work needed, drop the reference and see what falls out */
	return drm_gem_handle_delete(file, handle);
}
/**
 * omap_gem_dumb_map_offset	-	buffer mapping for dumb interface
 * @file: our drm client file
 * @dev: drm device
 * @handle: GEM handle to the object (from dumb_create)
 * @offset: fill in with the fake mmap offset
 *
 * Do the necessary setup to allow the mapping of the frame buffer
 * into user memory. We don't have to do much here at the moment.
 */
int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(dev, file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = omap_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

fail:
	return ret;
}
/* Set scrolling position.  This allows us to implement fast scrolling
 * for console.
 */
int omap_gem_roll(struct drm_gem_object *obj, uint32_t roll)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	uint32_t npages = obj->size >> PAGE_SHIFT;
	int ret = 0;

	if (roll > npages) {
		dev_err(obj->dev->dev, "invalid roll: %d\n", roll);
		return -EINVAL;
	}

	omap_obj->roll = roll;

	if (in_atomic() || mutex_is_locked(&obj->dev->struct_mutex)) {
		/* this can get called from fbcon in atomic context.. so
		 * just ignore it and wait for next time called from
		 * interruptible context to update the PAT.. the result
		 * may be that user sees wrap-around instead of scrolling
		 * momentarily on the screen.  If we wanted to be fancier
		 * we could perhaps schedule some workqueue work at this
		 * point.
		 */
		return 0;
	}

	mutex_lock(&obj->dev->struct_mutex);

	/* if we aren't mapped yet, we don't need to do anything */
	if (omap_obj->block) {
		struct page **pages;
		ret = get_pages(obj, &pages);
		if (ret)
			goto fail;
		ret = tiler_pin(omap_obj->block, pages, npages, roll, true);
		if (ret)
			dev_err(obj->dev->dev, "could not repin: %d\n", ret);
	}

fail:
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}
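
/* Usage sketch (assumed fbdev-style caller, not from this file): fast
 * console scrolling by whole rows, converting the y offset into a page
 * offset into the buffer; the exact conversion depends on the format:
 *
 *	uint32_t roll = (yoffset * pitch) >> PAGE_SHIFT;
 *	omap_gem_roll(bo, roll);
 */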
/* Get physical address for DMA.. if 'remap' is true, and the buffer is not
 * already contiguous, remap it to pin in physically contiguous memory.. (ie.
 * map in TILER)
 */
int omap_gem_get_paddr(struct drm_gem_object *obj,
		dma_addr_t *paddr, bool remap)
{
	struct omap_drm_private *priv = obj->dev->dev_private;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	mutex_lock(&obj->dev->struct_mutex);

	if (remap && is_shmem(obj) && priv->has_dmm) {
		if (omap_obj->paddr_cnt == 0) {
			struct page **pages;
			uint32_t npages = obj->size >> PAGE_SHIFT;
			enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
			struct tiler_block *block;

			BUG_ON(omap_obj->block);

			ret = get_pages(obj, &pages);
			if (ret)
				goto fail;

			if (omap_obj->flags & OMAP_BO_TILED) {
				block = tiler_reserve_2d(fmt,
						omap_obj->width,
						omap_obj->height, 0);
			} else {
				block = tiler_reserve_1d(obj->size);
			}

			if (IS_ERR(block)) {
				ret = PTR_ERR(block);
				dev_err(obj->dev->dev,
					"could not remap: %d (%d)\n", ret, fmt);
				goto fail;
			}

			/* TODO: enable async refill.. */
			ret = tiler_pin(block, pages, npages,
					omap_obj->roll, true);
			if (ret) {
				tiler_release(block);
				dev_err(obj->dev->dev,
						"could not pin: %d\n", ret);
				goto fail;
			}

			omap_obj->paddr = tiler_ssptr(block);
			omap_obj->block = block;

			DBG("got paddr: %08x", omap_obj->paddr);
		}

		omap_obj->paddr_cnt++;

		*paddr = omap_obj->paddr;
	} else if (omap_obj->flags & OMAP_BO_DMA) {
		*paddr = omap_obj->paddr;
	} else {
		ret = -EINVAL;
	}

fail:
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}
/* Release physical address, when DMA is no longer being performed.. this
 * could potentially unpin and unmap buffers from TILER
 */
int omap_gem_put_paddr(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	mutex_lock(&obj->dev->struct_mutex);
	if (omap_obj->paddr_cnt > 0) {
		omap_obj->paddr_cnt--;
		if (omap_obj->paddr_cnt == 0) {
			ret = tiler_unpin(omap_obj->block);
			if (ret) {
				dev_err(obj->dev->dev,
					"could not unpin pages: %d\n", ret);
				goto fail;
			}
			ret = tiler_release(omap_obj->block);
			if (ret) {
				dev_err(obj->dev->dev,
					"could not release unmap: %d\n", ret);
			}
			omap_obj->block = NULL;
		}
	}
fail:
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}
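
/* Usage sketch (not from this file): a typical pin/unpin sequence
 * around a DMA operation, error handling elided:
 *
 *	dma_addr_t paddr;
 *	if (!omap_gem_get_paddr(obj, &paddr, true)) {
 *		// program DSS/DMA with paddr; the mapping is refcounted
 *		// via paddr_cnt and cannot vanish until the matching put:
 *		omap_gem_put_paddr(obj);
 *	}
 */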
/* acquire pages when needed (for example, for DMA where physically
 * contiguous buffer is not required)
 */
static int get_pages(struct drm_gem_object *obj, struct page ***pages)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	if (is_shmem(obj) && !omap_obj->pages) {
		ret = omap_gem_attach_pages(obj);
		if (ret) {
			dev_err(obj->dev->dev, "could not attach pages\n");
			return ret;
		}
	}

	/* TODO: even phys-contig.. we should have a list of pages? */
	*pages = omap_obj->pages;

	return 0;
}
int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages)
{
	int ret;
	mutex_lock(&obj->dev->struct_mutex);
	ret = get_pages(obj, pages);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}
/* release pages when DMA no longer being performed */
int omap_gem_put_pages(struct drm_gem_object *obj)
{
	/* do something here if we dynamically attach/detach pages.. at
	 * least they would no longer need to be pinned if everyone has
	 * released the pages..
	 */
	return 0;
}
/* Get kernel virtual address for CPU access.. this more or less only
 * exists for omap_fbdev.  This should be called with struct_mutex
 * held.
 */
void *omap_gem_vaddr(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
	if (!omap_obj->vaddr) {
		struct page **pages;
		int ret = get_pages(obj, &pages);
		if (ret)
			return ERR_PTR(ret);
		omap_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
	}
	return omap_obj->vaddr;
}
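
/* Usage sketch (assumed omap_fbdev-style caller): struct_mutex must be
 * held across the call, per the comment above:
 *
 *	mutex_lock(&dev->struct_mutex);
 *	void *vaddr = omap_gem_vaddr(obj);
 *	mutex_unlock(&dev->struct_mutex);
 *	if (!IS_ERR(vaddr))
 *		memset(vaddr, 0, obj->size);	// CPU access via WC mapping
 */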
/* Buffer Synchronization:
 */

struct omap_gem_sync_waiter {
	struct list_head list;
	struct omap_gem_object *omap_obj;
	enum omap_gem_op op;
	uint32_t read_target, write_target;
	/* notify called w/ sync_lock held */
	void (*notify)(void *arg);
	void *arg;
};

/* list of omap_gem_sync_waiter.. the notify fxn gets called back when
 * the read and/or write target count is achieved which can call a user
 * callback (ex. to kick 3d and/or 2d), wakeup blocked task (prep for
 * sync), etc.
 */
static LIST_HEAD(waiters);
static inline bool is_waiting(struct omap_gem_sync_waiter *waiter)
{
	struct omap_gem_object *omap_obj = waiter->omap_obj;
	if ((waiter->op & OMAP_GEM_READ) &&
			(omap_obj->sync->read_complete < waiter->read_target))
		return true;
	if ((waiter->op & OMAP_GEM_WRITE) &&
			(omap_obj->sync->write_complete < waiter->write_target))
		return true;
	return false;
}
/* macro for sync debug.. */
#define SYNCDBG 0
#define SYNC(fmt, ...) do { if (SYNCDBG) \
		printk(KERN_ERR "%s:%d: "fmt"\n", \
				__func__, __LINE__, ##__VA_ARGS__); \
	} while (0)
static void sync_op_update(void)
{
	struct omap_gem_sync_waiter *waiter, *n;
	list_for_each_entry_safe(waiter, n, &waiters, list) {
		if (!is_waiting(waiter)) {
			list_del(&waiter->list);
			SYNC("notify: %p", waiter);
			waiter->notify(waiter->arg);
			kfree(waiter);
		}
	}
}
static inline int sync_op(struct drm_gem_object *obj,
		enum omap_gem_op op, bool start)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	spin_lock(&sync_lock);

	if (!omap_obj->sync) {
		omap_obj->sync = kzalloc(sizeof(*omap_obj->sync), GFP_ATOMIC);
		if (!omap_obj->sync) {
			ret = -ENOMEM;
			goto unlock;
		}
	}

	if (start) {
		if (op & OMAP_GEM_READ)
			omap_obj->sync->read_pending++;
		if (op & OMAP_GEM_WRITE)
			omap_obj->sync->write_pending++;
	} else {
		if (op & OMAP_GEM_READ)
			omap_obj->sync->read_complete++;
		if (op & OMAP_GEM_WRITE)
			omap_obj->sync->write_complete++;
		sync_op_update();
	}

unlock:
	spin_unlock(&sync_lock);

	return ret;
}
/* it is a bit lame to handle updates in this sort of polling way, but
 * in case of PVR, the GPU can directly update read/write complete
 * values, and not really tell us which ones it updated.. this also
 * means that sync_lock is not quite sufficient.  So we'll need to
 * do something a bit better when it comes time to add support for
 * the GPU..
 */
void omap_gem_op_update(void)
{
	spin_lock(&sync_lock);
	sync_op_update();
	spin_unlock(&sync_lock);
}
/* mark the start of read and/or write operation */
int omap_gem_op_start(struct drm_gem_object *obj, enum omap_gem_op op)
{
	return sync_op(obj, op, true);
}

/* mark the end of read and/or write operation */
int omap_gem_op_finish(struct drm_gem_object *obj, enum omap_gem_op op)
{
	return sync_op(obj, op, false);
}
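
/* Protocol sketch (illustrative): a writer brackets its hw/dma access
 * with start/finish, and a reader can then block until the write count
 * catches up:
 *
 *	omap_gem_op_start(obj, OMAP_GEM_WRITE);		// write_pending++
 *	// ... submit hw write ...
 *	omap_gem_op_finish(obj, OMAP_GEM_WRITE);	// write_complete++
 *
 *	omap_gem_op_sync(obj, OMAP_GEM_WRITE);		// returns once
 *							// write_complete catches up
 */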
static DECLARE_WAIT_QUEUE_HEAD(sync_event);

static void sync_notify(void *arg)
{
	struct task_struct **waiter_task = arg;
	*waiter_task = NULL;
	wake_up_all(&sync_event);
}
int omap_gem_op_sync(struct drm_gem_object *obj, enum omap_gem_op op)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;
	if (omap_obj->sync) {
		struct task_struct *waiter_task = current;
		struct omap_gem_sync_waiter *waiter =
				kzalloc(sizeof(*waiter), GFP_KERNEL);

		if (!waiter)
			return -ENOMEM;

		waiter->omap_obj = omap_obj;
		waiter->op = op;
		waiter->read_target = omap_obj->sync->read_pending;
		waiter->write_target = omap_obj->sync->write_pending;
		waiter->notify = sync_notify;
		waiter->arg = &waiter_task;

		spin_lock(&sync_lock);
		if (is_waiting(waiter)) {
			SYNC("waited: %p", waiter);
			list_add_tail(&waiter->list, &waiters);
			spin_unlock(&sync_lock);
			ret = wait_event_interruptible(sync_event,
					(waiter_task == NULL));
			spin_lock(&sync_lock);
			if (waiter_task) {
				SYNC("interrupted: %p", waiter);
				/* we were interrupted */
				list_del(&waiter->list);
			} else {
				/* freed in sync_op_update() */
				waiter = NULL;
			}
		}
		spin_unlock(&sync_lock);

		if (waiter)
			kfree(waiter);
	}
	return ret;
}
/* call fxn(arg), either synchronously or asynchronously if the op
 * is currently blocked..  fxn() can be called from any context
 *
 * (TODO for now fxn is called back from whichever context calls
 * omap_gem_op_update().. but this could be better defined later
 * if needed)
 *
 * TODO more code in common w/ _sync()..
 */
int omap_gem_op_async(struct drm_gem_object *obj, enum omap_gem_op op,
		void (*fxn)(void *arg), void *arg)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	if (omap_obj->sync) {
		struct omap_gem_sync_waiter *waiter =
				kzalloc(sizeof(*waiter), GFP_ATOMIC);

		if (!waiter)
			return -ENOMEM;

		waiter->omap_obj = omap_obj;
		waiter->op = op;
		waiter->read_target = omap_obj->sync->read_pending;
		waiter->write_target = omap_obj->sync->write_pending;
		waiter->notify = fxn;
		waiter->arg = arg;

		spin_lock(&sync_lock);
		if (is_waiting(waiter)) {
			SYNC("waited: %p", waiter);
			list_add_tail(&waiter->list, &waiters);
			spin_unlock(&sync_lock);
			return 0;
		}

		spin_unlock(&sync_lock);

		kfree(waiter);
	}

	/* no waiting.. */
	fxn(arg);

	return 0;
}
/* special API so PVR can update the buffer to use a sync-object allocated
 * from its sync-obj heap.  Only used for a newly allocated (from PVR's
 * perspective) sync-object, so we overwrite the new syncobj w/ values
 * from the already allocated syncobj (if there is one)
 */
int omap_gem_set_sync_object(struct drm_gem_object *obj, void *syncobj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	spin_lock(&sync_lock);

	if ((omap_obj->flags & OMAP_BO_EXT_SYNC) && !syncobj) {
		/* clearing a previously set syncobj */
		syncobj = kzalloc(sizeof(*omap_obj->sync), GFP_ATOMIC);
		if (!syncobj) {
			ret = -ENOMEM;
			goto unlock;
		}
		memcpy(syncobj, omap_obj->sync, sizeof(*omap_obj->sync));
		omap_obj->flags &= ~OMAP_BO_EXT_SYNC;
		omap_obj->sync = syncobj;
	} else if (syncobj && !(omap_obj->flags & OMAP_BO_EXT_SYNC)) {
		/* replacing an existing syncobj */
		if (omap_obj->sync) {
			memcpy(syncobj, omap_obj->sync, sizeof(*omap_obj->sync));
			kfree(omap_obj->sync);
		}
		omap_obj->flags |= OMAP_BO_EXT_SYNC;
		omap_obj->sync = syncobj;
	}

unlock:
	spin_unlock(&sync_lock);

	return ret;
}
int omap_gem_init_object(struct drm_gem_object *obj)
{
	return -EINVAL; /* unused */
}
/* don't call directly.. called from GEM core when it is time to actually
 * free the object..
 */
void omap_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	evict(obj);

	if (obj->map_list.map) {
		drm_gem_free_mmap_offset(obj);
	}

	/* don't free externally allocated backing memory */
	if (!(omap_obj->flags & OMAP_BO_EXT_MEM)) {
		if (omap_obj->pages) {
			omap_gem_detach_pages(obj);
		}
		if (!is_shmem(obj)) {
			dma_free_writecombine(dev->dev, obj->size,
					omap_obj->vaddr, omap_obj->paddr);
		} else if (omap_obj->vaddr) {
			vunmap(omap_obj->vaddr);
		}
	}

	/* don't free externally allocated syncobj */
	if (!(omap_obj->flags & OMAP_BO_EXT_SYNC)) {
		kfree(omap_obj->sync);
	}

	drm_gem_object_release(obj);

	kfree(obj);
}
/* convenience method to construct a GEM buffer object, and userspace handle */
int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		union omap_gem_size gsize, uint32_t flags, uint32_t *handle)
{
	struct drm_gem_object *obj;
	int ret;

	obj = omap_gem_new(dev, gsize, flags);
	if (!obj)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, obj, handle);
	if (ret) {
		drm_gem_object_release(obj);
		kfree(obj); /* TODO isn't there a dtor to call? just copying i915 */
		return ret;
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return 0;
}
/* GEM buffer object constructor */
struct drm_gem_object *omap_gem_new(struct drm_device *dev,
		union omap_gem_size gsize, uint32_t flags)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj;
	struct drm_gem_object *obj = NULL;
	size_t size;
	int ret;

	if (flags & OMAP_BO_TILED) {
		if (!usergart) {
			dev_err(dev->dev, "Tiled buffers require DMM\n");
			goto fail;
		}

		/* tiled buffers are always shmem paged backed.. when they are
		 * scanned out, they are remapped into DMM/TILER
		 */
		flags &= ~OMAP_BO_SCANOUT;

		/* currently don't allow cached buffers.. there is some caching
		 * stuff that needs to be handled better
		 */
		flags &= ~(OMAP_BO_CACHED|OMAP_BO_UNCACHED);
		flags |= OMAP_BO_WC;

		/* align dimensions to slot boundaries... */
		tiler_align(gem2fmt(flags),
				&gsize.tiled.width, &gsize.tiled.height);

		/* ...and calculate size based on aligned dimensions */
		size = tiler_size(gem2fmt(flags),
				gsize.tiled.width, gsize.tiled.height);
	} else {
		size = PAGE_ALIGN(gsize.bytes);
	}

	omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL);
	if (!omap_obj) {
		dev_err(dev->dev, "could not allocate GEM object\n");
		goto fail;
	}

	obj = &omap_obj->base;

	if ((flags & OMAP_BO_SCANOUT) && !priv->has_dmm) {
		/* attempt to allocate contiguous memory if we don't
		 * have DMM for remapping discontiguous buffers
		 */
		omap_obj->vaddr = dma_alloc_writecombine(dev->dev, size,
				&omap_obj->paddr, GFP_KERNEL);
		if (omap_obj->vaddr) {
			flags |= OMAP_BO_DMA;
		}
	}

	omap_obj->flags = flags;

	if (flags & OMAP_BO_TILED) {
		omap_obj->width = gsize.tiled.width;
		omap_obj->height = gsize.tiled.height;
	}

	if (flags & (OMAP_BO_DMA|OMAP_BO_EXT_MEM)) {
		ret = drm_gem_private_object_init(dev, obj, size);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
	}

	if (ret)
		goto fail;

	return obj;

fail:
	if (obj) {
		omap_gem_free_object(obj);
	}
	return NULL;
}
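
/* Usage sketch (hypothetical values): a tiled 16bpp buffer; for tiled
 * formats the requested size is carried as width/height in pixels
 * rather than bytes:
 *
 *	union omap_gem_size gsize = {
 *		.tiled = { .width = 1920, .height = 1080 },
 *	};
 *	struct drm_gem_object *bo =
 *		omap_gem_new(dev, gsize, OMAP_BO_TILED_16 | OMAP_BO_WC);
 */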
/* init/cleanup.. if DMM is used, we need to set some stuff up.. */
void omap_gem_init(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;
	const enum tiler_fmt fmts[] = {
			TILFMT_8BIT, TILFMT_16BIT, TILFMT_32BIT
	};
	int i, j, ret;

	ret = omap_dmm_init(dev);
	if (ret) {
		/* DMM only supported on OMAP4 and later, so this isn't fatal */
		dev_warn(dev->dev, "omap_dmm_init failed, disabling DMM\n");
		return;
	}

	usergart = kzalloc(3 * sizeof(*usergart), GFP_KERNEL);
	if (!usergart) {
		dev_warn(dev->dev, "could not allocate usergart\n");
		return;
	}

	/* reserve 4k aligned/wide regions for userspace mappings: */
	for (i = 0; i < ARRAY_SIZE(fmts); i++) {
		uint16_t h = 1, w = PAGE_SIZE >> i;
		tiler_align(fmts[i], &w, &h);
		/* note: since each region is 1 4kb page wide, and minimum
		 * number of rows, the height ends up being the same as the
		 * # of pages in the region
		 */
		usergart[i].height = h;
		usergart[i].height_shift = ilog2(h);
		usergart[i].stride_pfn = tiler_stride(fmts[i]) >> PAGE_SHIFT;
		usergart[i].slot_shift = ilog2((PAGE_SIZE / h) >> i);
		for (j = 0; j < NUM_USERGART_ENTRIES; j++) {
			struct usergart_entry *entry = &usergart[i].entry[j];
			struct tiler_block *block =
					tiler_reserve_2d(fmts[i], w, h,
							PAGE_SIZE);
			if (IS_ERR(block)) {
				dev_err(dev->dev,
						"reserve failed: %d, %d, %ld\n",
						i, j, PTR_ERR(block));
				return;
			}
			entry->paddr = tiler_ssptr(block);
			entry->block = block;

			DBG("%d:%d: %dx%d: paddr=%08x stride=%d", i, j, w, h,
					entry->paddr,
					usergart[i].stride_pfn << PAGE_SHIFT);
		}
	}

	priv->has_dmm = true;
}
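
/* Worked example (assuming PAGE_SIZE = 4096): for i = 1 (TILFMT_16BIT)
 * the reserved region starts as w = 4096 >> 1 = 2048 pixels by h = 1
 * row; tiler_align() then rounds w/h up to slot boundaries, and the
 * resulting h is both the region height in rows and its size in pages,
 * since each region is exactly one 4kb page wide.
 */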
void omap_gem_deinit(struct drm_device *dev)
{
	/* I believe we can rely on there being no more outstanding GEM
	 * objects which could depend on usergart/dmm at this point.
	 */
	kfree(usergart);
}