/*
 * drivers/staging/omapdrm/omap_gem.c
 *
 * Copyright (C) 2011 Texas Instruments
 * Author: Rob Clark <rob.clark@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */


#include <linux/spinlock.h>
#include <linux/shmem_fs.h>

#include "omap_drv.h"

/* remove these once drm core helpers are merged */
struct page **_drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
void _drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
		bool dirty, bool accessed);

/*
 * GEM buffer object implementation.
 */

#define to_omap_bo(x) container_of(x, struct omap_gem_object, base)

/* note: we use upper 8 bits of flags for driver-internal flags: */
#define OMAP_BO_DMA		0x01000000	/* actually is physically contiguous */
#define OMAP_BO_EXT_SYNC	0x02000000	/* externally allocated sync object */
#define OMAP_BO_EXT_MEM		0x04000000	/* externally allocated memory */


struct omap_gem_object {
	struct drm_gem_object base;

	uint32_t flags;

	/**
	 * If the buffer is allocated physically contiguous, the OMAP_BO_DMA
	 * flag is set and the paddr is valid.
	 *
	 * Note that OMAP_BO_SCANOUT is a hint from userspace that a DMA
	 * capable buffer is requested, but it doesn't guarantee that the
	 * buffer actually is contiguous.  Use the OMAP_BO_DMA flag to
	 * determine if the buffer has a DMA capable physical address.
	 */
	dma_addr_t paddr;

	/**
	 * Array of backing pages, if allocated.  Note that pages are never
	 * allocated for buffers originally allocated from contiguous memory.
	 */
	struct page **pages;

	/**
	 * Virtual address, if mapped.
	 */
	void *vaddr;

	/**
	 * sync-object allocated on demand (if needed)
	 *
	 * Per-buffer sync-object for tracking pending and completed hw/dma
	 * read and write operations.  The layout in memory is dictated by
	 * the SGX firmware, which uses this information to stall the command
	 * stream if a surface is not ready yet.
	 *
	 * Note that when the buffer is used by SGX, the sync-object needs to
	 * be allocated from a special heap of sync-objects.  This way many
	 * sync objects can be packed in a page, and not waste GPU virtual
	 * address space.  Because of this we have to have an
	 * omap_gem_set_sync_object() API to allow replacement of the syncobj
	 * after it has (potentially) already been allocated.  A bit ugly but
	 * I haven't thought of a better alternative.
	 */
	struct {
		uint32_t write_pending;
		uint32_t write_complete;
		uint32_t read_pending;
		uint32_t read_complete;
	} *sync;
};

/* GEM objects can either be allocated from contiguous memory (in which
 * case obj->filp==NULL), or w/ shmem backing (obj->filp!=NULL).  Non-
 * contiguous buffers can be remapped in TILER/DMM if they need to be
 * contiguous... but to reduce pressure on TILER/DMM space, when we know
 * at allocation time that the buffer will need to be scanned out, we
 * allocate it from contiguous memory up front instead.
 */
static inline bool is_shmem(struct drm_gem_object *obj)
{
	return obj->filp != NULL;
}

static int get_pages(struct drm_gem_object *obj, struct page ***pages);

static DEFINE_SPINLOCK(sync_lock);

/** ensure backing pages are allocated */
static int omap_gem_attach_pages(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct page **pages;

	WARN_ON(omap_obj->pages);

	/* TODO: __GFP_DMA32 .. but somehow GFP_HIGHMEM is coming from the
	 * mapping_gfp_mask(mapping) which conflicts w/ GFP_DMA32.. probably
	 * we actually want CMA memory for it all anyways..
	 */
	pages = _drm_gem_get_pages(obj, GFP_KERNEL);
	if (IS_ERR(pages)) {
		dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages));
		return PTR_ERR(pages);
	}

	omap_obj->pages = pages;
	return 0;
}

/** release backing pages */
static void omap_gem_detach_pages(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	_drm_gem_put_pages(obj, omap_obj->pages, true, false);
	omap_obj->pages = NULL;
}

/** get mmap offset */
uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj)
{
	if (!obj->map_list.map) {
		/* Make it mmapable */
		int ret = drm_gem_create_mmap_offset(obj);
		if (ret) {
			dev_err(obj->dev->dev, "could not allocate mmap offset\n");
			return 0;
		}
	}

	return (uint64_t)obj->map_list.hash.key << PAGE_SHIFT;
}
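
/* Note (an explanatory aside, not from the original file): the value returned
 * here is the "fake" mmap offset that userspace passes as the offset argument
 * to mmap() on the drm fd.  drm_gem_mmap() looks the GEM object back up from
 * that offset, and omap_gem_fault() below then fills in the real pages/pfns
 * on demand.
 */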

/**
 * omap_gem_fault - pagefault handler for GEM objects
 * @vma: the VMA of the GEM object
 * @vmf: fault detail
 *
 * Invoked when a fault occurs on an mmap of a GEM managed area.  GEM
 * does most of the work for us including the actual map/unmap calls
 * but we need to do the actual page work.
 *
 * The VMA was set up by GEM.  In doing so it also ensured that the
 * vma->vm_private_data points to the GEM object that is backing this
 * mapping.
 */
int omap_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct drm_device *dev = obj->dev;
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int ret;

	/* Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet
	 */
	mutex_lock(&dev->struct_mutex);

	/* if a shmem backed object, make sure we have pages attached now */
	ret = get_pages(obj, &pages);
	if (ret) {
		goto fail;
	}

	/* where should we do the corresponding put_pages()?  We are mapping
	 * the original page, rather than thru a GART, so we can't rely
	 * on eviction to trigger this.  But munmap() of all mappings should
	 * probably trigger put_pages()?
	 */

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	if (omap_obj->pages) {
		pfn = page_to_pfn(omap_obj->pages[pgoff]);
	} else {
		BUG_ON(!(omap_obj->flags & OMAP_BO_DMA));
		pfn = (omap_obj->paddr >> PAGE_SHIFT) + pgoff;
	}

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);

	ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);

fail:
	mutex_unlock(&dev->struct_mutex);
	switch (ret) {
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

/** We override mainly to fix up some of the vm mapping flags.. */
int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct omap_gem_object *omap_obj;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	/* after drm_gem_mmap(), it is safe to access the obj */
	omap_obj = to_omap_bo(vma->vm_private_data);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (omap_obj->flags & OMAP_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (omap_obj->flags & OMAP_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return ret;
}

/**
 * omap_gem_dumb_create - create a dumb buffer
 * @file: our client file
 * @dev: our device
 * @args: the requested arguments copied from userspace
 *
 * Allocate a buffer suitable for use for a frame buffer of the
 * form described by user space.  Give userspace a handle by which
 * to reference it.
 */
int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	union omap_gem_size gsize;

	/* in case someone tries to feed us a completely bogus stride: */
	args->pitch = align_pitch(args->pitch, args->width, args->bpp);
	args->size = PAGE_ALIGN(args->pitch * args->height);

	gsize = (union omap_gem_size){
		.bytes = args->size,
	};

	return omap_gem_new_handle(dev, file, gsize,
			OMAP_BO_SCANOUT | OMAP_BO_WC, &args->handle);
}

/**
 * omap_gem_dumb_destroy - destroy a dumb buffer
 * @file: client file
 * @dev: our DRM device
 * @handle: the object handle
 *
 * Destroy a handle that was created via omap_gem_dumb_create.
 */
int omap_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
		uint32_t handle)
{
	/* No special work needed, drop the reference and see what falls out */
	return drm_gem_handle_delete(file, handle);
}

/**
 * omap_gem_dumb_map_offset - buffer mapping for dumb interface
 * @file: our drm client file
 * @dev: drm device
 * @handle: GEM handle to the object (from dumb_create)
 * @offset: returned fake mmap offset
 *
 * Do the necessary setup to allow the mapping of the frame buffer
 * into user memory.  We don't have to do much here at the moment.
 */
int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	mutex_lock(&dev->struct_mutex);

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(dev, file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = omap_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

fail:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
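
/* Illustrative userspace flow (a sketch, not from this file), using the
 * standard DRM dumb-buffer ioctls and UAPI structs from libdrm/drm_mode.h:
 *
 *	struct drm_mode_create_dumb create = { .width = w, .height = h, .bpp = 32 };
 *	ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);	// -> omap_gem_dumb_create()
 *
 *	struct drm_mode_map_dumb map = { .handle = create.handle };
 *	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);	// -> omap_gem_dumb_map_offset()
 *
 *	void *ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
 *			MAP_SHARED, fd, map.offset);	// -> omap_gem_mmap()/omap_gem_fault()
 *
 *	struct drm_mode_destroy_dumb destroy = { .handle = create.handle };
 *	ioctl(fd, DRM_IOCTL_MODE_DESTROY_DUMB, &destroy); // -> omap_gem_dumb_destroy()
 */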

/* Get physical address for DMA.. if 'remap' is true, and the buffer is not
 * already contiguous, remap it to pin in physically contiguous memory.. (ie.
 * map in TILER)
 */
int omap_gem_get_paddr(struct drm_gem_object *obj,
		dma_addr_t *paddr, bool remap)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	if (is_shmem(obj)) {
		/* TODO: remap to TILER */
		return -ENOMEM;
	}

	*paddr = omap_obj->paddr;

	return ret;
}

/* Release physical address, when DMA is no longer being performed.. this
 * could potentially unpin and unmap buffers from TILER
 */
int omap_gem_put_paddr(struct drm_gem_object *obj)
{
	/* do something here when remap to TILER is used.. */
	return 0;
}
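
/* Illustrative in-kernel usage (a sketch, not from this file): a caller that
 * needs to point scanout/DMA hardware at the buffer brackets the access with
 * get/put, e.g.:
 *
 *	dma_addr_t paddr;
 *	int ret = omap_gem_get_paddr(obj, &paddr, true);
 *	if (!ret) {
 *		// program the display/DMA engine with 'paddr' ...
 *		omap_gem_put_paddr(obj);
 *	}
 *
 * With the current implementation only buffers that are already contiguous
 * (allocated with OMAP_BO_SCANOUT) succeed; shmem-backed buffers return
 * -ENOMEM until TILER remapping is implemented.
 */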

/* acquire pages when needed (for example, for DMA where a physically
 * contiguous buffer is not required)
 */
static int get_pages(struct drm_gem_object *obj, struct page ***pages)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	if (is_shmem(obj) && !omap_obj->pages) {
		ret = omap_gem_attach_pages(obj);
		if (ret) {
			dev_err(obj->dev->dev, "could not attach pages\n");
			return ret;
		}
	}

	/* TODO: even phys-contig.. we should have a list of pages? */
	*pages = omap_obj->pages;

	return 0;
}

int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages)
{
	int ret;
	mutex_lock(&obj->dev->struct_mutex);
	ret = get_pages(obj, pages);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

/* release pages when DMA no longer being performed */
int omap_gem_put_pages(struct drm_gem_object *obj)
{
	/* do something here if we dynamically attach/detach pages.. at
	 * least they would no longer need to be pinned if everyone has
	 * released the pages..
	 */
	return 0;
}
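
/* Illustrative usage sketch (not from this file): a caller that wants the
 * individual backing pages, e.g. to build a scatterlist for non-contiguous
 * DMA, would do roughly:
 *
 *	struct page **pages;
 *	int ret = omap_gem_get_pages(obj, &pages);
 *	if (!ret) {
 *		// pages[0 .. (obj->size >> PAGE_SHIFT) - 1] back the buffer;
 *		// hand them to dma_map_sg() or similar, then:
 *		omap_gem_put_pages(obj);
 *	}
 *
 * Note that for buffers allocated from contiguous memory, 'pages' is
 * currently NULL (see the TODO in get_pages() above).
 */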

/* Get kernel virtual address for CPU access.. only buffers that are
 * allocated contiguously have a kernel virtual address, so this more
 * or less only exists for omap_fbdev
 */
void *omap_gem_vaddr(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	return omap_obj->vaddr;
}

/* Buffer Synchronization:
 */

struct omap_gem_sync_waiter {
	struct list_head list;
	struct omap_gem_object *omap_obj;
	enum omap_gem_op op;
	uint32_t read_target, write_target;
	/* notify called w/ sync_lock held */
	void (*notify)(void *arg);
	void *arg;
};

/* list of omap_gem_sync_waiter.. the notify fxn gets called back when
 * the read and/or write target count is achieved, which can invoke a user
 * callback (e.g. to kick 3d and/or 2d), wake up a blocked task (prep for
 * cpu access), etc.
 */
static LIST_HEAD(waiters);

static inline bool is_waiting(struct omap_gem_sync_waiter *waiter)
{
	struct omap_gem_object *omap_obj = waiter->omap_obj;
	if ((waiter->op & OMAP_GEM_READ) &&
			(omap_obj->sync->read_complete < waiter->read_target))
		return true;
	if ((waiter->op & OMAP_GEM_WRITE) &&
			(omap_obj->sync->write_complete < waiter->write_target))
		return true;
	return false;
}

/* macro for sync debug.. */
#define SYNCDBG 0
#define SYNC(fmt, ...) do { if (SYNCDBG) \
		printk(KERN_ERR "%s:%d: "fmt"\n", \
				__func__, __LINE__, ##__VA_ARGS__); \
	} while (0)


static void sync_op_update(void)
{
	struct omap_gem_sync_waiter *waiter, *n;
	list_for_each_entry_safe(waiter, n, &waiters, list) {
		if (!is_waiting(waiter)) {
			list_del(&waiter->list);
			SYNC("notify: %p", waiter);
			waiter->notify(waiter->arg);
			kfree(waiter);
		}
	}
}

static inline int sync_op(struct drm_gem_object *obj,
		enum omap_gem_op op, bool start)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	spin_lock(&sync_lock);

	if (!omap_obj->sync) {
		omap_obj->sync = kzalloc(sizeof(*omap_obj->sync), GFP_ATOMIC);
		if (!omap_obj->sync) {
			ret = -ENOMEM;
			goto unlock;
		}
	}

	if (start) {
		if (op & OMAP_GEM_READ)
			omap_obj->sync->read_pending++;
		if (op & OMAP_GEM_WRITE)
			omap_obj->sync->write_pending++;
	} else {
		if (op & OMAP_GEM_READ)
			omap_obj->sync->read_complete++;
		if (op & OMAP_GEM_WRITE)
			omap_obj->sync->write_complete++;
		sync_op_update();
	}

unlock:
	spin_unlock(&sync_lock);

	return ret;
}

/* it is a bit lame to handle updates in this sort of polling way, but
 * in the case of PVR, the GPU can directly update read/write complete
 * values, and not really tell us which ones it updated.. this also
 * means that sync_lock is not quite sufficient.  So we'll need to
 * do something a bit better when it comes time to add support for
 * separate 2d hw..
 */
void omap_gem_op_update(void)
{
	spin_lock(&sync_lock);
	sync_op_update();
	spin_unlock(&sync_lock);
}

/* mark the start of read and/or write operation */
int omap_gem_op_start(struct drm_gem_object *obj, enum omap_gem_op op)
{
	return sync_op(obj, op, true);
}

int omap_gem_op_finish(struct drm_gem_object *obj, enum omap_gem_op op)
{
	return sync_op(obj, op, false);
}

static DECLARE_WAIT_QUEUE_HEAD(sync_event);

static void sync_notify(void *arg)
{
	struct task_struct **waiter_task = arg;
	*waiter_task = NULL;
	wake_up_all(&sync_event);
}

int omap_gem_op_sync(struct drm_gem_object *obj, enum omap_gem_op op)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;
	if (omap_obj->sync) {
		struct task_struct *waiter_task = current;
		struct omap_gem_sync_waiter *waiter =
				kzalloc(sizeof(*waiter), GFP_KERNEL);

		if (!waiter) {
			return -ENOMEM;
		}

		waiter->omap_obj = omap_obj;
		waiter->op = op;
		waiter->read_target = omap_obj->sync->read_pending;
		waiter->write_target = omap_obj->sync->write_pending;
		waiter->notify = sync_notify;
		waiter->arg = &waiter_task;

		spin_lock(&sync_lock);
		if (is_waiting(waiter)) {
			SYNC("waited: %p", waiter);
			list_add_tail(&waiter->list, &waiters);
			spin_unlock(&sync_lock);
			ret = wait_event_interruptible(sync_event,
					(waiter_task == NULL));
			spin_lock(&sync_lock);
			if (waiter_task) {
				SYNC("interrupted: %p", waiter);
				/* we were interrupted */
				list_del(&waiter->list);
				waiter_task = NULL;
			} else {
				/* freed in sync_op_update() */
				waiter = NULL;
			}
		}
		spin_unlock(&sync_lock);

		if (waiter) {
			kfree(waiter);
		}
	}
	return ret;
}
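
/* Illustrative usage sketch (not from this file): code that wants CPU access
 * while hw/dma may still be using the buffer would do something like:
 *
 *	// wait for any pending hw read/write ops before touching with the CPU:
 *	omap_gem_op_sync(obj, OMAP_GEM_READ | OMAP_GEM_WRITE);
 *	// ... CPU access ...
 *
 * while code that submits hw/dma work brackets it with
 * omap_gem_op_start()/omap_gem_op_finish() (or lets the GPU bump the
 * complete counters directly and then calls omap_gem_op_update()).
 */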

/* call fxn(arg), either synchronously or asynchronously if the op
 * is currently blocked.. fxn() can be called from any context
 *
 * (TODO for now fxn is called back from whichever context calls
 * omap_gem_op_update().. but this could be better defined later
 * if needed)
 *
 * TODO more code in common w/ _sync()..
 */
int omap_gem_op_async(struct drm_gem_object *obj, enum omap_gem_op op,
		void (*fxn)(void *arg), void *arg)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	if (omap_obj->sync) {
		struct omap_gem_sync_waiter *waiter =
				kzalloc(sizeof(*waiter), GFP_ATOMIC);

		if (!waiter) {
			return -ENOMEM;
		}

		waiter->omap_obj = omap_obj;
		waiter->op = op;
		waiter->read_target = omap_obj->sync->read_pending;
		waiter->write_target = omap_obj->sync->write_pending;
		waiter->notify = fxn;
		waiter->arg = arg;

		spin_lock(&sync_lock);
		if (is_waiting(waiter)) {
			SYNC("waited: %p", waiter);
			list_add_tail(&waiter->list, &waiters);
			spin_unlock(&sync_lock);
			return 0;
		}

		spin_unlock(&sync_lock);

		/* nothing to wait for, so the waiter is not needed after all */
		kfree(waiter);
	}

	/* no waiting.. */
	fxn(arg);

	return 0;
}
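
/* Illustrative usage sketch (not from this file): instead of blocking, a
 * caller can be notified once the buffer becomes idle, e.g. to kick a
 * deferred page-flip ('flip_worker' and 'work' are hypothetical names):
 *
 *	static void flip_worker(void *arg)
 *	{
 *		struct my_flip_work *work = arg;
 *		// program the flip now that the hw is done writing
 *	}
 *
 *	omap_gem_op_async(obj, OMAP_GEM_WRITE, flip_worker, work);
 *
 * The callback runs immediately if nothing is pending, otherwise from
 * whichever context later calls omap_gem_op_update() (see TODO above).
 */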

/* special API so PVR can update the buffer to use a sync-object allocated
 * from its sync-obj heap.  Only used for a newly allocated (from PVR's
 * perspective) sync-object, so we overwrite the new syncobj w/ values
 * from the already allocated syncobj (if there is one)
 */
int omap_gem_set_sync_object(struct drm_gem_object *obj, void *syncobj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	spin_lock(&sync_lock);

	if ((omap_obj->flags & OMAP_BO_EXT_SYNC) && !syncobj) {
		/* clearing a previously set syncobj */
		syncobj = kzalloc(sizeof(*omap_obj->sync), GFP_ATOMIC);
		if (!syncobj) {
			ret = -ENOMEM;
			goto unlock;
		}
		memcpy(syncobj, omap_obj->sync, sizeof(*omap_obj->sync));
		omap_obj->flags &= ~OMAP_BO_EXT_SYNC;
		omap_obj->sync = syncobj;
	} else if (syncobj && !(omap_obj->flags & OMAP_BO_EXT_SYNC)) {
		/* replacing an existing syncobj */
		if (omap_obj->sync) {
			memcpy(syncobj, omap_obj->sync, sizeof(*omap_obj->sync));
			kfree(omap_obj->sync);
		}
		omap_obj->flags |= OMAP_BO_EXT_SYNC;
		omap_obj->sync = syncobj;
	}

unlock:
	spin_unlock(&sync_lock);
	return ret;
}

int omap_gem_init_object(struct drm_gem_object *obj)
{
	return -EINVAL; /* unused */
}

/* don't call directly.. called from GEM core when it is time to actually
 * free the object..
 */
void omap_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (obj->map_list.map) {
		drm_gem_free_mmap_offset(obj);
	}

	/* don't free externally allocated backing memory */
	if (!(omap_obj->flags & OMAP_BO_EXT_MEM)) {
		if (omap_obj->pages) {
			omap_gem_detach_pages(obj);
		}
		if (!is_shmem(obj)) {
			dma_free_writecombine(dev->dev, obj->size,
					omap_obj->vaddr, omap_obj->paddr);
		}
	}

	/* don't free externally allocated syncobj */
	if (!(omap_obj->flags & OMAP_BO_EXT_SYNC)) {
		kfree(omap_obj->sync);
	}

	drm_gem_object_release(obj);

	kfree(obj);
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		union omap_gem_size gsize, uint32_t flags, uint32_t *handle)
{
	struct drm_gem_object *obj;
	int ret;

	obj = omap_gem_new(dev, gsize, flags);
	if (!obj)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, obj, handle);
	if (ret) {
		drm_gem_object_release(obj);
		kfree(obj); /* TODO isn't there a dtor to call? just copying i915 */
		return ret;
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return 0;
}

/* GEM buffer object constructor */
struct drm_gem_object *omap_gem_new(struct drm_device *dev,
		union omap_gem_size gsize, uint32_t flags)
{
	struct omap_gem_object *omap_obj;
	struct drm_gem_object *obj = NULL;
	size_t size;
	int ret;

	if (flags & OMAP_BO_TILED) {
		/* TODO: not implemented yet */
		goto fail;
	}

	size = PAGE_ALIGN(gsize.bytes);

	omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL);
	if (!omap_obj) {
		dev_err(dev->dev, "could not allocate GEM object\n");
		goto fail;
	}

	obj = &omap_obj->base;

	if (flags & OMAP_BO_SCANOUT) {
		/* attempt to allocate contiguous memory */
		omap_obj->vaddr = dma_alloc_writecombine(dev->dev, size,
				&omap_obj->paddr, GFP_KERNEL);
		if (omap_obj->vaddr) {
			flags |= OMAP_BO_DMA;
		}
	}

	omap_obj->flags = flags;

	if (flags & (OMAP_BO_DMA|OMAP_BO_EXT_MEM)) {
		ret = drm_gem_private_object_init(dev, obj, size);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
	}

	if (ret) {
		goto fail;
	}

	return obj;

fail:
	if (obj) {
		omap_gem_free_object(obj);
	}
	return NULL;
}
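
/* Illustrative in-kernel usage sketch (not from this file), roughly what a
 * consumer such as omap_fbdev would do to get a buffer that is both CPU
 * accessible and scanout capable; error handling omitted, 'pitch' and
 * 'height' are hypothetical:
 *
 *	union omap_gem_size gsize = { .bytes = PAGE_ALIGN(pitch * height) };
 *	struct drm_gem_object *bo =
 *			omap_gem_new(dev, gsize, OMAP_BO_SCANOUT | OMAP_BO_WC);
 *	dma_addr_t paddr;
 *	void *vaddr;
 *
 *	omap_gem_get_paddr(bo, &paddr, true);	// physical address for the display controller
 *	vaddr = omap_gem_vaddr(bo);		// kernel mapping for fbcon
 *	// ... use the buffer ...
 *	omap_gem_put_paddr(bo);
 *	drm_gem_object_unreference_unlocked(bo);
 */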