/*
 * drivers/gpu/drm/omapdrm/omap_gem.c
 *
 * Copyright (C) 2011 Texas Instruments
 * Author: Rob Clark <rob.clark@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/seq_file.h>
#include <linux/shmem_fs.h>
#include <linux/spinlock.h>
#include <linux/pfn_t.h>

#include <drm/drm_vma_manager.h>

#include "omap_drv.h"
#include "omap_dmm_tiler.h"
/*
 * GEM buffer object implementation.
 */
/* note: we use upper 8 bits of flags for driver-internal flags: */
#define OMAP_BO_MEM_DMA_API	0x01000000	/* memory allocated with the dma_alloc_* API */
#define OMAP_BO_MEM_SHMEM	0x02000000	/* memory allocated through shmem backing */
#define OMAP_BO_MEM_DMABUF	0x08000000	/* memory imported from a dmabuf */
struct omap_gem_object {
	struct drm_gem_object base;

	struct list_head mm_list;

	uint32_t flags;

	/** width/height for tiled formats (rounded up to slot boundaries) */
	uint16_t width, height;

	/** roll applied when mapping to DMM */
	uint32_t roll;

	/**
	 * dma_addr contains the buffer DMA address. It is valid for
	 *
	 * - buffers allocated through the DMA mapping API (with the
	 *   OMAP_BO_MEM_DMA_API flag set)
	 *
	 * - buffers imported from dmabuf (with the OMAP_BO_MEM_DMABUF flag set)
	 *   if they are physically contiguous (when sgt->orig_nents == 1)
	 *
	 * - buffers mapped through the TILER when dma_addr_cnt is not zero, in
	 *   which case the DMA address points to the TILER aperture
	 *
	 * Physically contiguous buffers have their DMA address equal to the
	 * physical address as we don't remap those buffers through the TILER.
	 *
	 * Buffers mapped to the TILER have their DMA address pointing to the
	 * TILER aperture. As TILER mappings are refcounted (through
	 * dma_addr_cnt) the DMA address must be accessed through omap_gem_pin()
	 * to ensure that the mapping won't disappear unexpectedly. References
	 * must be released with omap_gem_unpin().
	 */
	dma_addr_t dma_addr;

	/**
	 * # of users of dma_addr
	 */
	uint32_t dma_addr_cnt;

	/**
	 * If the buffer has been imported from a dmabuf the OMAP_BO_MEM_DMABUF
	 * flag is set and the sgt field is valid.
	 */
	struct sg_table *sgt;

	/**
	 * tiler block used when buffer is remapped in DMM/TILER.
	 */
	struct tiler_block *block;

	/**
	 * Array of backing pages, if allocated. Note that pages are never
	 * allocated for buffers originally allocated from contiguous memory.
	 */
	struct page **pages;

	/** addresses corresponding to pages in above array */
	dma_addr_t *dma_addrs;

	/**
	 * Virtual address, if mapped.
	 */
	void *vaddr;
};
#define to_omap_bo(x) container_of(x, struct omap_gem_object, base)
/* To deal with userspace mmap'ings of 2d tiled buffers, which (a) are
 * not necessarily pinned in TILER all the time, and (b) when they are
 * they are not necessarily page aligned, we reserve one or more small
 * regions in each of the 2d containers to use as a user-GART where we
 * can create a second page-aligned mapping of parts of the buffer
 * being accessed from userspace.
 *
 * Note that we could optimize slightly when we know that multiple
 * tiler containers are backed by the same PAT.. but I'll leave that
 * for later..
 */
#define NUM_USERGART_ENTRIES 2
struct omap_drm_usergart_entry {
	struct tiler_block *block;	/* the reserved tiler block */
	dma_addr_t dma_addr;
	struct drm_gem_object *obj;	/* the current pinned obj */
	pgoff_t obj_pgoff;		/* page offset of obj currently
					   mapped in */
};

struct omap_drm_usergart {
	struct omap_drm_usergart_entry entry[NUM_USERGART_ENTRIES];
	int height;			/* height in rows */
	int height_shift;		/* ilog2(height in rows) */
	int slot_shift;			/* ilog2(width per slot) */
	int stride_pfn;			/* stride in pages */
	int last;			/* index of last used entry */
};
/* ----------------------------------------------------------------------------- */

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;
	size_t size;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* Make it mmapable */
	size = omap_gem_mmap_size(obj);
	ret = drm_gem_create_mmap_offset_size(obj, size);
	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}
static bool is_contiguous(struct omap_gem_object *omap_obj)
{
	if (omap_obj->flags & OMAP_BO_MEM_DMA_API)
		return true;

	if ((omap_obj->flags & OMAP_BO_MEM_DMABUF) && omap_obj->sgt->nents == 1)
		return true;

	return false;
}
/* ----------------------------------------------------------------------------- */

static void evict_entry(struct drm_gem_object *obj,
		enum tiler_fmt fmt, struct omap_drm_usergart_entry *entry)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct omap_drm_private *priv = obj->dev->dev_private;
	int n = priv->usergart[fmt].height;
	size_t size = PAGE_SIZE * n;
	loff_t off = mmap_offset(obj) +
			(entry->obj_pgoff << PAGE_SHIFT);
	const int m = DIV_ROUND_UP(omap_obj->width << fmt, PAGE_SIZE);

	if (m > 1) {
		int i;

		/* if stride is larger than PAGE_SIZE then sparse mapping: */
		for (i = n; i > 0; i--) {
			unmap_mapping_range(obj->dev->anon_inode->i_mapping,
					    off, PAGE_SIZE, 1);
			off += PAGE_SIZE * m;
		}
	} else {
		unmap_mapping_range(obj->dev->anon_inode->i_mapping,
				    off, size, 1);
	}

	entry->obj = NULL;
}
/* Evict a buffer from usergart, if it is mapped there */
static void evict(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct omap_drm_private *priv = obj->dev->dev_private;

	if (omap_obj->flags & OMAP_BO_TILED) {
		enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
		int i;

		for (i = 0; i < NUM_USERGART_ENTRIES; i++) {
			struct omap_drm_usergart_entry *entry =
				&priv->usergart[fmt].entry[i];

			if (entry->obj == obj)
				evict_entry(obj, fmt, entry);
		}
	}
}
/* ----------------------------------------------------------------------------- */

/** ensure backing pages are allocated */
static int omap_gem_attach_pages(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct page **pages;
	int npages = obj->size >> PAGE_SHIFT;
	int i, ret;
	dma_addr_t *addrs;

	WARN_ON(omap_obj->pages);

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages));
		return PTR_ERR(pages);
	}

	/* for non-cached buffers, ensure the new pages are clean because
	 * DSS, GPU, etc. are not cache coherent:
	 */
	if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
		addrs = kmalloc(npages * sizeof(*addrs), GFP_KERNEL);
		if (!addrs) {
			ret = -ENOMEM;
			goto free_pages;
		}

		for (i = 0; i < npages; i++) {
			addrs[i] = dma_map_page(dev->dev, pages[i],
					0, PAGE_SIZE, DMA_TO_DEVICE);

			if (dma_mapping_error(dev->dev, addrs[i])) {
				dev_warn(dev->dev,
					"%s: failed to map page\n", __func__);

				for (i = i - 1; i >= 0; --i) {
					dma_unmap_page(dev->dev, addrs[i],
						PAGE_SIZE, DMA_TO_DEVICE);
				}

				ret = -ENOMEM;
				goto free_addrs;
			}
		}
	} else {
		addrs = kzalloc(npages * sizeof(*addrs), GFP_KERNEL);
		if (!addrs) {
			ret = -ENOMEM;
			goto free_pages;
		}
	}

	omap_obj->dma_addrs = addrs;
	omap_obj->pages = pages;

	return 0;

free_addrs:
	kfree(addrs);
free_pages:
	drm_gem_put_pages(obj, pages, true, false);

	return ret;
}
/* acquire pages when needed (for example, for DMA where physically
 * contiguous buffer is not required)
 */
static int get_pages(struct drm_gem_object *obj, struct page ***pages)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	if ((omap_obj->flags & OMAP_BO_MEM_SHMEM) && !omap_obj->pages) {
		ret = omap_gem_attach_pages(obj);
		if (ret) {
			dev_err(obj->dev->dev, "could not attach pages\n");
			return ret;
		}
	}

	/* TODO: even phys-contig.. we should have a list of pages? */
	*pages = omap_obj->pages;

	return 0;
}
/** release backing pages */
static void omap_gem_detach_pages(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	unsigned int npages = obj->size >> PAGE_SHIFT;
	unsigned int i;

	for (i = 0; i < npages; i++) {
		if (omap_obj->dma_addrs[i])
			dma_unmap_page(obj->dev->dev, omap_obj->dma_addrs[i],
				       PAGE_SIZE, DMA_TO_DEVICE);
	}

	kfree(omap_obj->dma_addrs);
	omap_obj->dma_addrs = NULL;

	drm_gem_put_pages(obj, omap_obj->pages, true, false);
	omap_obj->pages = NULL;
}
/* get buffer flags */
uint32_t omap_gem_flags(struct drm_gem_object *obj)
{
	return to_omap_bo(obj)->flags;
}
uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;

	mutex_lock(&obj->dev->struct_mutex);
	offset = mmap_offset(obj);
	mutex_unlock(&obj->dev->struct_mutex);

	return offset;
}
size_t omap_gem_mmap_size(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	size_t size = obj->size;

	if (omap_obj->flags & OMAP_BO_TILED) {
		/* for tiled buffers, the virtual size has stride rounded up
		 * to 4kb.. (to hide the fact that row n+1 might start 16kb or
		 * 32kb later!).  But we don't back the entire buffer with
		 * pages, only the valid picture part.. so need to adjust for
		 * this in the size used to mmap and generate mmap offset
		 */
		size = tiler_vsize(gem2fmt(omap_obj->flags),
				omap_obj->width, omap_obj->height);
	}

	return size;
}
/* ----------------------------------------------------------------------------- */

/* Normal handling for the case of faulting in non-tiled buffers */
static int fault_1d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	unsigned long pfn;
	pgoff_t pgoff;

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	if (omap_obj->pages) {
		omap_gem_cpu_sync_page(obj, pgoff);
		pfn = page_to_pfn(omap_obj->pages[pgoff]);
	} else {
		BUG_ON(!is_contiguous(omap_obj));
		pfn = (omap_obj->dma_addr >> PAGE_SHIFT) + pgoff;
	}

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	return vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
}
/* Special handling for the case of faulting in 2d tiled buffers */
static int fault_2d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct omap_drm_private *priv = obj->dev->dev_private;
	struct omap_drm_usergart_entry *entry;
	enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
	struct page *pages[64];  /* XXX is this too much to have on stack? */
	unsigned long pfn;
	pgoff_t pgoff, base_pgoff;
	unsigned long vaddr;
	int i, ret, slots;

	/*
	 * Note the height of the slot is also equal to the number of pages
	 * that need to be mapped in to fill 4kb wide CPU page.  If the slot
	 * height is 64, then 64 pages fill a 4kb wide by 64 row region.
	 */
	const int n = priv->usergart[fmt].height;
	const int n_shift = priv->usergart[fmt].height_shift;

	/*
	 * If buffer width in bytes > PAGE_SIZE then the virtual stride is
	 * rounded up to next multiple of PAGE_SIZE.. this needs to be taken
	 * into account in some of the math, so figure out virtual stride
	 * in pages
	 */
	const int m = DIV_ROUND_UP(omap_obj->width << fmt, PAGE_SIZE);

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	/*
	 * Actual address we start mapping at is rounded down to previous slot
	 * boundary in the y direction:
	 */
	base_pgoff = round_down(pgoff, m << n_shift);

	/* figure out buffer width in slots */
	slots = omap_obj->width >> priv->usergart[fmt].slot_shift;

	vaddr = vmf->address - ((pgoff - base_pgoff) << PAGE_SHIFT);

	entry = &priv->usergart[fmt].entry[priv->usergart[fmt].last];

	/* evict previous buffer using this usergart entry, if any: */
	if (entry->obj)
		evict_entry(entry->obj, fmt, entry);

	entry->obj = obj;
	entry->obj_pgoff = base_pgoff;

	/* now convert base_pgoff to phys offset from virt offset: */
	base_pgoff = (base_pgoff >> n_shift) * slots;

	/* for wider-than 4k.. figure out which part of the slot-row we want: */
	if (m > 1) {
		int off = pgoff % m;

		entry->obj_pgoff += off;
		base_pgoff /= m;
		slots = min(slots - (off << n_shift), n);
		base_pgoff += off << n_shift;
		vaddr += off << PAGE_SHIFT;
	}

	/*
	 * Map in pages. Beyond the valid pixel part of the buffer, we set
	 * pages[i] to NULL to get a dummy page mapped in.. if someone
	 * reads/writes it they will get random/undefined content, but at
	 * least it won't be corrupting whatever other random page used to
	 * be mapped in, or other undefined behavior.
	 */
	memcpy(pages, &omap_obj->pages[base_pgoff],
			sizeof(struct page *) * slots);
	memset(pages + slots, 0,
			sizeof(struct page *) * (n - slots));

	ret = tiler_pin(entry->block, pages, ARRAY_SIZE(pages), 0, true);
	if (ret) {
		dev_err(obj->dev->dev, "failed to pin: %d\n", ret);
		return ret;
	}

	pfn = entry->dma_addr >> PAGE_SHIFT;

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	for (i = n; i > 0; i--) {
		vm_insert_mixed(vma, vaddr, __pfn_to_pfn_t(pfn, PFN_DEV));
		pfn += priv->usergart[fmt].stride_pfn;
		vaddr += PAGE_SIZE * m;
	}

	/* simple round-robin: */
	priv->usergart[fmt].last = (priv->usergart[fmt].last + 1)
				 % NUM_USERGART_ENTRIES;

	return 0;
}
/**
 * omap_gem_fault - pagefault handler for GEM objects
 * @vmf: fault detail
 *
 * Invoked when a fault occurs on an mmap of a GEM managed area. GEM
 * does most of the work for us including the actual map/unmap calls
 * but we need to do the actual page work.
 *
 * The VMA was set up by GEM. In doing so it also ensured that the
 * vma->vm_private_data points to the GEM object that is backing this
 * mapping.
 */
int omap_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct drm_device *dev = obj->dev;
	struct page **pages;
	int ret;

	/* Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet
	 */
	mutex_lock(&dev->struct_mutex);

	/* if a shmem backed object, make sure we have pages attached now */
	ret = get_pages(obj, &pages);
	if (ret)
		goto fail;

	/* where should we do corresponding put_pages().. we are mapping
	 * the original page, rather than thru a GART, so we can't rely
	 * on eviction to trigger this.  But munmap() or all mappings should
	 * probably trigger put_pages()?
	 */

	if (omap_obj->flags & OMAP_BO_TILED)
		ret = fault_2d(obj, vma, vmf);
	else
		ret = fault_1d(obj, vma, vmf);

fail:
	mutex_unlock(&dev->struct_mutex);
	switch (ret) {
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}
/** We override mainly to fix up some of the vm mapping flags.. */
int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return omap_gem_mmap_obj(vma->vm_private_data, vma);
}
int omap_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (omap_obj->flags & OMAP_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (omap_obj->flags & OMAP_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * We do have some private objects, at least for scanout buffers
		 * on hardware without DMM/TILER.  But these are allocated write-
		 * combined
		 */
		if (WARN_ON(!obj->filp))
			return -EINVAL;

		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		vma->vm_pgoff = 0;
		vma->vm_file  = get_file(obj->filp);

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}
/* ----------------------------------------------------------------------------- */

/**
 * omap_gem_dumb_create - create a dumb buffer
 * @file: our client file
 * @dev: our device
 * @args: the requested arguments copied from userspace
 *
 * Allocate a buffer suitable for use for a frame buffer of the
 * form described by user space. Give userspace a handle by which
 * to reference it.
 */
int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	union omap_gem_size gsize;

	args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);

	args->size = PAGE_ALIGN(args->pitch * args->height);

	gsize = (union omap_gem_size){
		.bytes = args->size,
	};

	return omap_gem_new_handle(dev, file, gsize,
			OMAP_BO_SCANOUT | OMAP_BO_WC, &args->handle);
}
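/*
 * Worked example (illustrative, not taken from the original source): a
 * 1920x1080 XRGB8888 dumb buffer gives
 *	pitch = DIV_ROUND_UP(1920 * 32, 8) = 7680 bytes
 *	size  = PAGE_ALIGN(7680 * 1080)    = 8294400 bytes (2025 pages)
 */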
/**
 * omap_gem_dumb_map - buffer mapping for dumb interface
 * @file: our drm client file
 * @dev: our device
 * @handle: GEM handle to the object (from dumb_create)
 * @offset: memory map offset placed here
 *
 * Do the necessary setup to allow the mapping of the frame buffer
 * into user memory. We don't have to do much here at the moment.
 */
int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = omap_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

fail:
	return ret;
}
#ifdef CONFIG_DRM_FBDEV_EMULATION
/* Set scrolling position.  This allows us to implement fast scrolling
 * for console.
 *
 * Call only from non-atomic contexts.
 */
int omap_gem_roll(struct drm_gem_object *obj, uint32_t roll)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	uint32_t npages = obj->size >> PAGE_SHIFT;
	int ret = 0;

	if (roll > npages) {
		dev_err(obj->dev->dev, "invalid roll: %d\n", roll);
		return -EINVAL;
	}

	omap_obj->roll = roll;

	mutex_lock(&obj->dev->struct_mutex);

	/* if we aren't mapped yet, we don't need to do anything */
	if (omap_obj->block) {
		struct page **pages;

		ret = get_pages(obj, &pages);
		if (ret)
			goto fail;

		ret = tiler_pin(omap_obj->block, pages, npages, roll, true);
		if (ret)
			dev_err(obj->dev->dev, "could not repin: %d\n", ret);
	}

fail:
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}
#endif
/* -----------------------------------------------------------------------------
 * Memory Management & DMA Sync
 */

/*
 * shmem buffers that are mapped cached are not coherent.
 *
 * We keep track of dirty pages using page faulting to perform cache management.
 * When a page is mapped to the CPU in read/write mode the device can't access
 * it and omap_obj->dma_addrs[i] is NULL. When a page is mapped to the device
 * the omap_obj->dma_addrs[i] is set to the DMA address, and the page is
 * unmapped from the CPU.
 */
static inline bool is_cached_coherent(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	return !((omap_obj->flags & OMAP_BO_MEM_SHMEM) &&
		((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED));
}
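/*
 * Per-page ownership as tracked in omap_obj->dma_addrs[] (illustrative
 * summary, not part of the original source):
 *
 *	dma_addrs[i] == 0 : page owned by the CPU, may be dirty in cache
 *	dma_addrs[i] != 0 : page mapped for the device; its CPU mapping has
 *	                    been torn down via unmap_mapping_range()
 *
 * omap_gem_cpu_sync_page() returns a page to CPU ownership, while
 * omap_gem_dma_sync_buffer() hands all pages to the device.
 */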
/* Sync the buffer for CPU access.. note pages should already be
 * attached, ie. omap_gem_get_pages()
 */
void omap_gem_cpu_sync_page(struct drm_gem_object *obj, int pgoff)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (is_cached_coherent(obj))
		return;

	if (omap_obj->dma_addrs[pgoff]) {
		dma_unmap_page(dev->dev, omap_obj->dma_addrs[pgoff],
				PAGE_SIZE, DMA_TO_DEVICE);
		omap_obj->dma_addrs[pgoff] = 0;
	}
}
/* sync the buffer for DMA access */
void omap_gem_dma_sync_buffer(struct drm_gem_object *obj,
		enum dma_data_direction dir)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int i, npages = obj->size >> PAGE_SHIFT;
	struct page **pages = omap_obj->pages;
	bool dirty = false;

	if (is_cached_coherent(obj))
		return;

	for (i = 0; i < npages; i++) {
		if (!omap_obj->dma_addrs[i]) {
			dma_addr_t addr;

			addr = dma_map_page(dev->dev, pages[i], 0,
					    PAGE_SIZE, dir);
			if (dma_mapping_error(dev->dev, addr)) {
				dev_warn(dev->dev, "%s: failed to map page\n",
					__func__);
				break;
			}

			dirty = true;
			omap_obj->dma_addrs[i] = addr;
		}
	}

	if (dirty) {
		unmap_mapping_range(obj->filp->f_mapping, 0,
				    omap_gem_mmap_size(obj), 1);
	}
}
/**
 * omap_gem_pin() - Pin a GEM object in memory
 * @obj: the GEM object
 * @dma_addr: the DMA address
 *
 * Pin the given GEM object in memory and fill the dma_addr pointer with the
 * object's DMA address. If the buffer is not physically contiguous it will be
 * remapped through the TILER to provide a contiguous view.
 *
 * Pins are reference-counted, calling this function multiple times is allowed
 * as long as the corresponding omap_gem_unpin() calls are balanced.
 *
 * Return 0 on success or a negative error code otherwise.
 */
int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr)
{
	struct omap_drm_private *priv = obj->dev->dev_private;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	mutex_lock(&obj->dev->struct_mutex);

	if (!is_contiguous(omap_obj) && priv->has_dmm) {
		if (omap_obj->dma_addr_cnt == 0) {
			struct page **pages;
			uint32_t npages = obj->size >> PAGE_SHIFT;
			enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
			struct tiler_block *block;

			BUG_ON(omap_obj->block);

			ret = get_pages(obj, &pages);
			if (ret)
				goto fail;

			if (omap_obj->flags & OMAP_BO_TILED) {
				block = tiler_reserve_2d(fmt,
						omap_obj->width,
						omap_obj->height, 0);
			} else {
				block = tiler_reserve_1d(obj->size);
			}

			if (IS_ERR(block)) {
				ret = PTR_ERR(block);
				dev_err(obj->dev->dev,
					"could not remap: %d (%d)\n", ret, fmt);
				goto fail;
			}

			/* TODO: enable async refill.. */
			ret = tiler_pin(block, pages, npages,
					omap_obj->roll, true);
			if (ret) {
				tiler_release(block);
				dev_err(obj->dev->dev,
						"could not pin: %d\n", ret);
				goto fail;
			}

			omap_obj->dma_addr = tiler_ssptr(block);
			omap_obj->block = block;

			DBG("got dma address: %pad", &omap_obj->dma_addr);
		}

		omap_obj->dma_addr_cnt++;

		*dma_addr = omap_obj->dma_addr;
	} else if (is_contiguous(omap_obj)) {
		*dma_addr = omap_obj->dma_addr;
	} else {
		ret = -EINVAL;
		goto fail;
	}

fail:
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}
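/*
 * Typical caller pattern (illustrative sketch, not taken from any specific
 * caller in this driver):
 *
 *	dma_addr_t dma_addr;
 *	int ret = omap_gem_pin(obj, &dma_addr);
 *	if (ret)
 *		return ret;
 *	... program the scanout/DMA engine with dma_addr ...
 *	omap_gem_unpin(obj);
 */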
/**
 * omap_gem_unpin() - Unpin a GEM object from memory
 * @obj: the GEM object
 *
 * Unpin the given GEM object previously pinned with omap_gem_pin(). Pins are
 * reference-counted, the actual unpin will only be performed when the number
 * of calls to this function matches the number of calls to omap_gem_pin().
 */
void omap_gem_unpin(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret;

	mutex_lock(&obj->dev->struct_mutex);
	if (omap_obj->dma_addr_cnt > 0) {
		omap_obj->dma_addr_cnt--;
		if (omap_obj->dma_addr_cnt == 0) {
			ret = tiler_unpin(omap_obj->block);
			if (ret) {
				dev_err(obj->dev->dev,
					"could not unpin pages: %d\n", ret);
			}
			ret = tiler_release(omap_obj->block);
			if (ret) {
				dev_err(obj->dev->dev,
					"could not release unmap: %d\n", ret);
			}
			omap_obj->dma_addr = 0;
			omap_obj->block = NULL;
		}
	}

	mutex_unlock(&obj->dev->struct_mutex);
}
/* Get rotated scanout address (only valid if already pinned), at the
 * specified orientation and x,y offset from top-left corner of buffer
 * (only valid for tiled 2d buffers)
 */
int omap_gem_rotated_dma_addr(struct drm_gem_object *obj, uint32_t orient,
		int x, int y, dma_addr_t *dma_addr)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = -EINVAL;

	mutex_lock(&obj->dev->struct_mutex);
	if ((omap_obj->dma_addr_cnt > 0) && omap_obj->block &&
			(omap_obj->flags & OMAP_BO_TILED)) {
		*dma_addr = tiler_tsptr(omap_obj->block, orient, x, y);
		ret = 0;
	}
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}
/* Get tiler stride for the buffer (only valid for 2d tiled buffers) */
int omap_gem_tiled_stride(struct drm_gem_object *obj, uint32_t orient)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = -EINVAL;

	if (omap_obj->flags & OMAP_BO_TILED)
		ret = tiler_stride(gem2fmt(omap_obj->flags), orient);

	return ret;
}
/* if !remap, and we don't have pages backing, then fail, rather than
 * increasing the pin count (which we don't really do yet anyways,
 * because we don't support swapping pages back out).  And 'remap'
 * might not be quite the right name, but I wanted to keep it working
 * similarly to omap_gem_pin().  Note though that mutex is not
 * acquired if !remap (because this can be called in atomic ctxt),
 * but probably omap_gem_unpin() should be changed to work in the
 * same way.  If !remap, a matching omap_gem_put_pages() call is not
 * required (and should not be made).
 */
int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
		bool remap)
{
	int ret;

	if (!remap) {
		struct omap_gem_object *omap_obj = to_omap_bo(obj);

		if (!omap_obj->pages)
			return -ENOMEM;
		*pages = omap_obj->pages;
		return 0;
	}

	mutex_lock(&obj->dev->struct_mutex);
	ret = get_pages(obj, pages);
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}
/* release pages when DMA no longer being performed */
int omap_gem_put_pages(struct drm_gem_object *obj)
{
	/* do something here if we dynamically attach/detach pages.. at
	 * least they would no longer need to be pinned if everyone has
	 * released the pages..
	 */
	return 0;
}
#ifdef CONFIG_DRM_FBDEV_EMULATION
/* Get kernel virtual address for CPU access.. this more or less only
 * exists for omap_fbdev.  This should be called with struct_mutex
 * held.
 */
void *omap_gem_vaddr(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	if (!omap_obj->vaddr) {
		struct page **pages;
		int ret = get_pages(obj, &pages);

		if (ret)
			return ERR_PTR(ret);

		omap_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
	}

	return omap_obj->vaddr;
}
#endif
/* ----------------------------------------------------------------------------- */

/* re-pin objects in DMM in resume path: */
int omap_gem_resume(struct device *dev)
{
	struct drm_device *drm_dev = dev_get_drvdata(dev);
	struct omap_drm_private *priv = drm_dev->dev_private;
	struct omap_gem_object *omap_obj;
	int ret = 0;

	mutex_lock(&priv->list_lock);
	list_for_each_entry(omap_obj, &priv->obj_list, mm_list) {
		if (omap_obj->block) {
			struct drm_gem_object *obj = &omap_obj->base;
			uint32_t npages = obj->size >> PAGE_SHIFT;

			WARN_ON(!omap_obj->pages);  /* this can't happen */
			ret = tiler_pin(omap_obj->block,
					omap_obj->pages, npages,
					omap_obj->roll, true);
			if (ret) {
				dev_err(dev, "could not repin: %d\n", ret);
				goto done;
			}
		}
	}

done:
	mutex_unlock(&priv->list_lock);

	return ret;
}
/* ----------------------------------------------------------------------------- */

#ifdef CONFIG_DEBUG_FS
void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	uint64_t off;

	off = drm_vma_node_start(&obj->vma_node);

	seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d",
			omap_obj->flags, obj->name, kref_read(&obj->refcount),
			off, &omap_obj->dma_addr, omap_obj->dma_addr_cnt,
			omap_obj->vaddr, omap_obj->roll);

	if (omap_obj->flags & OMAP_BO_TILED) {
		seq_printf(m, " %dx%d", omap_obj->width, omap_obj->height);
		if (omap_obj->block) {
			struct tcm_area *area = &omap_obj->block->area;
			seq_printf(m, " (%dx%d, %dx%d)",
					area->p0.x, area->p0.y,
					area->p1.x, area->p1.y);
		}
	} else {
		seq_printf(m, " %zu", obj->size);
	}

	seq_printf(m, "\n");
}

void omap_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct omap_gem_object *omap_obj;
	int count = 0;
	size_t size = 0;

	list_for_each_entry(omap_obj, list, mm_list) {
		struct drm_gem_object *obj = &omap_obj->base;
		seq_printf(m, "   ");
		omap_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif
/* -----------------------------------------------------------------------------
 * Constructor & Destructor
 */

void omap_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	evict(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	mutex_lock(&priv->list_lock);
	list_del(&omap_obj->mm_list);
	mutex_unlock(&priv->list_lock);

	/* this means the object is still pinned.. which really should
	 * not happen.  I think..
	 */
	WARN_ON(omap_obj->dma_addr_cnt > 0);

	if (omap_obj->pages) {
		if (omap_obj->flags & OMAP_BO_MEM_DMABUF)
			kfree(omap_obj->pages);
		else
			omap_gem_detach_pages(obj);
	}

	if (omap_obj->flags & OMAP_BO_MEM_DMA_API) {
		dma_free_wc(dev->dev, obj->size, omap_obj->vaddr,
			    omap_obj->dma_addr);
	} else if (omap_obj->vaddr) {
		vunmap(omap_obj->vaddr);
	} else if (obj->import_attach) {
		drm_prime_gem_destroy(obj, omap_obj->sgt);
	}

	drm_gem_object_release(obj);

	kfree(omap_obj);
}
/* GEM buffer object constructor */
struct drm_gem_object *omap_gem_new(struct drm_device *dev,
		union omap_gem_size gsize, uint32_t flags)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj;
	struct drm_gem_object *obj;
	struct address_space *mapping;
	size_t size;
	int ret;

	/* Validate the flags and compute the memory and cache flags. */
	if (flags & OMAP_BO_TILED) {
		if (!priv->usergart) {
			dev_err(dev->dev, "Tiled buffers require DMM\n");
			return NULL;
		}

		/*
		 * Tiled buffers are always shmem paged backed. When they are
		 * scanned out, they are remapped into DMM/TILER.
		 */
		flags &= ~OMAP_BO_SCANOUT;
		flags |= OMAP_BO_MEM_SHMEM;

		/*
		 * Currently don't allow cached buffers. There is some caching
		 * stuff that needs to be handled better.
		 */
		flags &= ~(OMAP_BO_CACHED|OMAP_BO_WC|OMAP_BO_UNCACHED);
		flags |= tiler_get_cpu_cache_flags();
	} else if ((flags & OMAP_BO_SCANOUT) && !priv->has_dmm) {
		/*
		 * OMAP_BO_SCANOUT hints that the buffer doesn't need to be
		 * tiled. However, to lower the pressure on memory allocation,
		 * use contiguous memory only if no TILER is available.
		 */
		flags |= OMAP_BO_MEM_DMA_API;
	} else if (!(flags & OMAP_BO_MEM_DMABUF)) {
		/*
		 * All other buffers not backed by dma_buf are shmem-backed.
		 */
		flags |= OMAP_BO_MEM_SHMEM;
	}

	/* Allocate and initialize the OMAP GEM object. */
	omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL);
	if (!omap_obj)
		return NULL;

	obj = &omap_obj->base;
	omap_obj->flags = flags;

	if (flags & OMAP_BO_TILED) {
		/*
		 * For tiled buffers align dimensions to slot boundaries and
		 * calculate size based on aligned dimensions.
		 */
		tiler_align(gem2fmt(flags), &gsize.tiled.width,
			    &gsize.tiled.height);

		size = tiler_size(gem2fmt(flags), gsize.tiled.width,
				  gsize.tiled.height);

		omap_obj->width = gsize.tiled.width;
		omap_obj->height = gsize.tiled.height;
	} else {
		size = PAGE_ALIGN(gsize.bytes);
	}

	/* Initialize the GEM object. */
	if (!(flags & OMAP_BO_MEM_SHMEM)) {
		drm_gem_private_object_init(dev, obj, size);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto err_free;

		mapping = obj->filp->f_mapping;
		mapping_set_gfp_mask(mapping, GFP_USER | __GFP_DMA32);
	}

	/* Allocate memory if needed. */
	if (flags & OMAP_BO_MEM_DMA_API) {
		omap_obj->vaddr = dma_alloc_wc(dev->dev, size,
					       &omap_obj->dma_addr,
					       GFP_KERNEL);
		if (!omap_obj->vaddr)
			goto err_release;
	}

	mutex_lock(&priv->list_lock);
	list_add(&omap_obj->mm_list, &priv->obj_list);
	mutex_unlock(&priv->list_lock);

	return obj;

err_release:
	drm_gem_object_release(obj);
err_free:
	kfree(omap_obj);
	return NULL;
}
struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
		struct sg_table *sgt)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj;
	struct drm_gem_object *obj;
	union omap_gem_size gsize;

	/* Without a DMM only physically contiguous buffers can be supported. */
	if (sgt->orig_nents != 1 && !priv->has_dmm)
		return ERR_PTR(-EINVAL);

	mutex_lock(&dev->struct_mutex);

	gsize.bytes = PAGE_ALIGN(size);
	obj = omap_gem_new(dev, gsize, OMAP_BO_MEM_DMABUF | OMAP_BO_WC);
	if (!obj) {
		obj = ERR_PTR(-ENOMEM);
		goto done;
	}

	omap_obj = to_omap_bo(obj);
	omap_obj->sgt = sgt;

	if (sgt->orig_nents == 1) {
		omap_obj->dma_addr = sg_dma_address(sgt->sgl);
	} else {
		/* Create pages list from sgt */
		struct sg_page_iter iter;
		struct page **pages;
		unsigned int npages;
		unsigned int i = 0;

		npages = DIV_ROUND_UP(size, PAGE_SIZE);
		pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
		if (!pages) {
			omap_gem_free_object(obj);
			obj = ERR_PTR(-ENOMEM);
			goto done;
		}

		omap_obj->pages = pages;

		for_each_sg_page(sgt->sgl, &iter, sgt->orig_nents, 0) {
			pages[i++] = sg_page_iter_page(&iter);
			if (i > npages)
				break;
		}

		if (WARN_ON(i != npages)) {
			omap_gem_free_object(obj);
			obj = ERR_PTR(-ENOMEM);
			goto done;
		}
	}

done:
	mutex_unlock(&dev->struct_mutex);

	return obj;
}
/* convenience method to construct a GEM buffer object, and userspace handle */
int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		union omap_gem_size gsize, uint32_t flags, uint32_t *handle)
{
	struct drm_gem_object *obj;
	int ret;

	obj = omap_gem_new(dev, gsize, flags);
	if (!obj)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, obj, handle);
	if (ret) {
		omap_gem_free_object(obj);
		return ret;
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return 0;
}
/* ----------------------------------------------------------------------------- */

/* If DMM is used, we need to set some stuff up.. */
void omap_gem_init(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_drm_usergart *usergart;
	const enum tiler_fmt fmts[] = {
			TILFMT_8BIT, TILFMT_16BIT, TILFMT_32BIT
	};
	int i, j;

	if (!dmm_is_available()) {
		/* DMM only supported on OMAP4 and later, so this isn't fatal */
		dev_warn(dev->dev, "DMM not available, disable DMM support\n");
		return;
	}

	usergart = kcalloc(3, sizeof(*usergart), GFP_KERNEL);
	if (!usergart)
		return;

	/* reserve 4k aligned/wide regions for userspace mappings: */
	for (i = 0; i < ARRAY_SIZE(fmts); i++) {
		uint16_t h = 1, w = PAGE_SIZE >> i;

		tiler_align(fmts[i], &w, &h);
		/* note: since each region is 1 4kb page wide, and minimum
		 * number of rows, the height ends up being the same as the
		 * # of pages in the region
		 */
		usergart[i].height = h;
		usergart[i].height_shift = ilog2(h);
		usergart[i].stride_pfn = tiler_stride(fmts[i], 0) >> PAGE_SHIFT;
		usergart[i].slot_shift = ilog2((PAGE_SIZE / h) >> i);
		for (j = 0; j < NUM_USERGART_ENTRIES; j++) {
			struct omap_drm_usergart_entry *entry;
			struct tiler_block *block;

			entry = &usergart[i].entry[j];
			block = tiler_reserve_2d(fmts[i], w, h, PAGE_SIZE);
			if (IS_ERR(block)) {
				dev_err(dev->dev,
					"reserve failed: %d, %d, %ld\n",
					i, j, PTR_ERR(block));
				return;
			}
			entry->dma_addr = tiler_ssptr(block);
			entry->block = block;

			DBG("%d:%d: %dx%d: dma_addr=%pad stride=%d", i, j, w, h,
					&entry->dma_addr,
					usergart[i].stride_pfn << PAGE_SHIFT);
		}
	}

	priv->usergart = usergart;
	priv->has_dmm = true;
}
void omap_gem_deinit(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;

	/* I believe we can rely on there being no more outstanding GEM
	 * objects which could depend on usergart/dmm at this point.
	 */
	kfree(priv->usergart);
}