/*
 * drivers/gpu/drm/omapdrm/omap_gem.c
 *
 * Copyright (C) 2011 Texas Instruments
 * Author: Rob Clark <rob.clark@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/seq_file.h>
#include <linux/shmem_fs.h>
#include <linux/spinlock.h>
#include <linux/pfn_t.h>

#include <drm/drm_vma_manager.h>

#include "omap_drv.h"
#include "omap_dmm_tiler.h"

/*
 * GEM buffer object implementation.
 */

/* note: we use upper 8 bits of flags for driver-internal flags: */
#define OMAP_BO_MEM_DMA_API	0x01000000	/* memory allocated with the dma_alloc_* API */
#define OMAP_BO_MEM_SHMEM	0x02000000	/* memory allocated through shmem backing */
#define OMAP_BO_MEM_DMABUF	0x08000000	/* memory imported from a dmabuf */

struct omap_gem_object {
	struct drm_gem_object base;

	struct list_head mm_list;

	uint32_t flags;

	/** width/height for tiled formats (rounded up to slot boundaries) */
	uint16_t width, height;

	/** roll applied when mapping to DMM */
	uint32_t roll;

	/**
	 * dma_addr contains the buffer DMA address. It is valid for
	 *
	 * - buffers allocated through the DMA mapping API (with the
	 *   OMAP_BO_MEM_DMA_API flag set)
	 *
	 * - buffers imported from dmabuf (with the OMAP_BO_MEM_DMABUF flag set)
	 *   if they are physically contiguous (when sgt->orig_nents == 1)
	 *
	 * - buffers mapped through the TILER when dma_addr_cnt is not zero, in
	 *   which case the DMA address points to the TILER aperture
	 *
	 * Physically contiguous buffers have their DMA address equal to the
	 * physical address as we don't remap those buffers through the TILER.
	 *
	 * Buffers mapped to the TILER have their DMA address pointing to the
	 * TILER aperture. As TILER mappings are refcounted (through
	 * dma_addr_cnt) the DMA address must be accessed through omap_gem_pin()
	 * to ensure that the mapping won't disappear unexpectedly. References
	 * must be released with omap_gem_unpin().
	 */
	dma_addr_t dma_addr;

	/**
	 * # of users of dma_addr
	 */
	uint32_t dma_addr_cnt;

	/**
	 * If the buffer has been imported from a dmabuf the OMAP_BO_MEM_DMABUF
	 * flag is set and the sgt field is valid.
	 */
	struct sg_table *sgt;

	/**
	 * tiler block used when buffer is remapped in DMM/TILER.
	 */
	struct tiler_block *block;

	/**
	 * Array of backing pages, if allocated. Note that pages are never
	 * allocated for buffers originally allocated from contiguous memory.
	 */
	struct page **pages;

	/** addresses corresponding to pages in above array */
	dma_addr_t *dma_addrs;

	/**
	 * Virtual address, if mapped.
	 */
	void *vaddr;
};

#define to_omap_bo(x) container_of(x, struct omap_gem_object, base)

/* To deal with userspace mmap'ings of 2d tiled buffers, which (a) are
 * not necessarily pinned in TILER all the time, and (b) when they are
 * they are not necessarily page aligned, we reserve one or more small
 * regions in each of the 2d containers to use as a user-GART where we
 * can create a second page-aligned mapping of parts of the buffer
 * being accessed from userspace.
 *
 * Note that we could optimize slightly when we know that multiple
 * tiler containers are backed by the same PAT.. but I'll leave that
 * for later..
 */
#define NUM_USERGART_ENTRIES 2
struct omap_drm_usergart_entry {
	struct tiler_block *block;	/* the reserved tiler block */
	dma_addr_t dma_addr;
	struct drm_gem_object *obj;	/* the current pinned obj */
	pgoff_t obj_pgoff;		/* page offset of obj currently
					   mapped in */
};

struct omap_drm_usergart {
	struct omap_drm_usergart_entry entry[NUM_USERGART_ENTRIES];
	int height;		/* height in rows */
	int height_shift;	/* ilog2(height in rows) */
	int slot_shift;		/* ilog2(width per slot) */
	int stride_pfn;		/* stride in pages */
	int last;		/* index of last used entry */
};
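
/*
 * Worked example (numbers borrowed from the fault_2d() comment below,
 * purely for illustration): if a 2d container's slot height is 64 rows,
 * then height == 64, height_shift == ilog2(64) == 6, and a single
 * usergart window maps 64 pages -- a 4KiB-wide, 64-row strip of the
 * tiled view.
 */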

/* -----------------------------------------------------------------------------
 * Helpers
 */

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;
	size_t size;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* Make it mmapable */
	size = omap_gem_mmap_size(obj);
	ret = drm_gem_create_mmap_offset_size(obj, size);
	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

static bool is_contiguous(struct omap_gem_object *omap_obj)
{
	if (omap_obj->flags & OMAP_BO_MEM_DMA_API)
		return true;

	if ((omap_obj->flags & OMAP_BO_MEM_DMABUF) && omap_obj->sgt->nents == 1)
		return true;

	return false;
}

/* -----------------------------------------------------------------------------
 * Eviction
 */

static void evict_entry(struct drm_gem_object *obj,
		enum tiler_fmt fmt, struct omap_drm_usergart_entry *entry)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct omap_drm_private *priv = obj->dev->dev_private;
	int n = priv->usergart[fmt].height;
	size_t size = PAGE_SIZE * n;
	loff_t off = mmap_offset(obj) +
			(entry->obj_pgoff << PAGE_SHIFT);
	const int m = DIV_ROUND_UP(omap_obj->width << fmt, PAGE_SIZE);

	if (m > 1) {
		int i;
		/* if the stride is larger than PAGE_SIZE, the mapping is sparse: */
		for (i = n; i > 0; i--) {
			unmap_mapping_range(obj->dev->anon_inode->i_mapping,
					    off, PAGE_SIZE, 1);
			off += PAGE_SIZE * m;
		}
	} else {
		unmap_mapping_range(obj->dev->anon_inode->i_mapping,
				    off, size, 1);
	}

	entry->obj = NULL;
}

/* Evict a buffer from usergart, if it is mapped there */
static void evict(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct omap_drm_private *priv = obj->dev->dev_private;

	if (omap_obj->flags & OMAP_BO_TILED) {
		enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
		int i;

		for (i = 0; i < NUM_USERGART_ENTRIES; i++) {
			struct omap_drm_usergart_entry *entry =
				&priv->usergart[fmt].entry[i];

			if (entry->obj == obj)
				evict_entry(obj, fmt, entry);
		}
	}
}

/* -----------------------------------------------------------------------------
 * Page Management
 */

/** ensure backing pages are allocated */
static int omap_gem_attach_pages(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct page **pages;
	int npages = obj->size >> PAGE_SHIFT;
	int i, ret;
	dma_addr_t *addrs;

	WARN_ON(omap_obj->pages);

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages));
		return PTR_ERR(pages);
	}

	/* for non-cached buffers, ensure the new pages are clean because
	 * DSS, GPU, etc. are not cache coherent:
	 */
	if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
		addrs = kmalloc(npages * sizeof(*addrs), GFP_KERNEL);
		if (!addrs) {
			ret = -ENOMEM;
			goto free_pages;
		}

		for (i = 0; i < npages; i++) {
			addrs[i] = dma_map_page(dev->dev, pages[i],
					0, PAGE_SIZE, DMA_TO_DEVICE);

			if (dma_mapping_error(dev->dev, addrs[i])) {
				dev_warn(dev->dev,
					"%s: failed to map page\n", __func__);

				for (i = i - 1; i >= 0; --i) {
					dma_unmap_page(dev->dev, addrs[i],
						PAGE_SIZE, DMA_TO_DEVICE);
				}

				ret = -ENOMEM;
				goto free_addrs;
			}
		}
	} else {
		addrs = kzalloc(npages * sizeof(*addrs), GFP_KERNEL);
		if (!addrs) {
			ret = -ENOMEM;
			goto free_pages;
		}
	}

	omap_obj->dma_addrs = addrs;
	omap_obj->pages = pages;

	return 0;

free_addrs:
	kfree(addrs);
free_pages:
	drm_gem_put_pages(obj, pages, true, false);

	return ret;
}

/* acquire pages when needed (for example, for DMA where a physically
 * contiguous buffer is not required)
 */
static int get_pages(struct drm_gem_object *obj, struct page ***pages)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	if ((omap_obj->flags & OMAP_BO_MEM_SHMEM) && !omap_obj->pages) {
		ret = omap_gem_attach_pages(obj);
		if (ret) {
			dev_err(obj->dev->dev, "could not attach pages\n");
			return ret;
		}
	}

	/* TODO: even phys-contig.. we should have a list of pages? */
	*pages = omap_obj->pages;

	return 0;
}

/** release backing pages */
static void omap_gem_detach_pages(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	unsigned int npages = obj->size >> PAGE_SHIFT;
	unsigned int i;

	for (i = 0; i < npages; i++) {
		if (omap_obj->dma_addrs[i])
			dma_unmap_page(obj->dev->dev, omap_obj->dma_addrs[i],
				       PAGE_SIZE, DMA_TO_DEVICE);
	}

	kfree(omap_obj->dma_addrs);
	omap_obj->dma_addrs = NULL;

	drm_gem_put_pages(obj, omap_obj->pages, true, false);
	omap_obj->pages = NULL;
}

/* get buffer flags */
uint32_t omap_gem_flags(struct drm_gem_object *obj)
{
	return to_omap_bo(obj)->flags;
}

uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;

	mutex_lock(&obj->dev->struct_mutex);
	offset = mmap_offset(obj);
	mutex_unlock(&obj->dev->struct_mutex);

	return offset;
}

/** get mmap size */
size_t omap_gem_mmap_size(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	size_t size = obj->size;

	if (omap_obj->flags & OMAP_BO_TILED) {
		/* for tiled buffers, the virtual size has stride rounded up
		 * to 4kb.. (to hide the fact that row n+1 might start 16kb or
		 * 32kb later!).  But we don't back the entire buffer with
		 * pages, only the valid picture part.. so need to adjust for
		 * this in the size used to mmap and generate mmap offset
		 */
		size = tiler_vsize(gem2fmt(omap_obj->flags),
				omap_obj->width, omap_obj->height);
	}

	return size;
}

/* -----------------------------------------------------------------------------
 * Fault Handling
 */

/* Normal handling for the case of faulting in non-tiled buffers */
static int fault_1d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	unsigned long pfn;
	pgoff_t pgoff;

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	if (omap_obj->pages) {
		omap_gem_cpu_sync_page(obj, pgoff);
		pfn = page_to_pfn(omap_obj->pages[pgoff]);
	} else {
		BUG_ON(!is_contiguous(omap_obj));
		pfn = (omap_obj->dma_addr >> PAGE_SHIFT) + pgoff;
	}

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	return vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
}

/* Special handling for the case of faulting in 2d tiled buffers */
static int fault_2d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct omap_drm_private *priv = obj->dev->dev_private;
	struct omap_drm_usergart_entry *entry;
	enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
	struct page *pages[64];  /* XXX is this too much to have on stack? */
	unsigned long pfn;
	pgoff_t pgoff, base_pgoff;
	unsigned long vaddr;
	int i, ret, slots;

	/*
	 * Note the height of the slot is also equal to the number of pages
	 * that need to be mapped in to fill a 4kb wide CPU page. If the slot
	 * height is 64, then 64 pages fill a 4kb wide by 64 row region.
	 */
	const int n = priv->usergart[fmt].height;
	const int n_shift = priv->usergart[fmt].height_shift;

	/*
	 * If buffer width in bytes > PAGE_SIZE then the virtual stride is
	 * rounded up to the next multiple of PAGE_SIZE.. this needs to be
	 * taken into account in some of the math, so figure out virtual
	 * stride in pages
	 */
	const int m = DIV_ROUND_UP(omap_obj->width << fmt, PAGE_SIZE);
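
	/*
	 * For example (numbers only, assuming TILFMT_32BIT == 2 so that
	 * width << fmt is the row size in bytes): a 1920-pixel-wide 32-bit
	 * buffer spans 1920 << 2 == 7680 bytes per row, so
	 * m = DIV_ROUND_UP(7680, 4096) == 2 pages of virtual stride.
	 */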

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	/*
	 * Actual address we start mapping at is rounded down to previous slot
	 * boundary in the y direction:
	 */
	base_pgoff = round_down(pgoff, m << n_shift);

	/* figure out buffer width in slots */
	slots = omap_obj->width >> priv->usergart[fmt].slot_shift;

	vaddr = vmf->address - ((pgoff - base_pgoff) << PAGE_SHIFT);

	entry = &priv->usergart[fmt].entry[priv->usergart[fmt].last];

	/* evict previous buffer using this usergart entry, if any: */
	if (entry->obj)
		evict_entry(entry->obj, fmt, entry);

	entry->obj = obj;
	entry->obj_pgoff = base_pgoff;

	/* now convert base_pgoff to phys offset from virt offset: */
	base_pgoff = (base_pgoff >> n_shift) * slots;

	/* for wider-than 4k.. figure out which part of the slot-row we want: */
	if (m > 1) {
		int off = pgoff % m;
		entry->obj_pgoff += off;
		base_pgoff /= m;
		slots = min(slots - (off << n_shift), n);
		base_pgoff += off << n_shift;
		vaddr += off << PAGE_SHIFT;
	}

	/*
	 * Map in pages. Beyond the valid pixel part of the buffer, we set
	 * pages[i] to NULL to get a dummy page mapped in.. if someone
	 * reads/writes it they will get random/undefined content, but at
	 * least it won't be corrupting whatever other random page used to
	 * be mapped in, or other undefined behavior.
	 */
	memcpy(pages, &omap_obj->pages[base_pgoff],
			sizeof(struct page *) * slots);
	memset(pages + slots, 0,
			sizeof(struct page *) * (n - slots));

	ret = tiler_pin(entry->block, pages, ARRAY_SIZE(pages), 0, true);
	if (ret) {
		dev_err(obj->dev->dev, "failed to pin: %d\n", ret);
		return ret;
	}

	pfn = entry->dma_addr >> PAGE_SHIFT;

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	for (i = n; i > 0; i--) {
		vm_insert_mixed(vma, vaddr, __pfn_to_pfn_t(pfn, PFN_DEV));
		pfn += priv->usergart[fmt].stride_pfn;
		vaddr += PAGE_SIZE * m;
	}

	/* simple round-robin: */
	priv->usergart[fmt].last = (priv->usergart[fmt].last + 1)
				 % NUM_USERGART_ENTRIES;

	return 0;
}

/**
 * omap_gem_fault - pagefault handler for GEM objects
 * @vmf: fault detail
 *
 * Invoked when a fault occurs on an mmap of a GEM managed area. GEM
 * does most of the work for us including the actual map/unmap calls
 * but we need to do the actual page work.
 *
 * The VMA was set up by GEM. In doing so it also ensured that the
 * vma->vm_private_data points to the GEM object that is backing this
 * mapping.
 */
int omap_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct drm_device *dev = obj->dev;
	struct page **pages;
	int ret;

	/* Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet
	 */
	mutex_lock(&dev->struct_mutex);

	/* if a shmem backed object, make sure we have pages attached now */
	ret = get_pages(obj, &pages);
	if (ret)
		goto fail;

	/* where should we do corresponding put_pages().. we are mapping
	 * the original page, rather than thru a GART, so we can't rely
	 * on eviction to trigger this. But munmap() or all mappings should
	 * probably trigger put_pages()?
	 */

	if (omap_obj->flags & OMAP_BO_TILED)
		ret = fault_2d(obj, vma, vmf);
	else
		ret = fault_1d(obj, vma, vmf);

fail:
	mutex_unlock(&dev->struct_mutex);
	switch (ret) {
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

/** We override mainly to fix up some of the vm mapping flags.. */
int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return omap_gem_mmap_obj(vma->vm_private_data, vma);
}

int omap_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (omap_obj->flags & OMAP_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (omap_obj->flags & OMAP_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * We do have some private objects, at least for scanout
		 * buffers on hardware without DMM/TILER. But these are
		 * allocated write-combine.
		 */
		if (WARN_ON(!obj->filp))
			return -EINVAL;

		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		vma->vm_pgoff = 0;
		vma->vm_file  = get_file(obj->filp);

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

/* -----------------------------------------------------------------------------
 * Dumb Buffers
 */

/**
 * omap_gem_dumb_create - create a dumb buffer
 * @file: our client file
 * @dev: our device
 * @args: the requested arguments copied from userspace
 *
 * Allocate a buffer suitable for use for a frame buffer of the
 * form described by user space. Give userspace a handle by which
 * to reference it.
 */
int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	union omap_gem_size gsize;

	args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);

	args->size = PAGE_ALIGN(args->pitch * args->height);

	gsize = (union omap_gem_size){
		.bytes = args->size,
	};

	return omap_gem_new_handle(dev, file, gsize,
			OMAP_BO_SCANOUT | OMAP_BO_WC, &args->handle);
}
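
/*
 * For example (numbers only, following the computation above): a
 * 1920x1080 request at 32 bpp yields args->pitch = DIV_ROUND_UP(1920 *
 * 32, 8) = 7680 bytes and args->size = PAGE_ALIGN(7680 * 1080) =
 * 8294400 bytes, which is already page aligned.
 */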

/**
 * omap_gem_dumb_map_offset - buffer mapping for dumb interface
 * @file: our drm client file
 * @dev: drm device
 * @handle: GEM handle to the object (from dumb_create)
 * @offset: memory location to place the resulting fake mmap offset
 *
 * Do the necessary setup to allow the mapping of the frame buffer
 * into user memory. We don't have to do much here at the moment.
 */
int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = omap_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

fail:
	return ret;
}
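
/*
 * Sketch of the userspace side (illustrative, not part of this file):
 * the fake offset produced here is what userspace passes as the mmap()
 * offset on the DRM device fd. drm_fd, handle and size are assumed to
 * exist in the caller:
 *
 *	struct drm_mode_map_dumb map = { .handle = handle };
 *	ioctl(drm_fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
 *	void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, drm_fd, map.offset);
 */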

#ifdef CONFIG_DRM_FBDEV_EMULATION
/* Set scrolling position. This allows us to implement fast scrolling
 * for console.
 *
 * Call only from non-atomic contexts.
 */
int omap_gem_roll(struct drm_gem_object *obj, uint32_t roll)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	uint32_t npages = obj->size >> PAGE_SHIFT;
	int ret = 0;

	if (roll > npages) {
		dev_err(obj->dev->dev, "invalid roll: %u\n", roll);
		return -EINVAL;
	}

	omap_obj->roll = roll;

	mutex_lock(&obj->dev->struct_mutex);

	/* if we aren't mapped yet, we don't need to do anything */
	if (omap_obj->block) {
		struct page **pages;

		ret = get_pages(obj, &pages);
		if (ret)
			goto fail;

		ret = tiler_pin(omap_obj->block, pages, npages, roll, true);
		if (ret)
			dev_err(obj->dev->dev, "could not repin: %d\n", ret);
	}

fail:
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}
#endif

/* -----------------------------------------------------------------------------
 * Memory Management & DMA Sync
 */

/*
 * shmem buffers that are mapped cached are not coherent.
 *
 * We keep track of dirty pages using page faulting to perform cache
 * management. When a page is mapped to the CPU in read/write mode the
 * device can't access it and omap_obj->dma_addrs[i] is NULL. When a page
 * is mapped to the device, omap_obj->dma_addrs[i] is set to the DMA
 * address, and the page is unmapped from the CPU.
 */
static inline bool is_cached_coherent(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	return !((omap_obj->flags & OMAP_BO_MEM_SHMEM) &&
		((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED));
}
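
/*
 * Minimal usage sketch (illustrative; "pgoff" stands for a hypothetical
 * page index into the object): sync a page back to the CPU before
 * touching it, and remap all dirty pages before handing the buffer to
 * the device, using the two helpers defined below:
 *
 *	omap_gem_cpu_sync_page(obj, pgoff);		// before CPU access
 *	// ... CPU reads/writes the page ...
 *	omap_gem_dma_sync_buffer(obj, DMA_TO_DEVICE);	// before DMA
 */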

/* Sync the buffer for CPU access.. note pages should already be
 * attached, ie. omap_gem_get_pages()
 */
void omap_gem_cpu_sync_page(struct drm_gem_object *obj, int pgoff)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (is_cached_coherent(obj))
		return;

	if (omap_obj->dma_addrs[pgoff]) {
		dma_unmap_page(dev->dev, omap_obj->dma_addrs[pgoff],
				PAGE_SIZE, DMA_TO_DEVICE);
		omap_obj->dma_addrs[pgoff] = 0;
	}
}

/* sync the buffer for DMA access */
void omap_gem_dma_sync_buffer(struct drm_gem_object *obj,
		enum dma_data_direction dir)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int i, npages = obj->size >> PAGE_SHIFT;
	struct page **pages = omap_obj->pages;
	bool dirty = false;

	if (is_cached_coherent(obj))
		return;

	for (i = 0; i < npages; i++) {
		if (!omap_obj->dma_addrs[i]) {
			dma_addr_t addr;

			addr = dma_map_page(dev->dev, pages[i], 0,
					PAGE_SIZE, dir);
			if (dma_mapping_error(dev->dev, addr)) {
				dev_warn(dev->dev, "%s: failed to map page\n",
					__func__);
				break;
			}

			dirty = true;
			omap_obj->dma_addrs[i] = addr;
		}
	}

	if (dirty) {
		unmap_mapping_range(obj->filp->f_mapping, 0,
				    omap_gem_mmap_size(obj), 1);
	}
}

/**
 * omap_gem_pin() - Pin a GEM object in memory
 * @obj: the GEM object
 * @dma_addr: the DMA address
 *
 * Pin the given GEM object in memory and fill the dma_addr pointer with the
 * object's DMA address. If the buffer is not physically contiguous it will be
 * remapped through the TILER to provide a contiguous view.
 *
 * Pins are reference-counted, calling this function multiple times is allowed
 * as long as the corresponding omap_gem_unpin() calls are balanced.
 *
 * Return 0 on success or a negative error code otherwise.
 */
int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr)
{
	struct omap_drm_private *priv = obj->dev->dev_private;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	mutex_lock(&obj->dev->struct_mutex);

	if (!is_contiguous(omap_obj) && priv->has_dmm) {
		if (omap_obj->dma_addr_cnt == 0) {
			struct page **pages;
			uint32_t npages = obj->size >> PAGE_SHIFT;
			enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
			struct tiler_block *block;

			BUG_ON(omap_obj->block);

			ret = get_pages(obj, &pages);
			if (ret)
				goto fail;

			if (omap_obj->flags & OMAP_BO_TILED) {
				block = tiler_reserve_2d(fmt,
						omap_obj->width,
						omap_obj->height, 0);
			} else {
				block = tiler_reserve_1d(obj->size);
			}

			if (IS_ERR(block)) {
				ret = PTR_ERR(block);
				dev_err(obj->dev->dev,
					"could not remap: %d (%d)\n", ret, fmt);
				goto fail;
			}

			/* TODO: enable async refill.. */
			ret = tiler_pin(block, pages, npages,
					omap_obj->roll, true);
			if (ret) {
				tiler_release(block);
				dev_err(obj->dev->dev,
						"could not pin: %d\n", ret);
				goto fail;
			}

			omap_obj->dma_addr = tiler_ssptr(block);
			omap_obj->block = block;

			DBG("got dma address: %pad", &omap_obj->dma_addr);
		}

		omap_obj->dma_addr_cnt++;

		*dma_addr = omap_obj->dma_addr;
	} else if (is_contiguous(omap_obj)) {
		*dma_addr = omap_obj->dma_addr;
	} else {
		ret = -EINVAL;
		goto fail;
	}

fail:
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}
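
/*
 * Example caller pattern (a sketch, not code from this driver): pin
 * around device access and keep the pin/unpin calls balanced:
 *
 *	dma_addr_t dma_addr;
 *	int ret = omap_gem_pin(obj, &dma_addr);
 *	if (ret)
 *		return ret;
 *	// ... program the hardware with dma_addr ...
 *	omap_gem_unpin(obj);
 */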

/**
 * omap_gem_unpin() - Unpin a GEM object from memory
 * @obj: the GEM object
 *
 * Unpin the given GEM object previously pinned with omap_gem_pin(). Pins are
 * reference-counted, the actual unpin will only be performed when the number
 * of calls to this function matches the number of calls to omap_gem_pin().
 */
void omap_gem_unpin(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret;

	mutex_lock(&obj->dev->struct_mutex);
	if (omap_obj->dma_addr_cnt > 0) {
		omap_obj->dma_addr_cnt--;
		if (omap_obj->dma_addr_cnt == 0) {
			ret = tiler_unpin(omap_obj->block);
			if (ret) {
				dev_err(obj->dev->dev,
					"could not unpin pages: %d\n", ret);
			}
			ret = tiler_release(omap_obj->block);
			if (ret) {
				dev_err(obj->dev->dev,
					"could not release unmap: %d\n", ret);
			}
			omap_obj->dma_addr = 0;
			omap_obj->block = NULL;
		}
	}

	mutex_unlock(&obj->dev->struct_mutex);
}

/* Get rotated scanout address (only valid if already pinned), at the
 * specified orientation and x,y offset from top-left corner of buffer
 * (only valid for tiled 2d buffers)
 */
int omap_gem_rotated_dma_addr(struct drm_gem_object *obj, uint32_t orient,
		int x, int y, dma_addr_t *dma_addr)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = -EINVAL;

	mutex_lock(&obj->dev->struct_mutex);
	if ((omap_obj->dma_addr_cnt > 0) && omap_obj->block &&
			(omap_obj->flags & OMAP_BO_TILED)) {
		*dma_addr = tiler_tsptr(omap_obj->block, orient, x, y);
		ret = 0;
	}
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}

/* Get tiler stride for the buffer (only valid for 2d tiled buffers) */
int omap_gem_tiled_stride(struct drm_gem_object *obj, uint32_t orient)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = -EINVAL;

	if (omap_obj->flags & OMAP_BO_TILED)
		ret = tiler_stride(gem2fmt(omap_obj->flags), orient);

	return ret;
}

/* if !remap, and we don't have pages backing, then fail, rather than
 * increasing the pin count (which we don't really do yet anyways,
 * because we don't support swapping pages back out). And 'remap'
 * might not be quite the right name, but I wanted to keep it working
 * similarly to omap_gem_pin(). Note though that mutex is not
 * acquired if !remap (because this can be called in atomic ctxt),
 * but probably omap_gem_unpin() should be changed to work in the
 * same way. If !remap, a matching omap_gem_put_pages() call is not
 * required (and should not be made).
 */
int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
		bool remap)
{
	int ret;

	if (!remap) {
		struct omap_gem_object *omap_obj = to_omap_bo(obj);

		if (!omap_obj->pages)
			return -ENOMEM;
		*pages = omap_obj->pages;
		return 0;
	}

	mutex_lock(&obj->dev->struct_mutex);
	ret = get_pages(obj, pages);
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}
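
/*
 * Sketch (illustrative, per the comment above): a caller in atomic
 * context must pass remap == false and can only rely on pages that are
 * already attached:
 *
 *	struct page **pages;
 *	if (omap_gem_get_pages(obj, &pages, false))
 *		return;		// no backing pages yet; can't fault here
 */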

/* release pages when DMA no longer being performed */
int omap_gem_put_pages(struct drm_gem_object *obj)
{
	/* do something here if we dynamically attach/detach pages.. at
	 * least they would no longer need to be pinned if everyone has
	 * released the pages..
	 */
	return 0;
}

#ifdef CONFIG_DRM_FBDEV_EMULATION
/* Get kernel virtual address for CPU access.. this more or less only
 * exists for omap_fbdev. This should be called with struct_mutex
 * held.
 */
void *omap_gem_vaddr(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	if (!omap_obj->vaddr) {
		struct page **pages;
		int ret = get_pages(obj, &pages);

		if (ret)
			return ERR_PTR(ret);
		omap_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
	}

	return omap_obj->vaddr;
}
#endif

/* -----------------------------------------------------------------------------
 * Power Management
 */

#ifdef CONFIG_PM
/* re-pin objects in DMM in resume path: */
int omap_gem_resume(struct device *dev)
{
	struct drm_device *drm_dev = dev_get_drvdata(dev);
	struct omap_drm_private *priv = drm_dev->dev_private;
	struct omap_gem_object *omap_obj;
	int ret = 0;

	mutex_lock(&priv->list_lock);
	list_for_each_entry(omap_obj, &priv->obj_list, mm_list) {
		if (omap_obj->block) {
			struct drm_gem_object *obj = &omap_obj->base;
			uint32_t npages = obj->size >> PAGE_SHIFT;

			WARN_ON(!omap_obj->pages);  /* this can't happen */
			ret = tiler_pin(omap_obj->block,
					omap_obj->pages, npages,
					omap_obj->roll, true);
			if (ret) {
				dev_err(dev, "could not repin: %d\n", ret);
				goto done;
			}
		}
	}

done:
	mutex_unlock(&priv->list_lock);
	return ret;
}
#endif

/* -----------------------------------------------------------------------------
 * DebugFS
 */

#ifdef CONFIG_DEBUG_FS
void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	uint64_t off;

	off = drm_vma_node_start(&obj->vma_node);

	seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d",
			omap_obj->flags, obj->name, kref_read(&obj->refcount),
			off, &omap_obj->dma_addr, omap_obj->dma_addr_cnt,
			omap_obj->vaddr, omap_obj->roll);

	if (omap_obj->flags & OMAP_BO_TILED) {
		seq_printf(m, " %dx%d", omap_obj->width, omap_obj->height);
		if (omap_obj->block) {
			struct tcm_area *area = &omap_obj->block->area;
			seq_printf(m, " (%dx%d, %dx%d)",
					area->p0.x, area->p0.y,
					area->p1.x, area->p1.y);
		}
	} else {
		seq_printf(m, " %zu", obj->size);
	}

	seq_printf(m, "\n");
}

void omap_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct omap_gem_object *omap_obj;
	int count = 0;
	size_t size = 0;

	list_for_each_entry(omap_obj, list, mm_list) {
		struct drm_gem_object *obj = &omap_obj->base;

		seq_printf(m, " ");
		omap_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

/* -----------------------------------------------------------------------------
 * Constructor & Destructor
 */

void omap_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	evict(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	mutex_lock(&priv->list_lock);
	list_del(&omap_obj->mm_list);
	mutex_unlock(&priv->list_lock);

	/* this means the object is still pinned.. which really should
	 * not happen. I think..
	 */
	WARN_ON(omap_obj->dma_addr_cnt > 0);

	if (omap_obj->pages) {
		if (omap_obj->flags & OMAP_BO_MEM_DMABUF)
			kfree(omap_obj->pages);
		else
			omap_gem_detach_pages(obj);
	}

	if (omap_obj->flags & OMAP_BO_MEM_DMA_API) {
		dma_free_wc(dev->dev, obj->size, omap_obj->vaddr,
			    omap_obj->dma_addr);
	} else if (omap_obj->vaddr) {
		vunmap(omap_obj->vaddr);
	} else if (obj->import_attach) {
		drm_prime_gem_destroy(obj, omap_obj->sgt);
	}

	drm_gem_object_release(obj);

	kfree(omap_obj);
}

/* GEM buffer object constructor */
struct drm_gem_object *omap_gem_new(struct drm_device *dev,
		union omap_gem_size gsize, uint32_t flags)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj;
	struct drm_gem_object *obj;
	struct address_space *mapping;
	size_t size;
	int ret;

	/* Validate the flags and compute the memory and cache flags. */
	if (flags & OMAP_BO_TILED) {
		if (!priv->usergart) {
			dev_err(dev->dev, "Tiled buffers require DMM\n");
			return NULL;
		}

		/*
		 * Tiled buffers are always shmem paged backed. When they are
		 * scanned out, they are remapped into DMM/TILER.
		 */
		flags &= ~OMAP_BO_SCANOUT;
		flags |= OMAP_BO_MEM_SHMEM;

		/*
		 * Currently don't allow cached buffers. There is some caching
		 * stuff that needs to be handled better.
		 */
		flags &= ~(OMAP_BO_CACHED|OMAP_BO_WC|OMAP_BO_UNCACHED);
		flags |= tiler_get_cpu_cache_flags();
	} else if ((flags & OMAP_BO_SCANOUT) && !priv->has_dmm) {
		/*
		 * OMAP_BO_SCANOUT hints that the buffer doesn't need to be
		 * tiled. However, to lower the pressure on memory allocation,
		 * use contiguous memory only if no TILER is available.
		 */
		flags |= OMAP_BO_MEM_DMA_API;
	} else if (!(flags & OMAP_BO_MEM_DMABUF)) {
		/*
		 * All other buffers not backed by dma_buf are shmem-backed.
		 */
		flags |= OMAP_BO_MEM_SHMEM;
	}

	/* Allocate and initialize the OMAP GEM object. */
	omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL);
	if (!omap_obj)
		return NULL;

	obj = &omap_obj->base;
	omap_obj->flags = flags;

	if (flags & OMAP_BO_TILED) {
		/*
		 * For tiled buffers align dimensions to slot boundaries and
		 * calculate size based on aligned dimensions.
		 */
		tiler_align(gem2fmt(flags), &gsize.tiled.width,
			    &gsize.tiled.height);

		size = tiler_size(gem2fmt(flags), gsize.tiled.width,
				  gsize.tiled.height);

		omap_obj->width = gsize.tiled.width;
		omap_obj->height = gsize.tiled.height;
	} else {
		size = PAGE_ALIGN(gsize.bytes);
	}

	/* Initialize the GEM object. */
	if (!(flags & OMAP_BO_MEM_SHMEM)) {
		drm_gem_private_object_init(dev, obj, size);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto err_free;

		mapping = obj->filp->f_mapping;
		mapping_set_gfp_mask(mapping, GFP_USER | __GFP_DMA32);
	}

	/* Allocate memory if needed. */
	if (flags & OMAP_BO_MEM_DMA_API) {
		omap_obj->vaddr = dma_alloc_wc(dev->dev, size,
					       &omap_obj->dma_addr,
					       GFP_KERNEL);
		if (!omap_obj->vaddr)
			goto err_release;
	}

	mutex_lock(&priv->list_lock);
	list_add(&omap_obj->mm_list, &priv->obj_list);
	mutex_unlock(&priv->list_lock);

	return obj;

err_release:
	drm_gem_object_release(obj);
err_free:
	kfree(omap_obj);
	return NULL;
}

struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
		struct sg_table *sgt)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj;
	struct drm_gem_object *obj;
	union omap_gem_size gsize;

	/* Without a DMM only physically contiguous buffers can be supported. */
	if (sgt->orig_nents != 1 && !priv->has_dmm)
		return ERR_PTR(-EINVAL);

	mutex_lock(&dev->struct_mutex);

	gsize.bytes = PAGE_ALIGN(size);
	obj = omap_gem_new(dev, gsize, OMAP_BO_MEM_DMABUF | OMAP_BO_WC);
	if (!obj) {
		obj = ERR_PTR(-ENOMEM);
		goto done;
	}

	omap_obj = to_omap_bo(obj);
	omap_obj->sgt = sgt;

	if (sgt->orig_nents == 1) {
		omap_obj->dma_addr = sg_dma_address(sgt->sgl);
	} else {
		/* Create pages list from sgt */
		struct sg_page_iter iter;
		struct page **pages;
		unsigned int npages;
		unsigned int i = 0;

		npages = DIV_ROUND_UP(size, PAGE_SIZE);
		pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
		if (!pages) {
			omap_gem_free_object(obj);
			obj = ERR_PTR(-ENOMEM);
			goto done;
		}

		omap_obj->pages = pages;

		for_each_sg_page(sgt->sgl, &iter, sgt->orig_nents, 0) {
			pages[i++] = sg_page_iter_page(&iter);
			if (i > npages)
				break;
		}

		if (WARN_ON(i != npages)) {
			omap_gem_free_object(obj);
			obj = ERR_PTR(-ENOMEM);
			goto done;
		}
	}

done:
	mutex_unlock(&dev->struct_mutex);
	return obj;
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		union omap_gem_size gsize, uint32_t flags, uint32_t *handle)
{
	struct drm_gem_object *obj;
	int ret;

	obj = omap_gem_new(dev, gsize, flags);
	if (!obj)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, obj, handle);
	if (ret) {
		omap_gem_free_object(obj);
		return ret;
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return 0;
}
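
/*
 * Example (illustrative; "len" is a hypothetical byte count): allocating
 * an un-tiled, write-combined buffer directly, without a userspace
 * handle:
 *
 *	union omap_gem_size gsize = { .bytes = PAGE_ALIGN(len) };
 *	struct drm_gem_object *obj = omap_gem_new(dev, gsize, OMAP_BO_WC);
 *
 *	if (!obj)
 *		return -ENOMEM;
 */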

/* -----------------------------------------------------------------------------
 * Init & Cleanup
 */

/* If DMM is used, we need to set some stuff up.. */
void omap_gem_init(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_drm_usergart *usergart;
	const enum tiler_fmt fmts[] = {
			TILFMT_8BIT, TILFMT_16BIT, TILFMT_32BIT
	};
	int i, j;

	if (!dmm_is_available()) {
		/* DMM only supported on OMAP4 and later, so this isn't fatal */
		dev_warn(dev->dev, "DMM not available, disable DMM support\n");
		return;
	}

	usergart = kcalloc(3, sizeof(*usergart), GFP_KERNEL);
	if (!usergart)
		return;

	/* reserve 4k aligned/wide regions for userspace mappings: */
	for (i = 0; i < ARRAY_SIZE(fmts); i++) {
		uint16_t h = 1, w = PAGE_SIZE >> i;

		tiler_align(fmts[i], &w, &h);
		/* note: since each region is 1 4kb page wide, and minimum
		 * number of rows, the height ends up being the same as the
		 * # of pages in the region
		 */
		usergart[i].height = h;
		usergart[i].height_shift = ilog2(h);
		usergart[i].stride_pfn = tiler_stride(fmts[i], 0) >> PAGE_SHIFT;
		usergart[i].slot_shift = ilog2((PAGE_SIZE / h) >> i);
		for (j = 0; j < NUM_USERGART_ENTRIES; j++) {
			struct omap_drm_usergart_entry *entry;
			struct tiler_block *block;

			entry = &usergart[i].entry[j];
			block = tiler_reserve_2d(fmts[i], w, h, PAGE_SIZE);
			if (IS_ERR(block)) {
				dev_err(dev->dev,
						"reserve failed: %d, %d, %ld\n",
						i, j, PTR_ERR(block));
				return;
			}
			entry->dma_addr = tiler_ssptr(block);
			entry->block = block;

			DBG("%d:%d: %dx%d: dma_addr=%pad stride=%d", i, j, w, h,
					&entry->dma_addr,
					usergart[i].stride_pfn << PAGE_SHIFT);
		}
	}

	priv->usergart = usergart;
	priv->has_dmm = true;
}

void omap_gem_deinit(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;

	/* I believe we can rely on there being no more outstanding GEM
	 * objects which could depend on usergart/dmm at this point.
	 */
	kfree(priv->usergart);
}