/*
 * drivers/gpu/drm/omapdrm/omap_gem.c
 *
 * Copyright (C) 2011 Texas Instruments
 * Author: Rob Clark <rob.clark@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/seq_file.h>
#include <linux/shmem_fs.h>
#include <linux/spinlock.h>
#include <linux/pfn_t.h>

#include <drm/drm_vma_manager.h>

#include "omap_drv.h"
#include "omap_dmm_tiler.h"

/*
 * GEM buffer object implementation.
 */

/* note: we use upper 8 bits of flags for driver-internal flags: */
#define OMAP_BO_MEM_DMA_API	0x01000000	/* memory allocated with the dma_alloc_* API */
#define OMAP_BO_MEM_SHMEM	0x02000000	/* memory allocated through shmem backing */
#define OMAP_BO_MEM_DMABUF	0x08000000	/* memory imported from a dmabuf */

struct omap_gem_object {
	struct drm_gem_object base;

	struct list_head mm_list;

	uint32_t flags;

	/** width/height for tiled formats (rounded up to slot boundaries) */
	uint16_t width, height;

	/** roll applied when mapping to DMM */
	uint32_t roll;

	/**
	 * paddr contains the buffer DMA address. It is valid for
	 *
	 * - buffers allocated through the DMA mapping API (with the
	 *   OMAP_BO_MEM_DMA_API flag set)
	 *
	 * - buffers imported from dmabuf (with the OMAP_BO_MEM_DMABUF flag set)
	 *   if they are physically contiguous (when sgt->orig_nents == 1)
	 *
	 * - buffers mapped through the TILER when paddr_cnt is not zero, in
	 *   which case the DMA address points to the TILER aperture
	 *
	 * Physically contiguous buffers have their DMA address equal to the
	 * physical address as we don't remap those buffers through the TILER.
	 *
	 * Buffers mapped to the TILER have their DMA address pointing to the
	 * TILER aperture. As TILER mappings are refcounted (through paddr_cnt)
	 * the DMA address must be accessed through omap_gem_get_paddr() to
	 * ensure that the mapping won't disappear unexpectedly. References must
	 * be released with omap_gem_put_paddr().
	 */
	dma_addr_t paddr;

	/**
	 * # of users of paddr
	 */
	uint32_t paddr_cnt;

	/**
	 * If the buffer has been imported from a dmabuf the OMAP_BO_MEM_DMABUF
	 * flag is set and the sgt field is valid.
	 */
	struct sg_table *sgt;

	/**
	 * tiler block used when buffer is remapped in DMM/TILER.
	 */
	struct tiler_block *block;

	/**
	 * Array of backing pages, if allocated.  Note that pages are never
	 * allocated for buffers originally allocated from contiguous memory
	 */
	struct page **pages;

	/** addresses corresponding to pages in above array */
	dma_addr_t *addrs;

	/**
	 * Virtual address, if mapped.
	 */
	void *vaddr;

	/**
	 * sync-object allocated on demand (if needed)
	 *
	 * Per-buffer sync-object for tracking pending and completed hw/dma
	 * read and write operations.
	 */
	struct {
		uint32_t write_pending;
		uint32_t write_complete;
		uint32_t read_pending;
		uint32_t read_complete;
	} *sync;
};

#define to_omap_bo(x) container_of(x, struct omap_gem_object, base)

/* To deal with userspace mmap'ings of 2d tiled buffers, which (a) are
 * not necessarily pinned in TILER all the time, and (b) when they are
 * they are not necessarily page aligned, we reserve one or more small
 * regions in each of the 2d containers to use as a user-GART where we
 * can create a second page-aligned mapping of parts of the buffer
 * being accessed from userspace.
 *
 * Note that we could optimize slightly when we know that multiple
 * tiler containers are backed by the same PAT.. but I'll leave that
 * for later..
 */
#define NUM_USERGART_ENTRIES 2
struct omap_drm_usergart_entry {
	struct tiler_block *block;	/* the reserved tiler block */
	dma_addr_t paddr;
	struct drm_gem_object *obj;	/* the current pinned obj */
	pgoff_t obj_pgoff;		/* page offset of obj currently
					   mapped in */
};

struct omap_drm_usergart {
	struct omap_drm_usergart_entry entry[NUM_USERGART_ENTRIES];
	int height;		/* height in rows */
	int height_shift;	/* ilog2(height in rows) */
	int slot_shift;		/* ilog2(width per slot) */
	int stride_pfn;		/* stride in pages */
	int last;		/* index of last used entry */
};
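
/*
 * Illustrative geometry, not authoritative: the actual numbers come from
 * tiler_align() and tiler_stride() in omap_gem_init().  Each usergart region
 * is one 4 KiB page wide; with the 64-row slots mentioned in fault_2d()
 * below, height == 64 and a region therefore covers 64 pages of the source
 * buffer, one page per tiled row.
 */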

/* -----------------------------------------------------------------------------
 * Helpers
 */

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;
	size_t size;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* Make it mmapable */
	size = omap_gem_mmap_size(obj);
	ret = drm_gem_create_mmap_offset_size(obj, size);
	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

static bool is_contiguous(struct omap_gem_object *omap_obj)
{
	if (omap_obj->flags & OMAP_BO_MEM_DMA_API)
		return true;

	if ((omap_obj->flags & OMAP_BO_MEM_DMABUF) && omap_obj->sgt->nents == 1)
		return true;

	return false;
}

/* -----------------------------------------------------------------------------
 * Eviction
 */

static void evict_entry(struct drm_gem_object *obj,
		enum tiler_fmt fmt, struct omap_drm_usergart_entry *entry)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct omap_drm_private *priv = obj->dev->dev_private;
	int n = priv->usergart[fmt].height;
	size_t size = PAGE_SIZE * n;
	loff_t off = mmap_offset(obj) +
			(entry->obj_pgoff << PAGE_SHIFT);
	const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);

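	/*
	 * 'm' is the virtual stride in pages (see the comments in fault_2d()).
	 * When m > 1 only the first page of each m-page virtual row was
	 * mapped into the usergart, so the rows must be unmapped one page at
	 * a time below instead of as one contiguous range.
	 */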
	if (m > 1) {
		int i;
		/* if stride > PAGE_SIZE then sparse mapping: */
		for (i = n; i > 0; i--) {
			unmap_mapping_range(obj->dev->anon_inode->i_mapping,
					    off, PAGE_SIZE, 1);
			off += PAGE_SIZE * m;
		}
	} else {
		unmap_mapping_range(obj->dev->anon_inode->i_mapping,
				    off, size, 1);
	}

	entry->obj = NULL;
}

/* Evict a buffer from usergart, if it is mapped there */
static void evict(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct omap_drm_private *priv = obj->dev->dev_private;

	if (omap_obj->flags & OMAP_BO_TILED) {
		enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
		int i;

		for (i = 0; i < NUM_USERGART_ENTRIES; i++) {
			struct omap_drm_usergart_entry *entry =
				&priv->usergart[fmt].entry[i];

			if (entry->obj == obj)
				evict_entry(obj, fmt, entry);
		}
	}
}

/* -----------------------------------------------------------------------------
 * Page Management
 */

/** ensure backing pages are allocated */
static int omap_gem_attach_pages(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct page **pages;
	int npages = obj->size >> PAGE_SHIFT;
	int i, ret;
	dma_addr_t *addrs;

	WARN_ON(omap_obj->pages);

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages));
		return PTR_ERR(pages);
	}

	/* for non-cached buffers, ensure the new pages are clean because
	 * DSS, GPU, etc. are not cache coherent:
	 */
	if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
		addrs = kmalloc(npages * sizeof(*addrs), GFP_KERNEL);
		if (!addrs) {
			ret = -ENOMEM;
			goto free_pages;
		}

		for (i = 0; i < npages; i++) {
			addrs[i] = dma_map_page(dev->dev, pages[i],
					0, PAGE_SIZE, DMA_BIDIRECTIONAL);

			if (dma_mapping_error(dev->dev, addrs[i])) {
				dev_warn(dev->dev,
					"%s: failed to map page\n", __func__);

				for (i = i - 1; i >= 0; --i) {
					dma_unmap_page(dev->dev, addrs[i],
						PAGE_SIZE, DMA_BIDIRECTIONAL);
				}

				ret = -ENOMEM;
				goto free_addrs;
			}
		}
	} else {
		addrs = kzalloc(npages * sizeof(*addrs), GFP_KERNEL);
		if (!addrs) {
			ret = -ENOMEM;
			goto free_pages;
		}
	}

	omap_obj->addrs = addrs;
	omap_obj->pages = pages;

	return 0;

free_addrs:
	kfree(addrs);
free_pages:
	drm_gem_put_pages(obj, pages, true, false);

	return ret;
}

/* acquire pages when needed (for example, for DMA where a physically
 * contiguous buffer is not required)
 */
static int get_pages(struct drm_gem_object *obj, struct page ***pages)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	if ((omap_obj->flags & OMAP_BO_MEM_SHMEM) && !omap_obj->pages) {
		ret = omap_gem_attach_pages(obj);
		if (ret) {
			dev_err(obj->dev->dev, "could not attach pages\n");
			return ret;
		}
	}

	/* TODO: even phys-contig.. we should have a list of pages? */
	*pages = omap_obj->pages;

	return 0;
}

/** release backing pages */
static void omap_gem_detach_pages(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	/* for non-cached buffers, undo the DMA mappings that were set up
	 * when the pages were attached:
	 */
	if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
		int i, npages = obj->size >> PAGE_SHIFT;
		for (i = 0; i < npages; i++) {
			if (omap_obj->addrs[i])
				dma_unmap_page(obj->dev->dev,
					omap_obj->addrs[i],
					PAGE_SIZE, DMA_BIDIRECTIONAL);
		}
	}

	kfree(omap_obj->addrs);
	omap_obj->addrs = NULL;

	drm_gem_put_pages(obj, omap_obj->pages, true, false);
	omap_obj->pages = NULL;
}

/* get buffer flags */
uint32_t omap_gem_flags(struct drm_gem_object *obj)
{
	return to_omap_bo(obj)->flags;
}

uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	mutex_lock(&obj->dev->struct_mutex);
	offset = mmap_offset(obj);
	mutex_unlock(&obj->dev->struct_mutex);
	return offset;
}

/** get mmap size */
size_t omap_gem_mmap_size(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	size_t size = obj->size;

	if (omap_obj->flags & OMAP_BO_TILED) {
		/* for tiled buffers, the virtual size has stride rounded up
		 * to 4kb.. (to hide the fact that row n+1 might start 16kb or
		 * 32kb later!).  But we don't back the entire buffer with
		 * pages, only the valid picture part.. so need to adjust for
		 * this in the size used to mmap and generate mmap offset
		 */
		size = tiler_vsize(gem2fmt(omap_obj->flags),
				omap_obj->width, omap_obj->height);
	}

	return size;
}

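/*
 * Illustrative example (the real numbers come from tiler_vsize()): a 16-bit
 * tiled buffer 320 pixels wide has 640-byte rows, but per the comment above
 * each row is rounded up to a 4 KiB virtual stride, so the mmap size is
 * based on 4 KiB per row rather than on the 640 bytes of valid pixels.
 */
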
/* -----------------------------------------------------------------------------
 * Fault Handling
 */

/* Normal handling for the case of faulting in non-tiled buffers */
static int fault_1d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	unsigned long pfn;
	pgoff_t pgoff;

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	if (omap_obj->pages) {
		omap_gem_cpu_sync(obj, pgoff);
		pfn = page_to_pfn(omap_obj->pages[pgoff]);
	} else {
		BUG_ON(!is_contiguous(omap_obj));
		pfn = (omap_obj->paddr >> PAGE_SHIFT) + pgoff;
	}

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	return vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
}

/* Special handling for the case of faulting in 2d tiled buffers */
static int fault_2d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct omap_drm_private *priv = obj->dev->dev_private;
	struct omap_drm_usergart_entry *entry;
	enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
	struct page *pages[64];  /* XXX is this too much to have on stack? */
	unsigned long pfn;
	pgoff_t pgoff, base_pgoff;
	unsigned long vaddr;
	int i, ret, slots;

	/*
	 * Note the height of the slot is also equal to the number of pages
	 * that need to be mapped in to fill 4kb wide CPU page.  If the slot
	 * height is 64, then 64 pages fill a 4kb wide by 64 row region.
	 */
	const int n = priv->usergart[fmt].height;
	const int n_shift = priv->usergart[fmt].height_shift;

	/*
	 * If buffer width in bytes > PAGE_SIZE then the virtual stride is
	 * rounded up to the next multiple of PAGE_SIZE.. this needs to be
	 * taken into account in some of the math, so figure out virtual
	 * stride in pages
	 */
	const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);

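	/*
	 * Worked example (illustrative only, taking width << fmt as the row
	 * size in bytes and PAGE_SIZE == 4096): a 1280-byte-wide buffer
	 * gives m = 1 + 1280/4096 = 1, i.e. a dense mapping, while a
	 * 5120-byte-wide buffer gives m = 2, so each virtual row spans two
	 * pages and the usergart mapping becomes sparse.
	 */
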
	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	/*
	 * Actual address we start mapping at is rounded down to previous slot
	 * boundary in the y direction:
	 */
	base_pgoff = round_down(pgoff, m << n_shift);

	/* figure out buffer width in slots */
	slots = omap_obj->width >> priv->usergart[fmt].slot_shift;

	vaddr = vmf->address - ((pgoff - base_pgoff) << PAGE_SHIFT);

	entry = &priv->usergart[fmt].entry[priv->usergart[fmt].last];

	/* evict previous buffer using this usergart entry, if any: */
	if (entry->obj)
		evict_entry(entry->obj, fmt, entry);

	entry->obj = obj;
	entry->obj_pgoff = base_pgoff;

	/* now convert base_pgoff to phys offset from virt offset: */
	base_pgoff = (base_pgoff >> n_shift) * slots;

	/* for wider-than 4k.. figure out which part of the slot-row we want: */
	if (m > 1) {
		int off = pgoff % m;
		entry->obj_pgoff += off;
		base_pgoff /= m;
		slots = min(slots - (off << n_shift), n);
		base_pgoff += off << n_shift;
		vaddr += off << PAGE_SHIFT;
	}

	/*
	 * Map in pages. Beyond the valid pixel part of the buffer, we set
	 * pages[i] to NULL to get a dummy page mapped in.. if someone
	 * reads/writes it they will get random/undefined content, but at
	 * least it won't be corrupting whatever other random page used to
	 * be mapped in, or other undefined behavior.
	 */
	memcpy(pages, &omap_obj->pages[base_pgoff],
			sizeof(struct page *) * slots);
	memset(pages + slots, 0,
			sizeof(struct page *) * (n - slots));

	ret = tiler_pin(entry->block, pages, ARRAY_SIZE(pages), 0, true);
	if (ret) {
		dev_err(obj->dev->dev, "failed to pin: %d\n", ret);
		return ret;
	}

	pfn = entry->paddr >> PAGE_SHIFT;

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	for (i = n; i > 0; i--) {
		vm_insert_mixed(vma, vaddr, __pfn_to_pfn_t(pfn, PFN_DEV));
		pfn += priv->usergart[fmt].stride_pfn;
		vaddr += PAGE_SIZE * m;
	}

	/* simple round-robin: */
	priv->usergart[fmt].last = (priv->usergart[fmt].last + 1)
				 % NUM_USERGART_ENTRIES;

	return 0;
}

/**
 * omap_gem_fault - pagefault handler for GEM objects
 * @vmf: fault detail
 *
 * Invoked when a fault occurs on an mmap of a GEM managed area. GEM
 * does most of the work for us including the actual map/unmap calls
 * but we need to do the actual page work.
 *
 * The VMA was set up by GEM. In doing so it also ensured that the
 * vma->vm_private_data points to the GEM object that is backing this
 * mapping.
 */
int omap_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct drm_device *dev = obj->dev;
	struct page **pages;
	int ret;

	/* Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet
	 */
	mutex_lock(&dev->struct_mutex);

	/* if a shmem backed object, make sure we have pages attached now */
	ret = get_pages(obj, &pages);
	if (ret)
		goto fail;

	/* where should we do corresponding put_pages().. we are mapping
	 * the original page, rather than thru a GART, so we can't rely
	 * on eviction to trigger this.  But munmap() or all mappings should
	 * probably trigger put_pages()?
	 */

	if (omap_obj->flags & OMAP_BO_TILED)
		ret = fault_2d(obj, vma, vmf);
	else
		ret = fault_1d(obj, vma, vmf);

fail:
	mutex_unlock(&dev->struct_mutex);
	switch (ret) {
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

/** We override mainly to fix up some of the vm mapping flags.. */
int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return omap_gem_mmap_obj(vma->vm_private_data, vma);
}

int omap_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (omap_obj->flags & OMAP_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (omap_obj->flags & OMAP_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * We do have some private objects, at least for scanout buffers
		 * on hardware without DMM/TILER.  But these are allocated write-
		 * combine
		 */
		if (WARN_ON(!obj->filp))
			return -EINVAL;

		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		vma->vm_pgoff = 0;
		vma->vm_file  = get_file(obj->filp);

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

/* -----------------------------------------------------------------------------
 * Dumb Buffers
 */

/**
 * omap_gem_dumb_create - create a dumb buffer
 * @file: our client file
 * @dev: our device
 * @args: the requested arguments copied from userspace
 *
 * Allocate a buffer suitable for use for a frame buffer of the
 * form described by user space. Give userspace a handle by which
 * to reference it.
 */
int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	union omap_gem_size gsize;

	args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);

	args->size = PAGE_ALIGN(args->pitch * args->height);

	gsize = (union omap_gem_size){
		.bytes = args->size,
	};

	return omap_gem_new_handle(dev, file, gsize,
			OMAP_BO_SCANOUT | OMAP_BO_WC, &args->handle);
}

/**
 * omap_gem_dumb_map_offset - buffer mapping for dumb interface
 * @file: our drm client file
 * @dev: drm device
 * @handle: GEM handle to the object (from dumb_create)
 * @offset: returned mmap offset for the buffer
 *
 * Do the necessary setup to allow the mapping of the frame buffer
 * into user memory. We don't have to do much here at the moment.
 */
int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = omap_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

fail:
	return ret;
}

#ifdef CONFIG_DRM_FBDEV_EMULATION
/* Set scrolling position.  This allows us to implement fast scrolling
 * for console.
 *
 * Call only from non-atomic contexts.
 */
int omap_gem_roll(struct drm_gem_object *obj, uint32_t roll)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	uint32_t npages = obj->size >> PAGE_SHIFT;
	int ret = 0;

	if (roll > npages) {
		dev_err(obj->dev->dev, "invalid roll: %d\n", roll);
		return -EINVAL;
	}

	omap_obj->roll = roll;

	mutex_lock(&obj->dev->struct_mutex);

	/* if we aren't mapped yet, we don't need to do anything */
	if (omap_obj->block) {
		struct page **pages;
		ret = get_pages(obj, &pages);
		if (ret)
			goto fail;
		ret = tiler_pin(omap_obj->block, pages, npages, roll, true);
		if (ret)
			dev_err(obj->dev->dev, "could not repin: %d\n", ret);
	}

fail:
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}
#endif

/* -----------------------------------------------------------------------------
 * Memory Management & DMA Sync
 */

/**
 * shmem buffers that are mapped cached can simulate coherency via using
 * page faulting to keep track of dirty pages
 */
static inline bool is_cached_coherent(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	return (omap_obj->flags & OMAP_BO_MEM_SHMEM) &&
		((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED);
}

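/*
 * Protocol sketch for the coherency emulation below: for cached shmem
 * buffers, a non-zero addrs[pgoff] means the page is currently mapped for
 * device access.  omap_gem_cpu_sync() unmaps a page before CPU access and
 * clears its slot; omap_gem_dma_sync() re-maps any cleared pages and, if it
 * created new mappings, shoots down the userspace mapping so the next CPU
 * access faults back in through omap_gem_fault() and is synced again.
 */
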
/* Sync the buffer for CPU access.. note pages should already be
 * attached, ie. omap_gem_get_pages()
 */
void omap_gem_cpu_sync(struct drm_gem_object *obj, int pgoff)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (is_cached_coherent(obj) && omap_obj->addrs[pgoff]) {
		dma_unmap_page(dev->dev, omap_obj->addrs[pgoff],
				PAGE_SIZE, DMA_BIDIRECTIONAL);
		omap_obj->addrs[pgoff] = 0;
	}
}

/* sync the buffer for DMA access */
void omap_gem_dma_sync(struct drm_gem_object *obj,
		enum dma_data_direction dir)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (is_cached_coherent(obj)) {
		int i, npages = obj->size >> PAGE_SHIFT;
		struct page **pages = omap_obj->pages;
		bool dirty = false;

		for (i = 0; i < npages; i++) {
			if (!omap_obj->addrs[i]) {
				dma_addr_t addr;

				addr = dma_map_page(dev->dev, pages[i], 0,
						PAGE_SIZE, DMA_BIDIRECTIONAL);

				if (dma_mapping_error(dev->dev, addr)) {
					dev_warn(dev->dev,
						"%s: failed to map page\n",
						__func__);
					break;
				}

				dirty = true;
				omap_obj->addrs[i] = addr;
			}
		}

		if (dirty) {
			unmap_mapping_range(obj->filp->f_mapping, 0,
					omap_gem_mmap_size(obj), 1);
		}
	}
}

/* Get physical address for DMA.. if 'remap' is true, and the buffer is not
 * already contiguous, remap it to pin in physically contiguous memory.. (ie.
 * map in TILER)
 */
int omap_gem_get_paddr(struct drm_gem_object *obj,
		dma_addr_t *paddr, bool remap)
{
	struct omap_drm_private *priv = obj->dev->dev_private;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	mutex_lock(&obj->dev->struct_mutex);

	if (!is_contiguous(omap_obj) && remap && priv->has_dmm) {
		if (omap_obj->paddr_cnt == 0) {
			struct page **pages;
			uint32_t npages = obj->size >> PAGE_SHIFT;
			enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
			struct tiler_block *block;

			BUG_ON(omap_obj->block);

			ret = get_pages(obj, &pages);
			if (ret)
				goto fail;

			if (omap_obj->flags & OMAP_BO_TILED) {
				block = tiler_reserve_2d(fmt,
						omap_obj->width,
						omap_obj->height, 0);
			} else {
				block = tiler_reserve_1d(obj->size);
			}

			if (IS_ERR(block)) {
				ret = PTR_ERR(block);
				dev_err(obj->dev->dev,
					"could not remap: %d (%d)\n", ret, fmt);
				goto fail;
			}

			/* TODO: enable async refill.. */
			ret = tiler_pin(block, pages, npages,
					omap_obj->roll, true);
			if (ret) {
				tiler_release(block);
				dev_err(obj->dev->dev,
						"could not pin: %d\n", ret);
				goto fail;
			}

			omap_obj->paddr = tiler_ssptr(block);
			omap_obj->block = block;

			DBG("got paddr: %pad", &omap_obj->paddr);
		}

		omap_obj->paddr_cnt++;

		*paddr = omap_obj->paddr;
	} else if (is_contiguous(omap_obj)) {
		*paddr = omap_obj->paddr;
	} else {
		ret = -EINVAL;
		goto fail;
	}

fail:
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}

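/*
 * Typical usage (a sketch, not lifted from a specific caller): pin the
 * buffer for the duration of a scanout or DMA operation, then drop the
 * reference:
 *
 *	dma_addr_t paddr;
 *
 *	if (!omap_gem_get_paddr(obj, &paddr, true)) {
 *		... program the hardware with paddr ...
 *		omap_gem_put_paddr(obj);
 *	}
 */
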
/* Release physical address, when DMA is no longer being performed.. this
 * could potentially unpin and unmap buffers from TILER
 */
void omap_gem_put_paddr(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret;

	mutex_lock(&obj->dev->struct_mutex);
	if (omap_obj->paddr_cnt > 0) {
		omap_obj->paddr_cnt--;
		if (omap_obj->paddr_cnt == 0) {
			ret = tiler_unpin(omap_obj->block);
			if (ret) {
				dev_err(obj->dev->dev,
					"could not unpin pages: %d\n", ret);
			}
			ret = tiler_release(omap_obj->block);
			if (ret) {
				dev_err(obj->dev->dev,
					"could not release unmap: %d\n", ret);
			}
			omap_obj->paddr = 0;
			omap_obj->block = NULL;
		}
	}

	mutex_unlock(&obj->dev->struct_mutex);
}

/* Get rotated scanout address (only valid if already pinned), at the
 * specified orientation and x,y offset from top-left corner of buffer
 * (only valid for tiled 2d buffers)
 */
int omap_gem_rotated_paddr(struct drm_gem_object *obj, uint32_t orient,
		int x, int y, dma_addr_t *paddr)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = -EINVAL;

	mutex_lock(&obj->dev->struct_mutex);
	if ((omap_obj->paddr_cnt > 0) && omap_obj->block &&
			(omap_obj->flags & OMAP_BO_TILED)) {
		*paddr = tiler_tsptr(omap_obj->block, orient, x, y);
		ret = 0;
	}
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

/* Get tiler stride for the buffer (only valid for 2d tiled buffers) */
int omap_gem_tiled_stride(struct drm_gem_object *obj, uint32_t orient)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = -EINVAL;
	if (omap_obj->flags & OMAP_BO_TILED)
		ret = tiler_stride(gem2fmt(omap_obj->flags), orient);
	return ret;
}

/* if !remap, and we don't have pages backing, then fail, rather than
 * increasing the pin count (which we don't really do yet anyways,
 * because we don't support swapping pages back out).  And 'remap'
 * might not be quite the right name, but I wanted to keep it working
 * similarly to omap_gem_get_paddr().  Note though that mutex is not
 * acquired if !remap (because this can be called in atomic ctxt),
 * but probably omap_gem_get_paddr() should be changed to work in the
 * same way.  If !remap, a matching omap_gem_put_pages() call is not
 * required (and should not be made).
 */
int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
		bool remap)
{
	int ret;
	if (!remap) {
		struct omap_gem_object *omap_obj = to_omap_bo(obj);
		if (!omap_obj->pages)
			return -ENOMEM;
		*pages = omap_obj->pages;
		return 0;
	}
	mutex_lock(&obj->dev->struct_mutex);
	ret = get_pages(obj, pages);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

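/*
 * Usage sketch (restating the comment above): sleeping callers pass
 * remap = true and pair the call with omap_gem_put_pages(); atomic-context
 * callers pass remap = false, must tolerate -ENOMEM when the pages are not
 * already attached, and must not call omap_gem_put_pages() afterwards.
 */
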
/* release pages when DMA no longer being performed */
int omap_gem_put_pages(struct drm_gem_object *obj)
{
	/* do something here if we dynamically attach/detach pages.. at
	 * least they would no longer need to be pinned if everyone has
	 * released the pages..
	 */
	return 0;
}

#ifdef CONFIG_DRM_FBDEV_EMULATION
/* Get kernel virtual address for CPU access.. this more or less only
 * exists for omap_fbdev.  This should be called with struct_mutex
 * held.
 */
void *omap_gem_vaddr(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
	if (!omap_obj->vaddr) {
		struct page **pages;
		int ret = get_pages(obj, &pages);
		if (ret)
			return ERR_PTR(ret);
		omap_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
	}
	return omap_obj->vaddr;
}
#endif

/* -----------------------------------------------------------------------------
 * Power Management
 */

#ifdef CONFIG_PM
/* re-pin objects in DMM in resume path: */
int omap_gem_resume(struct device *dev)
{
	struct drm_device *drm_dev = dev_get_drvdata(dev);
	struct omap_drm_private *priv = drm_dev->dev_private;
	struct omap_gem_object *omap_obj;
	int ret = 0;

	list_for_each_entry(omap_obj, &priv->obj_list, mm_list) {
		if (omap_obj->block) {
			struct drm_gem_object *obj = &omap_obj->base;
			uint32_t npages = obj->size >> PAGE_SHIFT;
			WARN_ON(!omap_obj->pages);  /* this can't happen */
			ret = tiler_pin(omap_obj->block,
					omap_obj->pages, npages,
					omap_obj->roll, true);
			if (ret) {
				dev_err(dev, "could not repin: %d\n", ret);
				return ret;
			}
		}
	}

	return 0;
}
#endif

/* -----------------------------------------------------------------------------
 * DebugFS
 */

#ifdef CONFIG_DEBUG_FS
void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	uint64_t off;

	off = drm_vma_node_start(&obj->vma_node);

	seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d",
			omap_obj->flags, obj->name, kref_read(&obj->refcount),
			off, &omap_obj->paddr, omap_obj->paddr_cnt,
			omap_obj->vaddr, omap_obj->roll);

	if (omap_obj->flags & OMAP_BO_TILED) {
		seq_printf(m, " %dx%d", omap_obj->width, omap_obj->height);
		if (omap_obj->block) {
			struct tcm_area *area = &omap_obj->block->area;
			seq_printf(m, " (%dx%d, %dx%d)",
					area->p0.x, area->p0.y,
					area->p1.x, area->p1.y);
		}
	} else {
		seq_printf(m, " %d", obj->size);
	}

	seq_printf(m, "\n");
}

void omap_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct omap_gem_object *omap_obj;
	int count = 0;
	size_t size = 0;

	list_for_each_entry(omap_obj, list, mm_list) {
		struct drm_gem_object *obj = &omap_obj->base;
		seq_printf(m, " ");
		omap_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

/* -----------------------------------------------------------------------------
 * Buffer Synchronization
 */

static DEFINE_SPINLOCK(sync_lock);

struct omap_gem_sync_waiter {
	struct list_head list;
	struct omap_gem_object *omap_obj;
	enum omap_gem_op op;
	uint32_t read_target, write_target;
	/* notify called w/ sync_lock held */
	void (*notify)(void *arg);
	void *arg;
};

/* list of omap_gem_sync_waiter.. the notify fxn gets called back when
 * the read and/or write target count is achieved which can call a user
 * callback (ex. to kick 3d and/or 2d), wakeup blocked task (prep for
 * cpu access), etc.
 */
static LIST_HEAD(waiters);

static inline bool is_waiting(struct omap_gem_sync_waiter *waiter)
{
	struct omap_gem_object *omap_obj = waiter->omap_obj;
	if ((waiter->op & OMAP_GEM_READ) &&
			(omap_obj->sync->write_complete < waiter->write_target))
		return true;
	if ((waiter->op & OMAP_GEM_WRITE) &&
			(omap_obj->sync->read_complete < waiter->read_target))
		return true;
	return false;
}

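/*
 * Semantics sketch: a waiter snapshots the *_pending counters as its targets
 * when it is created.  A READ waiter blocks until write_complete reaches its
 * write_target (i.e. all writes submitted before it have finished); a WRITE
 * waiter likewise waits for prior reads via read_complete and read_target.
 */
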
/* macro for sync debug.. */
#define SYNCDBG 0
#define SYNC(fmt, ...) do { if (SYNCDBG) \
		pr_err("%s:%d: " fmt "\n", __func__, __LINE__, ##__VA_ARGS__); \
	} while (0)


static void sync_op_update(void)
{
	struct omap_gem_sync_waiter *waiter, *n;
	list_for_each_entry_safe(waiter, n, &waiters, list) {
		if (!is_waiting(waiter)) {
			list_del(&waiter->list);
			SYNC("notify: %p", waiter);
			waiter->notify(waiter->arg);
			kfree(waiter);
		}
	}
}

static inline int sync_op(struct drm_gem_object *obj,
		enum omap_gem_op op, bool start)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	spin_lock(&sync_lock);

	if (!omap_obj->sync) {
		omap_obj->sync = kzalloc(sizeof(*omap_obj->sync), GFP_ATOMIC);
		if (!omap_obj->sync) {
			ret = -ENOMEM;
			goto unlock;
		}
	}

	if (start) {
		if (op & OMAP_GEM_READ)
			omap_obj->sync->read_pending++;
		if (op & OMAP_GEM_WRITE)
			omap_obj->sync->write_pending++;
	} else {
		if (op & OMAP_GEM_READ)
			omap_obj->sync->read_complete++;
		if (op & OMAP_GEM_WRITE)
			omap_obj->sync->write_complete++;
		sync_op_update();
	}

unlock:
	spin_unlock(&sync_lock);

	return ret;
}

/* mark the start of read and/or write operation */
int omap_gem_op_start(struct drm_gem_object *obj, enum omap_gem_op op)
{
	return sync_op(obj, op, true);
}

int omap_gem_op_finish(struct drm_gem_object *obj, enum omap_gem_op op)
{
	return sync_op(obj, op, false);
}

static DECLARE_WAIT_QUEUE_HEAD(sync_event);

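/*
 * Usage sketch (hypothetical caller): a driver bracketing an asynchronous
 * write would call omap_gem_op_start(obj, OMAP_GEM_WRITE) at submission and
 * omap_gem_op_finish(obj, OMAP_GEM_WRITE) from its completion path, while a
 * CPU reader calls omap_gem_op_sync(obj, OMAP_GEM_READ) to block until all
 * previously submitted writes have completed.
 */
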
static void sync_notify(void *arg)
{
	struct task_struct **waiter_task = arg;
	*waiter_task = NULL;
	wake_up_all(&sync_event);
}

int omap_gem_op_sync(struct drm_gem_object *obj, enum omap_gem_op op)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;
	if (omap_obj->sync) {
		struct task_struct *waiter_task = current;
		struct omap_gem_sync_waiter *waiter =
				kzalloc(sizeof(*waiter), GFP_KERNEL);

		if (!waiter)
			return -ENOMEM;

		waiter->omap_obj = omap_obj;
		waiter->op = op;
		waiter->read_target = omap_obj->sync->read_pending;
		waiter->write_target = omap_obj->sync->write_pending;
		waiter->notify = sync_notify;
		waiter->arg = &waiter_task;

		spin_lock(&sync_lock);
		if (is_waiting(waiter)) {
			SYNC("waited: %p", waiter);
			list_add_tail(&waiter->list, &waiters);
			spin_unlock(&sync_lock);
			ret = wait_event_interruptible(sync_event,
					(waiter_task == NULL));
			spin_lock(&sync_lock);
			if (waiter_task) {
				SYNC("interrupted: %p", waiter);
				/* we were interrupted */
				list_del(&waiter->list);
				waiter_task = NULL;
			} else {
				/* freed in sync_op_update() */
				waiter = NULL;
			}
		}
		spin_unlock(&sync_lock);
		kfree(waiter);
	}
	return ret;
}

/* call fxn(arg), either synchronously or asynchronously if the op
 * is currently blocked.. fxn() can be called from any context
 *
 * (TODO for now fxn is called back from whichever context calls
 * omap_gem_op_finish().. but this could be better defined later
 * if needed)
 *
 * TODO more code in common w/ _sync()..
 */
int omap_gem_op_async(struct drm_gem_object *obj, enum omap_gem_op op,
		void (*fxn)(void *arg), void *arg)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	if (omap_obj->sync) {
		struct omap_gem_sync_waiter *waiter =
				kzalloc(sizeof(*waiter), GFP_ATOMIC);

		if (!waiter)
			return -ENOMEM;

		waiter->omap_obj = omap_obj;
		waiter->op = op;
		waiter->read_target = omap_obj->sync->read_pending;
		waiter->write_target = omap_obj->sync->write_pending;
		waiter->notify = fxn;
		waiter->arg = arg;

		spin_lock(&sync_lock);
		if (is_waiting(waiter)) {
			SYNC("waited: %p", waiter);
			list_add_tail(&waiter->list, &waiters);
			spin_unlock(&sync_lock);
			return 0;
		}

		spin_unlock(&sync_lock);

		kfree(waiter);
	}

	/* no waiting.. */
	fxn(arg);

	return 0;
}

/* -----------------------------------------------------------------------------
 * Constructor & Destructor
 */

void omap_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	evict(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	spin_lock(&priv->list_lock);
	list_del(&omap_obj->mm_list);
	spin_unlock(&priv->list_lock);

	/* this means the object is still pinned.. which really should
	 * not happen.  I think..
	 */
	WARN_ON(omap_obj->paddr_cnt > 0);

	if (omap_obj->pages) {
		if (omap_obj->flags & OMAP_BO_MEM_DMABUF)
			kfree(omap_obj->pages);
		else
			omap_gem_detach_pages(obj);
	}

	if (omap_obj->flags & OMAP_BO_MEM_DMA_API) {
		dma_free_wc(dev->dev, obj->size, omap_obj->vaddr,
			    omap_obj->paddr);
	} else if (omap_obj->vaddr) {
		vunmap(omap_obj->vaddr);
	} else if (obj->import_attach) {
		drm_prime_gem_destroy(obj, omap_obj->sgt);
	}

	kfree(omap_obj->sync);

	drm_gem_object_release(obj);

	kfree(omap_obj);
}

/* GEM buffer object constructor */
struct drm_gem_object *omap_gem_new(struct drm_device *dev,
		union omap_gem_size gsize, uint32_t flags)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj;
	struct drm_gem_object *obj;
	struct address_space *mapping;
	size_t size;
	int ret;

	/* Validate the flags and compute the memory and cache flags. */
	if (flags & OMAP_BO_TILED) {
		if (!priv->usergart) {
			dev_err(dev->dev, "Tiled buffers require DMM\n");
			return NULL;
		}

		/*
		 * Tiled buffers are always shmem paged backed. When they are
		 * scanned out, they are remapped into DMM/TILER.
		 */
		flags &= ~OMAP_BO_SCANOUT;
		flags |= OMAP_BO_MEM_SHMEM;

		/*
		 * Currently don't allow cached buffers. There is some caching
		 * stuff that needs to be handled better.
		 */
		flags &= ~(OMAP_BO_CACHED|OMAP_BO_WC|OMAP_BO_UNCACHED);
		flags |= tiler_get_cpu_cache_flags();
	} else if ((flags & OMAP_BO_SCANOUT) && !priv->has_dmm) {
		/*
		 * OMAP_BO_SCANOUT hints that the buffer doesn't need to be
		 * tiled. However, to lower the pressure on memory allocation,
		 * use contiguous memory only if no TILER is available.
		 */
		flags |= OMAP_BO_MEM_DMA_API;
	} else if (!(flags & OMAP_BO_MEM_DMABUF)) {
		/*
		 * All other buffers not backed by dma_buf are shmem-backed.
		 */
		flags |= OMAP_BO_MEM_SHMEM;
	}

	/* Allocate and initialize the OMAP GEM object. */
	omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL);
	if (!omap_obj)
		return NULL;

	obj = &omap_obj->base;
	omap_obj->flags = flags;

	if (flags & OMAP_BO_TILED) {
		/*
		 * For tiled buffers align dimensions to slot boundaries and
		 * calculate size based on aligned dimensions.
		 */
		tiler_align(gem2fmt(flags), &gsize.tiled.width,
			    &gsize.tiled.height);

		size = tiler_size(gem2fmt(flags), gsize.tiled.width,
				  gsize.tiled.height);

		omap_obj->width = gsize.tiled.width;
		omap_obj->height = gsize.tiled.height;
	} else {
		size = PAGE_ALIGN(gsize.bytes);
	}

	/* Initialize the GEM object. */
	if (!(flags & OMAP_BO_MEM_SHMEM)) {
		drm_gem_private_object_init(dev, obj, size);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto err_free;

		mapping = obj->filp->f_mapping;
		mapping_set_gfp_mask(mapping, GFP_USER | __GFP_DMA32);
	}

	/* Allocate memory if needed. */
	if (flags & OMAP_BO_MEM_DMA_API) {
		omap_obj->vaddr = dma_alloc_wc(dev->dev, size,
					       &omap_obj->paddr,
					       GFP_KERNEL);
		if (!omap_obj->vaddr)
			goto err_release;
	}

	spin_lock(&priv->list_lock);
	list_add(&omap_obj->mm_list, &priv->obj_list);
	spin_unlock(&priv->list_lock);

	return obj;

err_release:
	drm_gem_object_release(obj);
err_free:
	kfree(omap_obj);
	return NULL;
}

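/*
 * Example (a sketch; 'len' is a hypothetical byte count): allocating a
 * write-combined, shmem-backed buffer from driver code:
 *
 *	union omap_gem_size gsize = { .bytes = PAGE_ALIGN(len) };
 *	struct drm_gem_object *obj = omap_gem_new(dev, gsize, OMAP_BO_WC);
 *
 *	if (!obj)
 *		return -ENOMEM;
 */
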
struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
		struct sg_table *sgt)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj;
	struct drm_gem_object *obj;
	union omap_gem_size gsize;

	/* Without a DMM only physically contiguous buffers can be supported. */
	if (sgt->orig_nents != 1 && !priv->has_dmm)
		return ERR_PTR(-EINVAL);

	mutex_lock(&dev->struct_mutex);

	gsize.bytes = PAGE_ALIGN(size);
	obj = omap_gem_new(dev, gsize, OMAP_BO_MEM_DMABUF | OMAP_BO_WC);
	if (!obj) {
		obj = ERR_PTR(-ENOMEM);
		goto done;
	}

	omap_obj = to_omap_bo(obj);
	omap_obj->sgt = sgt;

	if (sgt->orig_nents == 1) {
		omap_obj->paddr = sg_dma_address(sgt->sgl);
	} else {
		/* Create pages list from sgt */
		struct sg_page_iter iter;
		struct page **pages;
		unsigned int npages;
		unsigned int i = 0;

		npages = DIV_ROUND_UP(size, PAGE_SIZE);
		pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
		if (!pages) {
			omap_gem_free_object(obj);
			obj = ERR_PTR(-ENOMEM);
			goto done;
		}

		omap_obj->pages = pages;

		for_each_sg_page(sgt->sgl, &iter, sgt->orig_nents, 0) {
			pages[i++] = sg_page_iter_page(&iter);
			if (i > npages)
				break;
		}

		if (WARN_ON(i != npages)) {
			omap_gem_free_object(obj);
			obj = ERR_PTR(-ENOMEM);
			goto done;
		}
	}

done:
	mutex_unlock(&dev->struct_mutex);
	return obj;
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		union omap_gem_size gsize, uint32_t flags, uint32_t *handle)
{
	struct drm_gem_object *obj;
	int ret;

	obj = omap_gem_new(dev, gsize, flags);
	if (!obj)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, obj, handle);
	if (ret) {
		omap_gem_free_object(obj);
		return ret;
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return 0;
}

/* -----------------------------------------------------------------------------
 * Init & Cleanup
 */

/* If DMM is used, we need to set some stuff up.. */
void omap_gem_init(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_drm_usergart *usergart;
	const enum tiler_fmt fmts[] = {
			TILFMT_8BIT, TILFMT_16BIT, TILFMT_32BIT
	};
	int i, j;

	if (!dmm_is_available()) {
		/* DMM only supported on OMAP4 and later, so this isn't fatal */
		dev_warn(dev->dev, "DMM not available, disable DMM support\n");
		return;
	}

	usergart = kcalloc(3, sizeof(*usergart), GFP_KERNEL);
	if (!usergart)
		return;

	/* reserve 4k aligned/wide regions for userspace mappings: */
	for (i = 0; i < ARRAY_SIZE(fmts); i++) {
		uint16_t h = 1, w = PAGE_SIZE >> i;
		tiler_align(fmts[i], &w, &h);
		/* note: since each region is 1 4kb page wide, and minimum
		 * number of rows, the height ends up being the same as the
		 * # of pages in the region
		 */
		usergart[i].height = h;
		usergart[i].height_shift = ilog2(h);
		usergart[i].stride_pfn = tiler_stride(fmts[i], 0) >> PAGE_SHIFT;
		usergart[i].slot_shift = ilog2((PAGE_SIZE / h) >> i);
		for (j = 0; j < NUM_USERGART_ENTRIES; j++) {
			struct omap_drm_usergart_entry *entry;
			struct tiler_block *block;

			entry = &usergart[i].entry[j];
			block = tiler_reserve_2d(fmts[i], w, h, PAGE_SIZE);
			if (IS_ERR(block)) {
				dev_err(dev->dev,
						"reserve failed: %d, %d, %ld\n",
						i, j, PTR_ERR(block));
				return;
			}
			entry->paddr = tiler_ssptr(block);
			entry->block = block;

			DBG("%d:%d: %dx%d: paddr=%pad stride=%d", i, j, w, h,
					&entry->paddr,
					usergart[i].stride_pfn << PAGE_SHIFT);
		}
	}

	priv->usergart = usergart;
	priv->has_dmm = true;
}

void omap_gem_deinit(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;

	/* I believe we can rely on there being no more outstanding GEM
	 * objects which could depend on usergart/dmm at this point.
	 */
	kfree(priv->usergart);
}