/*
 * drivers/gpu/drm/omapdrm/omap_gem.c
 *
 * Copyright (C) 2011 Texas Instruments
 * Author: Rob Clark <rob.clark@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/shmem_fs.h>
#include <linux/spinlock.h>
#include <linux/pfn_t.h>

#include <drm/drm_vma_manager.h>

#include "omap_drv.h"
#include "omap_dmm_tiler.h"

/*
 * GEM buffer object implementation.
 */

/* note: we use upper 8 bits of flags for driver-internal flags: */
#define OMAP_BO_MEM_DMA_API	0x01000000	/* memory allocated with the dma_alloc_* API */
#define OMAP_BO_MEM_SHMEM	0x02000000	/* memory allocated through shmem backing */
#define OMAP_BO_MEM_EXT		0x04000000	/* memory allocated externally */
#define OMAP_BO_EXT_SYNC	0x10000000	/* externally allocated sync object */

struct omap_gem_object {
	struct drm_gem_object base;

	struct list_head mm_list;

	uint32_t flags;

	/** width/height for tiled formats (rounded up to slot boundaries) */
	uint16_t width, height;

	/** roll applied when mapping to DMM */
	uint32_t roll;

	/**
	 * If buffer is allocated physically contiguous, the OMAP_BO_MEM_DMA_API
	 * flag is set and the paddr is valid.  Also if the buffer is remapped
	 * in TILER and paddr_cnt > 0, then paddr is valid.  But if you are using
	 * the physical address and OMAP_BO_MEM_DMA_API is not set, then you
	 * should be going thru omap_gem_{get,put}_paddr() to ensure the mapping
	 * is not removed from under your feet.
	 *
	 * Note that OMAP_BO_SCANOUT is a hint from userspace that a DMA capable
	 * buffer is requested, but doesn't mean that it is.  Use the
	 * OMAP_BO_MEM_DMA_API flag to determine if the buffer has a DMA capable
	 * physical address.
	 */
	dma_addr_t paddr;

	/**
	 * # of users of paddr
	 */
	uint32_t paddr_cnt;

	/**
	 * tiler block used when buffer is remapped in DMM/TILER.
	 */
	struct tiler_block *block;

	/**
	 * Array of backing pages, if allocated.  Note that pages are never
	 * allocated for buffers originally allocated from contiguous memory.
	 */
	struct page **pages;

	/** addresses corresponding to pages in above array */
	dma_addr_t *addrs;

	/**
	 * Virtual address, if mapped.
	 */
	void *vaddr;

	/**
	 * sync-object allocated on demand (if needed)
	 *
	 * Per-buffer sync-object for tracking pending and completed hw/dma
	 * read and write operations.  The layout in memory is dictated by
	 * the SGX firmware, which uses this information to stall the command
	 * stream if a surface is not ready yet.
	 *
	 * Note that when a buffer is used by SGX, the sync-object needs to be
	 * allocated from a special heap of sync-objects.  This way many sync
	 * objects can be packed in a page, and not waste GPU virtual address
	 * space.  Because of this we have to have an omap_gem_set_sync_object()
	 * API to allow replacement of the syncobj after it has (potentially)
	 * already been allocated.  A bit ugly but I haven't thought of a
	 * better alternative.
	 */
	struct {
		uint32_t write_pending;
		uint32_t write_complete;
		uint32_t read_pending;
		uint32_t read_complete;
	} *sync;
};

#define to_omap_bo(x) container_of(x, struct omap_gem_object, base)

/* To deal with userspace mmap'ings of 2d tiled buffers, which (a) are
 * not necessarily pinned in TILER all the time, and (b) when they are
 * they are not necessarily page aligned, we reserve one or more small
 * regions in each of the 2d containers to use as a user-GART where we
 * can create a second page-aligned mapping of parts of the buffer
 * being accessed from userspace.
 *
 * Note that we could optimize slightly when we know that multiple
 * tiler containers are backed by the same PAT.. but I'll leave that
 * for later..
 */
#define NUM_USERGART_ENTRIES 2
struct omap_drm_usergart_entry {
	struct tiler_block *block;	/* the reserved tiler block */
	dma_addr_t paddr;
	struct drm_gem_object *obj;	/* the current pinned obj */
	pgoff_t obj_pgoff;		/* page offset of obj currently
					   mapped in */
};

struct omap_drm_usergart {
	struct omap_drm_usergart_entry entry[NUM_USERGART_ENTRIES];
	int height;		/* height in rows */
	int height_shift;	/* ilog2(height in rows) */
	int slot_shift;		/* ilog2(width per slot) */
	int stride_pfn;		/* stride in pages */
	int last;		/* index of last used entry */
};
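
/*
 * Worked example (illustrative only; the exact numbers depend on the DMM
 * slot geometry, assumed here to be 64 bytes x 64 rows for the 8-bit
 * container, with 4KiB pages): each usergart region is one page wide and
 * one slot tall, so
 *
 *	height       = 64 rows
 *	height_shift = ilog2(64) = 6
 *	slot_shift   = ilog2((PAGE_SIZE / 64) >> 0) = 6
 *
 * i.e. one entry services faults for a 4KiB-wide, 64-row window of a
 * tiled buffer at a time.
 */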

/* -----------------------------------------------------------------------------
 * Helpers
 */

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;
	size_t size;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* Make it mmapable */
	size = omap_gem_mmap_size(obj);
	ret = drm_gem_create_mmap_offset_size(obj, size);
	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

/* -----------------------------------------------------------------------------
 * Eviction
 */

static void evict_entry(struct drm_gem_object *obj,
		enum tiler_fmt fmt, struct omap_drm_usergart_entry *entry)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct omap_drm_private *priv = obj->dev->dev_private;
	int n = priv->usergart[fmt].height;
	size_t size = PAGE_SIZE * n;
	loff_t off = mmap_offset(obj) +
			(entry->obj_pgoff << PAGE_SHIFT);
	const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);

	if (m > 1) {
		int i;
		/* if stride > PAGE_SIZE then sparse mapping: */
		for (i = n; i > 0; i--) {
			unmap_mapping_range(obj->dev->anon_inode->i_mapping,
					    off, PAGE_SIZE, 1);
			off += PAGE_SIZE * m;
		}
	} else {
		unmap_mapping_range(obj->dev->anon_inode->i_mapping,
				    off, size, 1);
	}

	entry->obj = NULL;
}
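
/*
 * Illustrative numbers for the sparse (m > 1) case above: with a virtual
 * stride of m = 2 pages, consecutive rows of the usergart window sit two
 * pages apart in the mmap space, so the loop punches out n single-page
 * holes at off, off + 2*PAGE_SIZE, off + 4*PAGE_SIZE, ... rather than
 * unmapping one contiguous n-page range.
 */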

/* Evict a buffer from usergart, if it is mapped there */
static void evict(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct omap_drm_private *priv = obj->dev->dev_private;

	if (omap_obj->flags & OMAP_BO_TILED) {
		enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
		int i;

		for (i = 0; i < NUM_USERGART_ENTRIES; i++) {
			struct omap_drm_usergart_entry *entry =
					&priv->usergart[fmt].entry[i];

			if (entry->obj == obj)
				evict_entry(obj, fmt, entry);
		}
	}
}

/* -----------------------------------------------------------------------------
 * Page Management
 */

/** ensure backing pages are allocated */
static int omap_gem_attach_pages(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct page **pages;
	int npages = obj->size >> PAGE_SHIFT;
	int i, ret;
	dma_addr_t *addrs;

	WARN_ON(omap_obj->pages);

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages));
		return PTR_ERR(pages);
	}

	/* for non-cached buffers, ensure the new pages are clean because
	 * DSS, GPU, etc. are not cache coherent:
	 */
	if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
		addrs = kmalloc(npages * sizeof(*addrs), GFP_KERNEL);
		if (!addrs) {
			ret = -ENOMEM;
			goto free_pages;
		}

		for (i = 0; i < npages; i++) {
			addrs[i] = dma_map_page(dev->dev, pages[i],
					0, PAGE_SIZE, DMA_BIDIRECTIONAL);

			if (dma_mapping_error(dev->dev, addrs[i])) {
				dev_warn(dev->dev,
					"%s: failed to map page\n", __func__);

				for (i = i - 1; i >= 0; --i) {
					dma_unmap_page(dev->dev, addrs[i],
						PAGE_SIZE, DMA_BIDIRECTIONAL);
				}

				ret = -ENOMEM;
				goto free_addrs;
			}
		}
	} else {
		addrs = kzalloc(npages * sizeof(*addrs), GFP_KERNEL);
		if (!addrs) {
			ret = -ENOMEM;
			goto free_pages;
		}
	}

	omap_obj->addrs = addrs;
	omap_obj->pages = pages;

	return 0;

free_addrs:
	kfree(addrs);
free_pages:
	drm_gem_put_pages(obj, pages, true, false);

	return ret;
}

/* acquire pages when needed (for example, for DMA where a physically
 * contiguous buffer is not required)
 */
static int get_pages(struct drm_gem_object *obj, struct page ***pages)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	if ((omap_obj->flags & OMAP_BO_MEM_SHMEM) && !omap_obj->pages) {
		ret = omap_gem_attach_pages(obj);
		if (ret) {
			dev_err(obj->dev->dev, "could not attach pages\n");
			return ret;
		}
	}

	/* TODO: even phys-contig.. we should have a list of pages? */
	*pages = omap_obj->pages;

	return 0;
}

/** release backing pages */
static void omap_gem_detach_pages(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	/* for non-cached buffers, ensure the new pages are clean because
	 * DSS, GPU, etc. are not cache coherent:
	 */
	if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
		int i, npages = obj->size >> PAGE_SHIFT;
		for (i = 0; i < npages; i++) {
			dma_unmap_page(obj->dev->dev, omap_obj->addrs[i],
					PAGE_SIZE, DMA_BIDIRECTIONAL);
		}
	}

	kfree(omap_obj->addrs);
	omap_obj->addrs = NULL;

	drm_gem_put_pages(obj, omap_obj->pages, true, false);
	omap_obj->pages = NULL;
}

/* get buffer flags */
uint32_t omap_gem_flags(struct drm_gem_object *obj)
{
	return to_omap_bo(obj)->flags;
}

uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	mutex_lock(&obj->dev->struct_mutex);
	offset = mmap_offset(obj);
	mutex_unlock(&obj->dev->struct_mutex);
	return offset;
}

/** get mmap size */
size_t omap_gem_mmap_size(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	size_t size = obj->size;

	if (omap_obj->flags & OMAP_BO_TILED) {
		/* for tiled buffers, the virtual size has stride rounded up
		 * to 4kb.. (to hide the fact that row n+1 might start 16kb or
		 * 32kb later!).  But we don't back the entire buffer with
		 * pages, only the valid picture part.. so need to adjust for
		 * this in the size used to mmap and generate mmap offset
		 */
		size = tiler_vsize(gem2fmt(omap_obj->flags),
				omap_obj->width, omap_obj->height);
	}

	return size;
}

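/*
 * Worked example for the tiled case above (illustrative): a 16-bit tiled
 * buffer 1280 pixels (2560 bytes) wide gets its virtual stride rounded up
 * to 4KiB, so userspace sees a 4096-byte pitch even though only 2560
 * bytes per row are actually backed by pages.
 */
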
/* get tiled size, returns -EINVAL if not tiled buffer */
int omap_gem_tiled_size(struct drm_gem_object *obj, uint16_t *w, uint16_t *h)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	if (omap_obj->flags & OMAP_BO_TILED) {
		*w = omap_obj->width;
		*h = omap_obj->height;
		return 0;
	}
	return -EINVAL;
}

/* -----------------------------------------------------------------------------
 * Fault Handling
 */

/* Normal handling for the case of faulting in non-tiled buffers */
static int fault_1d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	unsigned long pfn;
	pgoff_t pgoff;

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	if (omap_obj->pages) {
		omap_gem_cpu_sync(obj, pgoff);
		pfn = page_to_pfn(omap_obj->pages[pgoff]);
	} else {
		BUG_ON(!(omap_obj->flags & OMAP_BO_MEM_DMA_API));
		pfn = (omap_obj->paddr >> PAGE_SHIFT) + pgoff;
	}

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);

	return vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
			__pfn_to_pfn_t(pfn, PFN_DEV));
}

/* Special handling for the case of faulting in 2d tiled buffers */
static int fault_2d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct omap_drm_private *priv = obj->dev->dev_private;
	struct omap_drm_usergart_entry *entry;
	enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
	struct page *pages[64];  /* XXX is this too much to have on stack? */
	unsigned long pfn;
	pgoff_t pgoff, base_pgoff;
	void __user *vaddr;
	int i, ret, slots;

	/*
	 * Note the height of the slot is also equal to the number of pages
	 * that need to be mapped in to fill a 4kb wide CPU page.  If the slot
	 * height is 64, then 64 pages fill a 4kb wide by 64 row region.
	 */
	const int n = priv->usergart[fmt].height;
	const int n_shift = priv->usergart[fmt].height_shift;

	/*
	 * If buffer width in bytes > PAGE_SIZE then the virtual stride is
	 * rounded up to the next multiple of PAGE_SIZE.. this needs to be
	 * taken into account in some of the math, so figure out the virtual
	 * stride in pages.
	 */
	const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	/*
	 * Actual address we start mapping at is rounded down to previous slot
	 * boundary in the y direction:
	 */
	base_pgoff = round_down(pgoff, m << n_shift);

	/* figure out buffer width in slots */
	slots = omap_obj->width >> priv->usergart[fmt].slot_shift;

	vaddr = vmf->virtual_address - ((pgoff - base_pgoff) << PAGE_SHIFT);

	entry = &priv->usergart[fmt].entry[priv->usergart[fmt].last];

	/* evict previous buffer using this usergart entry, if any: */
	if (entry->obj)
		evict_entry(entry->obj, fmt, entry);

	entry->obj = obj;
	entry->obj_pgoff = base_pgoff;

	/* now convert base_pgoff to phys offset from virt offset: */
	base_pgoff = (base_pgoff >> n_shift) * slots;

	/* for wider-than 4k.. figure out which part of the slot-row we want: */
	if (m > 1) {
		int off = pgoff % m;
		entry->obj_pgoff += off;
		base_pgoff /= m;
		slots = min(slots - (off << n_shift), n);
		base_pgoff += off << n_shift;
		vaddr += off << PAGE_SHIFT;
	}

	/*
	 * Map in pages.  Beyond the valid pixel part of the buffer, we set
	 * pages[i] to NULL to get a dummy page mapped in.. if someone
	 * reads/writes it they will get random/undefined content, but at
	 * least it won't be corrupting whatever other random page used to
	 * be mapped in, or other undefined behavior.
	 */
	memcpy(pages, &omap_obj->pages[base_pgoff],
			sizeof(struct page *) * slots);
	memset(pages + slots, 0,
			sizeof(struct page *) * (n - slots));

	ret = tiler_pin(entry->block, pages, ARRAY_SIZE(pages), 0, true);
	if (ret) {
		dev_err(obj->dev->dev, "failed to pin: %d\n", ret);
		return ret;
	}

	pfn = entry->paddr >> PAGE_SHIFT;

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);

	for (i = n; i > 0; i--) {
		vm_insert_mixed(vma, (unsigned long)vaddr,
				__pfn_to_pfn_t(pfn, PFN_DEV));
		pfn += priv->usergart[fmt].stride_pfn;
		vaddr += PAGE_SIZE * m;
	}

	/* simple round-robin: */
	priv->usergart[fmt].last = (priv->usergart[fmt].last + 1)
				 % NUM_USERGART_ENTRIES;

	return 0;
}
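
/*
 * Illustrative walk-through of the math above (not part of the driver),
 * assuming an 8-bit buffer exactly one page wide (m = 1, slots = 64) and
 * a slot height of n = 64 (n_shift = 6): a fault at pgoff = 100 is
 * rounded down to base_pgoff = 64, the start of the second slot-row, and
 * the insert loop then maps 64 pages covering rows 64..127 of the buffer
 * through the reserved usergart block.
 */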

/**
 * omap_gem_fault - pagefault handler for GEM objects
 * @vma: the VMA of the GEM object
 * @vmf: fault detail
 *
 * Invoked when a fault occurs on an mmap of a GEM managed area.  GEM
 * does most of the work for us including the actual map/unmap calls
 * but we need to do the actual page work.
 *
 * The VMA was set up by GEM.  In doing so it also ensured that the
 * vma->vm_private_data points to the GEM object that is backing this
 * mapping.
 */
int omap_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct drm_device *dev = obj->dev;
	struct page **pages;
	int ret;

	/* Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet
	 */
	mutex_lock(&dev->struct_mutex);

	/* if a shmem backed object, make sure we have pages attached now */
	ret = get_pages(obj, &pages);
	if (ret)
		goto fail;

	/* where should we do corresponding put_pages().. we are mapping
	 * the original page, rather than thru a GART, so we can't rely
	 * on eviction to trigger this.  But munmap() or all mappings should
	 * probably trigger put_pages()?
	 */

	if (omap_obj->flags & OMAP_BO_TILED)
		ret = fault_2d(obj, vma, vmf);
	else
		ret = fault_1d(obj, vma, vmf);

fail:
	mutex_unlock(&dev->struct_mutex);
	switch (ret) {
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

/** We override mainly to fix up some of the vm mapping flags.. */
int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return omap_gem_mmap_obj(vma->vm_private_data, vma);
}

int omap_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (omap_obj->flags & OMAP_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (omap_obj->flags & OMAP_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * We do have some private objects, at least for scanout buffers
		 * on hardware without DMM/TILER.  But these are allocated write-
		 * combine.
		 */
		if (WARN_ON(!obj->filp))
			return -EINVAL;

		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		vma->vm_pgoff = 0;
		vma->vm_file  = get_file(obj->filp);

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

/* -----------------------------------------------------------------------------
 * Dumb Buffers
 */

/**
 * omap_gem_dumb_create - create a dumb buffer
 * @file: our client file
 * @dev: our device
 * @args: the requested arguments copied from userspace
 *
 * Allocate a buffer suitable for use for a frame buffer of the
 * form described by user space.  Give userspace a handle by which
 * to reference it.
 */
int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	union omap_gem_size gsize;

	args->pitch = align_pitch(0, args->width, args->bpp);
	args->size = PAGE_ALIGN(args->pitch * args->height);

	gsize = (union omap_gem_size){
		.bytes = args->size,
	};

	return omap_gem_new_handle(dev, file, gsize,
			OMAP_BO_SCANOUT | OMAP_BO_WC, &args->handle);
}

/**
 * omap_gem_dumb_map_offset - buffer mapping for dumb interface
 * @file: our drm client file
 * @dev: drm device
 * @handle: GEM handle to the object (from dumb_create)
 * @offset: the resulting fake mmap offset
 *
 * Do the necessary setup to allow the mapping of the frame buffer
 * into user memory.  We don't have to do much here at the moment.
 */
int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(dev, file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = omap_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

fail:
	return ret;
}

#ifdef CONFIG_DRM_FBDEV_EMULATION
/* Set scrolling position.  This allows us to implement fast scrolling
 * for console.
 *
 * Call only from non-atomic contexts.
 */
int omap_gem_roll(struct drm_gem_object *obj, uint32_t roll)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	uint32_t npages = obj->size >> PAGE_SHIFT;
	int ret = 0;

	if (roll > npages) {
		dev_err(obj->dev->dev, "invalid roll: %d\n", roll);
		return -EINVAL;
	}

	omap_obj->roll = roll;

	mutex_lock(&obj->dev->struct_mutex);

	/* if we aren't mapped yet, we don't need to do anything */
	if (omap_obj->block) {
		struct page **pages;
		ret = get_pages(obj, &pages);
		if (ret)
			goto fail;
		ret = tiler_pin(omap_obj->block, pages, npages, roll, true);
		if (ret)
			dev_err(obj->dev->dev, "could not repin: %d\n", ret);
	}

fail:
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}
#endif

/* -----------------------------------------------------------------------------
 * Memory Management & DMA Sync
 */

/**
 * shmem buffers that are mapped cached can simulate coherency by using
 * page faulting to keep track of dirty pages
 */
static inline bool is_cached_coherent(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	return (omap_obj->flags & OMAP_BO_MEM_SHMEM) &&
		((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED);
}

/* Sync the buffer for CPU access.. note pages should already be
 * attached, ie. omap_gem_get_pages()
 */
void omap_gem_cpu_sync(struct drm_gem_object *obj, int pgoff)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (is_cached_coherent(obj) && omap_obj->addrs[pgoff]) {
		dma_unmap_page(dev->dev, omap_obj->addrs[pgoff],
				PAGE_SIZE, DMA_BIDIRECTIONAL);
		omap_obj->addrs[pgoff] = 0;
	}
}

/* sync the buffer for DMA access */
void omap_gem_dma_sync(struct drm_gem_object *obj,
		enum dma_data_direction dir)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (is_cached_coherent(obj)) {
		int i, npages = obj->size >> PAGE_SHIFT;
		struct page **pages = omap_obj->pages;
		bool dirty = false;

		for (i = 0; i < npages; i++) {
			if (!omap_obj->addrs[i]) {
				dma_addr_t addr;

				addr = dma_map_page(dev->dev, pages[i], 0,
						PAGE_SIZE, DMA_BIDIRECTIONAL);

				if (dma_mapping_error(dev->dev, addr)) {
					dev_warn(dev->dev,
						"%s: failed to map page\n",
						__func__);
					break;
				}

				dirty = true;
				omap_obj->addrs[i] = addr;
			}
		}

		if (dirty) {
			unmap_mapping_range(obj->filp->f_mapping, 0,
					omap_gem_mmap_size(obj), 1);
		}
	}
}
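
/*
 * Illustrative lifecycle of the fault-based coherency above (a sketch,
 * not extra driver code): for a cached shmem bo, addrs[] starts out
 * zeroed, i.e. every page is "dirty".  omap_gem_dma_sync() maps (and
 * thereby cleans) each dirty page and then zaps the userspace mappings.
 * A later CPU access faults the page back in, and omap_gem_cpu_sync()
 * unmaps just that page (addrs[pgoff] = 0), so the next dma_sync only
 * needs to clean the pages the CPU actually touched.
 */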

/* Get physical address for DMA.. if 'remap' is true, and the buffer is not
 * already contiguous, remap it to pin in physically contiguous memory.. (ie.
 * map in TILER)
 */
int omap_gem_get_paddr(struct drm_gem_object *obj,
		dma_addr_t *paddr, bool remap)
{
	struct omap_drm_private *priv = obj->dev->dev_private;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	mutex_lock(&obj->dev->struct_mutex);

	if (!(omap_obj->flags & OMAP_BO_MEM_DMA_API) &&
			remap && priv->has_dmm) {
		if (omap_obj->paddr_cnt == 0) {
			struct page **pages;
			uint32_t npages = obj->size >> PAGE_SHIFT;
			enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
			struct tiler_block *block;

			BUG_ON(omap_obj->block);

			ret = get_pages(obj, &pages);
			if (ret)
				goto fail;

			if (omap_obj->flags & OMAP_BO_TILED) {
				block = tiler_reserve_2d(fmt,
						omap_obj->width,
						omap_obj->height, 0);
			} else {
				block = tiler_reserve_1d(obj->size);
			}

			if (IS_ERR(block)) {
				ret = PTR_ERR(block);
				dev_err(obj->dev->dev,
					"could not remap: %d (%d)\n", ret, fmt);
				goto fail;
			}

			/* TODO: enable async refill.. */
			ret = tiler_pin(block, pages, npages,
					omap_obj->roll, true);
			if (ret) {
				tiler_release(block);
				dev_err(obj->dev->dev,
						"could not pin: %d\n", ret);
				goto fail;
			}

			omap_obj->paddr = tiler_ssptr(block);
			omap_obj->block = block;

			DBG("got paddr: %pad", &omap_obj->paddr);
		}

		omap_obj->paddr_cnt++;

		*paddr = omap_obj->paddr;
	} else if (omap_obj->flags & OMAP_BO_MEM_DMA_API) {
		*paddr = omap_obj->paddr;
	} else {
		ret = -EINVAL;
		goto fail;
	}

fail:
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}

/* Release physical address, when DMA is no longer being performed.. this
 * could potentially unpin and unmap buffers from TILER
 */
void omap_gem_put_paddr(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret;

	mutex_lock(&obj->dev->struct_mutex);
	if (omap_obj->paddr_cnt > 0) {
		omap_obj->paddr_cnt--;
		if (omap_obj->paddr_cnt == 0) {
			ret = tiler_unpin(omap_obj->block);
			if (ret) {
				dev_err(obj->dev->dev,
					"could not unpin pages: %d\n", ret);
			}
			ret = tiler_release(omap_obj->block);
			if (ret) {
				dev_err(obj->dev->dev,
					"could not release unmap: %d\n", ret);
			}
			omap_obj->paddr = 0;
			omap_obj->block = NULL;
		}
	}

	mutex_unlock(&obj->dev->struct_mutex);
}
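
/*
 * Typical pin/unpin pairing (illustrative sketch of how a scanout path
 * would use the two calls above):
 *
 *	dma_addr_t paddr;
 *	int ret = omap_gem_get_paddr(obj, &paddr, true);
 *	if (ret)
 *		return ret;
 *	... program the display controller with paddr ...
 *	omap_gem_put_paddr(obj);  // once the hw is done scanning out
 */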

/* Get rotated scanout address (only valid if already pinned), at the
 * specified orientation and x,y offset from top-left corner of buffer
 * (only valid for tiled 2d buffers)
 */
int omap_gem_rotated_paddr(struct drm_gem_object *obj, uint32_t orient,
		int x, int y, dma_addr_t *paddr)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = -EINVAL;

	mutex_lock(&obj->dev->struct_mutex);
	if ((omap_obj->paddr_cnt > 0) && omap_obj->block &&
			(omap_obj->flags & OMAP_BO_TILED)) {
		*paddr = tiler_tsptr(omap_obj->block, orient, x, y);
		ret = 0;
	}
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

/* Get tiler stride for the buffer (only valid for 2d tiled buffers) */
int omap_gem_tiled_stride(struct drm_gem_object *obj, uint32_t orient)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = -EINVAL;
	if (omap_obj->flags & OMAP_BO_TILED)
		ret = tiler_stride(gem2fmt(omap_obj->flags), orient);
	return ret;
}

/* if !remap, and we don't have pages backing, then fail, rather than
 * increasing the pin count (which we don't really do yet anyways,
 * because we don't support swapping pages back out).  And 'remap'
 * might not be quite the right name, but I wanted to keep it working
 * similarly to omap_gem_get_paddr().  Note though that the mutex is not
 * acquired if !remap (because this can be called in atomic ctxt),
 * but probably omap_gem_get_paddr() should be changed to work in the
 * same way.  If !remap, a matching omap_gem_put_pages() call is not
 * required (and should not be made).
 */
int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
		bool remap)
{
	int ret;
	if (!remap) {
		struct omap_gem_object *omap_obj = to_omap_bo(obj);
		if (!omap_obj->pages)
			return -ENOMEM;
		*pages = omap_obj->pages;
		return 0;
	}
	mutex_lock(&obj->dev->struct_mutex);
	ret = get_pages(obj, pages);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

/* release pages when DMA no longer being performed */
int omap_gem_put_pages(struct drm_gem_object *obj)
{
	/* do something here if we dynamically attach/detach pages.. at
	 * least they would no longer need to be pinned if everyone has
	 * released the pages..
	 */
	return 0;
}

#ifdef CONFIG_DRM_FBDEV_EMULATION
/* Get kernel virtual address for CPU access.. this more or less only
 * exists for omap_fbdev.  This should be called with struct_mutex
 * held.
 */
void *omap_gem_vaddr(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
	if (!omap_obj->vaddr) {
		struct page **pages;
		int ret = get_pages(obj, &pages);
		if (ret)
			return ERR_PTR(ret);
		omap_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
	}
	return omap_obj->vaddr;
}
#endif
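
/*
 * Sketch of the intended use from the fbdev path (illustrative; per the
 * comment above, the caller holds struct_mutex):
 *
 *	mutex_lock(&dev->struct_mutex);
 *	void *vaddr = omap_gem_vaddr(bo);
 *	mutex_unlock(&dev->struct_mutex);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 */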

/* -----------------------------------------------------------------------------
 * Power Management
 */

#ifdef CONFIG_PM
/* re-pin objects in DMM in resume path: */
int omap_gem_resume(struct device *dev)
{
	struct drm_device *drm_dev = dev_get_drvdata(dev);
	struct omap_drm_private *priv = drm_dev->dev_private;
	struct omap_gem_object *omap_obj;
	int ret = 0;

	list_for_each_entry(omap_obj, &priv->obj_list, mm_list) {
		if (omap_obj->block) {
			struct drm_gem_object *obj = &omap_obj->base;
			uint32_t npages = obj->size >> PAGE_SHIFT;
			WARN_ON(!omap_obj->pages);  /* this can't happen */
			ret = tiler_pin(omap_obj->block,
					omap_obj->pages, npages,
					omap_obj->roll, true);
			if (ret) {
				dev_err(dev, "could not repin: %d\n", ret);
				return ret;
			}
		}
	}

	return 0;
}
#endif

/* -----------------------------------------------------------------------------
 * DebugFS
 */

#ifdef CONFIG_DEBUG_FS
void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	uint64_t off;

	off = drm_vma_node_start(&obj->vma_node);

	seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d",
			omap_obj->flags, obj->name, obj->refcount.refcount.counter,
			off, &omap_obj->paddr, omap_obj->paddr_cnt,
			omap_obj->vaddr, omap_obj->roll);

	if (omap_obj->flags & OMAP_BO_TILED) {
		seq_printf(m, " %dx%d", omap_obj->width, omap_obj->height);
		if (omap_obj->block) {
			struct tcm_area *area = &omap_obj->block->area;
			seq_printf(m, " (%dx%d, %dx%d)",
					area->p0.x, area->p0.y,
					area->p1.x, area->p1.y);
		}
	} else {
		seq_printf(m, " %zu", obj->size);
	}

	seq_printf(m, "\n");
}

void omap_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct omap_gem_object *omap_obj;
	int count = 0;
	size_t size = 0;

	list_for_each_entry(omap_obj, list, mm_list) {
		struct drm_gem_object *obj = &omap_obj->base;
		seq_printf(m, "   ");
		omap_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

/* -----------------------------------------------------------------------------
 * Buffer Synchronization
 */

static DEFINE_SPINLOCK(sync_lock);

struct omap_gem_sync_waiter {
	struct list_head list;
	struct omap_gem_object *omap_obj;
	enum omap_gem_op op;
	uint32_t read_target, write_target;
	/* notify called w/ sync_lock held */
	void (*notify)(void *arg);
	void *arg;
};

/* list of omap_gem_sync_waiter.. the notify fxn gets called back when
 * the read and/or write target count is achieved, which can call a user
 * callback (ex. to kick 3d and/or 2d), wakeup blocked task (prep for
 * cpu access), etc.
 */
static LIST_HEAD(waiters);

static inline bool is_waiting(struct omap_gem_sync_waiter *waiter)
{
	struct omap_gem_object *omap_obj = waiter->omap_obj;
	if ((waiter->op & OMAP_GEM_READ) &&
			(omap_obj->sync->write_complete < waiter->write_target))
		return true;
	if ((waiter->op & OMAP_GEM_WRITE) &&
			(omap_obj->sync->read_complete < waiter->read_target))
		return true;
	return false;
}

/* macro for sync debug.. */
#define SYNCDBG 0
#define SYNC(fmt, ...) do { if (SYNCDBG) \
		printk(KERN_ERR "%s:%d: "fmt"\n", \
				__func__, __LINE__, ##__VA_ARGS__); \
	} while (0)


static void sync_op_update(void)
{
	struct omap_gem_sync_waiter *waiter, *n;
	list_for_each_entry_safe(waiter, n, &waiters, list) {
		if (!is_waiting(waiter)) {
			list_del(&waiter->list);
			SYNC("notify: %p", waiter);
			waiter->notify(waiter->arg);
			kfree(waiter);
		}
	}
}

static inline int sync_op(struct drm_gem_object *obj,
		enum omap_gem_op op, bool start)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	spin_lock(&sync_lock);

	if (!omap_obj->sync) {
		omap_obj->sync = kzalloc(sizeof(*omap_obj->sync), GFP_ATOMIC);
		if (!omap_obj->sync) {
			ret = -ENOMEM;
			goto unlock;
		}
	}

	if (start) {
		if (op & OMAP_GEM_READ)
			omap_obj->sync->read_pending++;
		if (op & OMAP_GEM_WRITE)
			omap_obj->sync->write_pending++;
	} else {
		if (op & OMAP_GEM_READ)
			omap_obj->sync->read_complete++;
		if (op & OMAP_GEM_WRITE)
			omap_obj->sync->write_complete++;
		sync_op_update();
	}

unlock:
	spin_unlock(&sync_lock);

	return ret;
}

/* it is a bit lame to handle updates in this sort of polling way, but
 * in case of PVR, the GPU can directly update read/write complete
 * values, and not really tell us which ones it updated.. this also
 * means that sync_lock is not quite sufficient.  So we'll need to
 * do something a bit better when it comes time to add support for
 * separate 2d hw..
 */
void omap_gem_op_update(void)
{
	spin_lock(&sync_lock);
	sync_op_update();
	spin_unlock(&sync_lock);
}

/* mark the start of read and/or write operation */
int omap_gem_op_start(struct drm_gem_object *obj, enum omap_gem_op op)
{
	return sync_op(obj, op, true);
}

int omap_gem_op_finish(struct drm_gem_object *obj, enum omap_gem_op op)
{
	return sync_op(obj, op, false);
}
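
/*
 * Usage sketch (illustrative): a hw block that writes a buffer brackets
 * its access with
 *
 *	omap_gem_op_start(obj, OMAP_GEM_WRITE);
 *	... submit work to the hw ...
 *	omap_gem_op_finish(obj, OMAP_GEM_WRITE);
 *
 * while a CPU reader can block until all writes started before its call
 * have completed:
 *
 *	omap_gem_op_sync(obj, OMAP_GEM_READ);
 */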

static DECLARE_WAIT_QUEUE_HEAD(sync_event);

static void sync_notify(void *arg)
{
	struct task_struct **waiter_task = arg;
	*waiter_task = NULL;
	wake_up_all(&sync_event);
}

int omap_gem_op_sync(struct drm_gem_object *obj, enum omap_gem_op op)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;
	if (omap_obj->sync) {
		struct task_struct *waiter_task = current;
		struct omap_gem_sync_waiter *waiter =
				kzalloc(sizeof(*waiter), GFP_KERNEL);

		if (!waiter)
			return -ENOMEM;

		waiter->omap_obj = omap_obj;
		waiter->op = op;
		waiter->read_target = omap_obj->sync->read_pending;
		waiter->write_target = omap_obj->sync->write_pending;
		waiter->notify = sync_notify;
		waiter->arg = &waiter_task;

		spin_lock(&sync_lock);
		if (is_waiting(waiter)) {
			SYNC("waited: %p", waiter);
			list_add_tail(&waiter->list, &waiters);
			spin_unlock(&sync_lock);
			ret = wait_event_interruptible(sync_event,
					(waiter_task == NULL));
			spin_lock(&sync_lock);
			if (waiter_task) {
				SYNC("interrupted: %p", waiter);
				/* we were interrupted */
				list_del(&waiter->list);
				waiter_task = NULL;
			} else {
				/* freed in sync_op_update() */
				waiter = NULL;
			}
		}
		spin_unlock(&sync_lock);
		kfree(waiter);
	}
	return ret;
}

/* call fxn(arg), either synchronously or asynchronously if the op
 * is currently blocked.. fxn() can be called from any context
 *
 * (TODO for now fxn is called back from whichever context calls
 * omap_gem_op_update().. but this could be better defined later
 * if needed)
 *
 * TODO more code in common w/ _sync()..
 */
int omap_gem_op_async(struct drm_gem_object *obj, enum omap_gem_op op,
		void (*fxn)(void *arg), void *arg)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	if (omap_obj->sync) {
		struct omap_gem_sync_waiter *waiter =
				kzalloc(sizeof(*waiter), GFP_ATOMIC);

		if (!waiter)
			return -ENOMEM;

		waiter->omap_obj = omap_obj;
		waiter->op = op;
		waiter->read_target = omap_obj->sync->read_pending;
		waiter->write_target = omap_obj->sync->write_pending;
		waiter->notify = fxn;
		waiter->arg = arg;

		spin_lock(&sync_lock);
		if (is_waiting(waiter)) {
			SYNC("waited: %p", waiter);
			list_add_tail(&waiter->list, &waiters);
			spin_unlock(&sync_lock);
			return 0;
		}

		spin_unlock(&sync_lock);

		kfree(waiter);
	}

	/* no waiting.. */
	fxn(arg);

	return 0;
}

/* special API so PVR can update the buffer to use a sync-object allocated
 * from its sync-obj heap.  Only used for a newly allocated (from PVR's
 * perspective) sync-object, so we overwrite the new syncobj w/ values
 * from the already allocated syncobj (if there is one)
 */
int omap_gem_set_sync_object(struct drm_gem_object *obj, void *syncobj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	spin_lock(&sync_lock);

	if ((omap_obj->flags & OMAP_BO_EXT_SYNC) && !syncobj) {
		/* clearing a previously set syncobj */
		syncobj = kmemdup(omap_obj->sync, sizeof(*omap_obj->sync),
				  GFP_ATOMIC);
		if (!syncobj) {
			ret = -ENOMEM;
			goto unlock;
		}
		omap_obj->flags &= ~OMAP_BO_EXT_SYNC;
		omap_obj->sync = syncobj;
	} else if (syncobj && !(omap_obj->flags & OMAP_BO_EXT_SYNC)) {
		/* replacing an existing syncobj */
		if (omap_obj->sync) {
			memcpy(syncobj, omap_obj->sync, sizeof(*omap_obj->sync));
			kfree(omap_obj->sync);
		}
		omap_obj->flags |= OMAP_BO_EXT_SYNC;
		omap_obj->sync = syncobj;
	}

unlock:
	spin_unlock(&sync_lock);
	return ret;
}

/* -----------------------------------------------------------------------------
 * Constructor & Destructor
 */

/* don't call directly.. called from GEM core when it is time to actually
 * free the object..
 */
void omap_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	evict(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	spin_lock(&priv->list_lock);
	list_del(&omap_obj->mm_list);
	spin_unlock(&priv->list_lock);

	/* this means the object is still pinned.. which really should
	 * not happen.  I think..
	 */
	WARN_ON(omap_obj->paddr_cnt > 0);

	/* don't free externally allocated backing memory */
	if (!(omap_obj->flags & OMAP_BO_MEM_EXT)) {
		if (omap_obj->pages)
			omap_gem_detach_pages(obj);

		if (omap_obj->flags & OMAP_BO_MEM_DMA_API) {
			dma_free_writecombine(dev->dev, obj->size,
					omap_obj->vaddr, omap_obj->paddr);
		} else if (omap_obj->vaddr) {
			vunmap(omap_obj->vaddr);
		}
	}

	/* don't free externally allocated syncobj */
	if (!(omap_obj->flags & OMAP_BO_EXT_SYNC))
		kfree(omap_obj->sync);

	drm_gem_object_release(obj);

	kfree(omap_obj);
}

/* GEM buffer object constructor */
struct drm_gem_object *omap_gem_new(struct drm_device *dev,
		union omap_gem_size gsize, uint32_t flags)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj;
	struct drm_gem_object *obj;
	struct address_space *mapping;
	size_t size;
	int ret;

	if (flags & OMAP_BO_TILED) {
		if (!priv->usergart) {
			dev_err(dev->dev, "Tiled buffers require DMM\n");
			return NULL;
		}

		/* tiled buffers are always shmem paged backed.. when they are
		 * scanned out, they are remapped into DMM/TILER
		 */
		flags &= ~OMAP_BO_SCANOUT;

		/* currently don't allow cached buffers.. there is some caching
		 * stuff that needs to be handled better
		 */
		flags &= ~(OMAP_BO_CACHED|OMAP_BO_WC|OMAP_BO_UNCACHED);
		flags |= tiler_get_cpu_cache_flags();

		/* align dimensions to slot boundaries... */
		tiler_align(gem2fmt(flags),
				&gsize.tiled.width, &gsize.tiled.height);

		/* ...and calculate size based on aligned dimensions */
		size = tiler_size(gem2fmt(flags),
				gsize.tiled.width, gsize.tiled.height);
	} else {
		size = PAGE_ALIGN(gsize.bytes);
	}

	omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL);
	if (!omap_obj)
		return NULL;

	obj = &omap_obj->base;

	if ((flags & OMAP_BO_SCANOUT) && !priv->has_dmm) {
		/* attempt to allocate contiguous memory if we don't
		 * have DMM for remapping discontiguous buffers
		 */
		omap_obj->vaddr = dma_alloc_writecombine(dev->dev, size,
				&omap_obj->paddr, GFP_KERNEL);
		if (!omap_obj->vaddr) {
			kfree(omap_obj);

			return NULL;
		}

		flags |= OMAP_BO_MEM_DMA_API;
	}

	spin_lock(&priv->list_lock);
	list_add(&omap_obj->mm_list, &priv->obj_list);
	spin_unlock(&priv->list_lock);

	omap_obj->flags = flags;

	if (flags & OMAP_BO_TILED) {
		omap_obj->width = gsize.tiled.width;
		omap_obj->height = gsize.tiled.height;
	}

	if (flags & (OMAP_BO_MEM_DMA_API | OMAP_BO_MEM_EXT)) {
		drm_gem_private_object_init(dev, obj, size);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;

		mapping = file_inode(obj->filp)->i_mapping;
		mapping_set_gfp_mask(mapping, GFP_USER | __GFP_DMA32);

		omap_obj->flags |= OMAP_BO_MEM_SHMEM;
	}

	return obj;

fail:
	omap_gem_free_object(obj);
	return NULL;
}
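
/*
 * Example (illustrative sketch): allocating a 2d tiled 16-bit buffer and
 * a plain shmem-backed buffer from driver code.  Note the constructor
 * normalizes the cache flags for tiled buffers itself:
 *
 *	union omap_gem_size tsize = {
 *		.tiled = { .width = 1280, .height = 720 },
 *	};
 *	struct drm_gem_object *tiled_bo =
 *		omap_gem_new(dev, tsize, OMAP_BO_TILED_16 | OMAP_BO_WC);
 *
 *	union omap_gem_size bsize = { .bytes = SZ_1M };
 *	struct drm_gem_object *buf = omap_gem_new(dev, bsize, OMAP_BO_WC);
 */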

/* convenience method to construct a GEM buffer object, and userspace handle */
int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		union omap_gem_size gsize, uint32_t flags, uint32_t *handle)
{
	struct drm_gem_object *obj;
	int ret;

	obj = omap_gem_new(dev, gsize, flags);
	if (!obj)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, obj, handle);
	if (ret) {
		omap_gem_free_object(obj);
		return ret;
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return 0;
}

/* -----------------------------------------------------------------------------
 * Init & Cleanup
 */

/* If DMM is used, we need to set some stuff up.. */
void omap_gem_init(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_drm_usergart *usergart;
	const enum tiler_fmt fmts[] = {
			TILFMT_8BIT, TILFMT_16BIT, TILFMT_32BIT
	};
	int i, j;

	if (!dmm_is_available()) {
		/* DMM only supported on OMAP4 and later, so this isn't fatal */
		dev_warn(dev->dev, "DMM not available, disable DMM support\n");
		return;
	}

	usergart = kcalloc(3, sizeof(*usergart), GFP_KERNEL);
	if (!usergart)
		return;

	/* reserve 4k aligned/wide regions for userspace mappings: */
	for (i = 0; i < ARRAY_SIZE(fmts); i++) {
		uint16_t h = 1, w = PAGE_SIZE >> i;
		tiler_align(fmts[i], &w, &h);
		/* note: since each region is 1 4kb page wide, and minimum
		 * number of rows, the height ends up being the same as the
		 * # of pages in the region
		 */
		usergart[i].height = h;
		usergart[i].height_shift = ilog2(h);
		usergart[i].stride_pfn = tiler_stride(fmts[i], 0) >> PAGE_SHIFT;
		usergart[i].slot_shift = ilog2((PAGE_SIZE / h) >> i);
		for (j = 0; j < NUM_USERGART_ENTRIES; j++) {
			struct omap_drm_usergart_entry *entry;
			struct tiler_block *block;

			entry = &usergart[i].entry[j];
			block = tiler_reserve_2d(fmts[i], w, h, PAGE_SIZE);
			if (IS_ERR(block)) {
				dev_err(dev->dev,
						"reserve failed: %d, %d, %ld\n",
						i, j, PTR_ERR(block));
				return;
			}
			entry->paddr = tiler_ssptr(block);
			entry->block = block;

			DBG("%d:%d: %dx%d: paddr=%pad stride=%d", i, j, w, h,
					&entry->paddr,
					usergart[i].stride_pfn << PAGE_SHIFT);
		}
	}

	priv->usergart = usergart;
	priv->has_dmm = true;
}

void omap_gem_deinit(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;

	/* I believe we can rely on there being no more outstanding GEM
	 * objects which could depend on usergart/dmm at this point.
	 */
	kfree(priv->usergart);
}