/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>

#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

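/* physical address of a VRAM-carveout backed object (used when there is no IOMMU): */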
static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}

static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	return !msm_obj->vram_node;
}

/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj,
		int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = drm_malloc_ab(npages, sizeof(struct page *));
	if (!p)
		return ERR_PTR(-ENOMEM);

	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node,
			npages, 0, DRM_MM_SEARCH_DEFAULT);
	if (ret) {
		drm_free_large(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = phys_to_page(paddr);
		paddr += PAGE_SIZE;
	}

	return p;
}

/* called with dev->struct_mutex held */
static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			dev_err(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			dev_err(dev->dev, "failed to allocate sgt\n");
			return ERR_CAST(msm_obj->sgt);
		}

		msm_obj->pages = p;

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_map_sg(dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	}

	return msm_obj->pages;
}

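/* undo get_pages(): free the sgt and release the shmem pages or carveout node */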
static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
		sg_free_table(msm_obj->sgt);
		kfree(msm_obj->sgt);

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else {
			drm_mm_remove_node(msm_obj->vram_node);
			drm_free_large(msm_obj->pages);
		}

		msm_obj->pages = NULL;
	}
}

struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct page **p;
	mutex_lock(&dev->struct_mutex);
	p = get_pages(obj);
	mutex_unlock(&dev->struct_mutex);
	return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
	/* when we start tracking the pin count, then do something here */
}

int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (msm_obj->flags & MSM_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(obj->filp);
		vma->vm_pgoff = 0;
		vma->vm_file = obj->filp;

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return msm_gem_mmap_obj(vma->vm_private_data, vma);
}

int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int ret;

	/* Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet
	 */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);

	ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);

out_unlock:
	mutex_unlock(&dev->struct_mutex);
out:
	switch (ret) {
	case -EAGAIN:
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	mutex_lock(&obj->dev->struct_mutex);
	offset = mmap_offset(obj);
	mutex_unlock(&obj->dev->struct_mutex);
	return offset;
}

/* should be called under struct_mutex.. although it can be called
 * from atomic context without struct_mutex to acquire an extra
 * iova ref if you know one is already held.
 *
 * That means when I do eventually need to add support for unpinning
 * the refcnt counter needs to be atomic_t.
 */
int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
		uint32_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	if (!msm_obj->domain[id].iova) {
		struct msm_drm_private *priv = obj->dev->dev_private;
		struct page **pages = get_pages(obj);

		if (IS_ERR(pages))
			return PTR_ERR(pages);

		if (iommu_present(&platform_bus_type)) {
			struct msm_mmu *mmu = priv->mmus[id];
			uint32_t offset;

			if (WARN_ON(!mmu))
				return -EINVAL;

			offset = (uint32_t)mmap_offset(obj);
			ret = mmu->funcs->map(mmu, offset, msm_obj->sgt,
					obj->size, IOMMU_READ | IOMMU_WRITE);
			msm_obj->domain[id].iova = offset;
		} else {
			msm_obj->domain[id].iova = physaddr(obj);
		}
	}

	if (!ret)
		*iova = msm_obj->domain[id].iova;

	return ret;
}

/* get iova, taking a reference.  Should have a matching put */
int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	/* this is safe right now because we don't unmap until the
	 * bo is deleted:
	 */
	if (msm_obj->domain[id].iova) {
		*iova = msm_obj->domain[id].iova;
		return 0;
	}

	mutex_lock(&obj->dev->struct_mutex);
	ret = msm_gem_get_iova_locked(obj, id, iova);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

/* get iova without taking a reference, used in places where you have
 * already done a 'msm_gem_get_iova()'.
 */
uint32_t msm_gem_iova(struct drm_gem_object *obj, int id)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(!msm_obj->domain[id].iova);
	return msm_obj->domain[id].iova;
}

void msm_gem_put_iova(struct drm_gem_object *obj, int id)
{
	// XXX TODO ..
	// NOTE: probably don't need a _locked() version.. we wouldn't
	// normally unmap here, but instead just mark that it could be
	// unmapped (if the iova refcnt drops to zero), but then later
	// if another _get_iova_locked() fails we can start unmapping
	// things that are no longer needed..
}

int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(dev, file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

fail:
	return ret;
}

void *msm_gem_vaddr_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);
		if (IS_ERR(pages))
			return ERR_CAST(pages);
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
	}
	return msm_obj->vaddr;
}

void *msm_gem_vaddr(struct drm_gem_object *obj)
{
	void *ret;
	mutex_lock(&obj->dev->struct_mutex);
	ret = msm_gem_vaddr_locked(obj);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

/* setup callback for when bo is no longer busy..
 * TODO probably want to differentiate read vs write..
 */
int msm_gem_queue_inactive_cb(struct drm_gem_object *obj,
		struct msm_fence_cb *cb)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	uint32_t fence = msm_gem_fence(msm_obj,
			MSM_PREP_READ | MSM_PREP_WRITE);
	return msm_queue_fence_cb(obj->dev, cb, fence);
}

void msm_gem_move_to_active(struct drm_gem_object *obj,
		struct msm_gpu *gpu, bool write, uint32_t fence)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	msm_obj->gpu = gpu;
	if (write)
		msm_obj->write_fence = fence;
	else
		msm_obj->read_fence = fence;
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &gpu->active_list);
}

void msm_gem_move_to_inactive(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	msm_obj->gpu = NULL;
	msm_obj->read_fence = 0;
	msm_obj->write_fence = 0;
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
}

int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op,
		struct timespec *timeout)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	if (is_active(msm_obj)) {
		uint32_t fence = msm_gem_fence(msm_obj, op);

		if (op & MSM_PREP_NOSYNC)
			timeout = NULL;

		ret = msm_wait_fence_interruptable(dev, fence, timeout);
	}

	/* TODO cache maintenance */

	return ret;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}

#ifdef CONFIG_DEBUG_FS
void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	uint64_t off = drm_vma_node_start(&obj->vma_node);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
	seq_printf(m, "%08x: %c(r=%u,w=%u) %2d (%2d) %08llx %p %d\n",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			msm_obj->read_fence, msm_obj->write_fence,
			obj->name, obj->refcount.refcount.counter,
			off, msm_obj->vaddr, obj->size);
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_object *msm_obj;
	int count = 0;
	size_t size = 0;

	list_for_each_entry(msm_obj, list, mm_list) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_printf(m, "   ");
		msm_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = obj->dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int id;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* object should not be on active list: */
	WARN_ON(is_active(msm_obj));

	list_del(&msm_obj->mm_list);

	for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
		struct msm_mmu *mmu = priv->mmus[id];
		if (mmu && msm_obj->domain[id].iova) {
			uint32_t offset = msm_obj->domain[id].iova;
			mmu->funcs->unmap(mmu, offset, msm_obj->sgt, obj->size);
		}
	}

	if (obj->import_attach) {
		if (msm_obj->vaddr)
			dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		if (msm_obj->pages)
			drm_free_large(msm_obj->pages);

	} else {
		vunmap(msm_obj->vaddr);
		put_pages(obj);
	}

	if (msm_obj->resv == &msm_obj->_resv)
		reservation_object_fini(msm_obj->resv);

	drm_gem_object_release(obj);

	kfree(msm_obj);
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle)
{
	struct drm_gem_object *obj;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	obj = msm_gem_new(dev, size, flags);

	mutex_unlock(&dev->struct_mutex);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return ret;
}

static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct drm_gem_object **obj)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	unsigned sz;
	bool use_vram = false;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	default:
		dev_err(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	if (!iommu_present(&platform_bus_type))
		use_vram = true;
	else if ((flags & MSM_BO_STOLEN) && priv->vram.size)
		use_vram = true;

	if (WARN_ON(use_vram && !priv->vram.size))
		return -EINVAL;

	sz = sizeof(*msm_obj);
	if (use_vram)
		sz += sizeof(struct drm_mm_node);

	msm_obj = kzalloc(sz, GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	if (use_vram)
		msm_obj->vram_node = (void *)&msm_obj[1];

	msm_obj->flags = flags;

	msm_obj->resv = &msm_obj->_resv;
	reservation_object_init(msm_obj->resv);

	INIT_LIST_HEAD(&msm_obj->submit_entry);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);

	*obj = &msm_obj->base;

	return 0;
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	struct drm_gem_object *obj = NULL;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	size = PAGE_ALIGN(size);

	ret = msm_gem_new_impl(dev, size, flags, &obj);
	if (ret)
		goto fail;

	if (use_pages(obj)) {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
	} else {
		drm_gem_private_object_init(dev, obj, size);
	}

	return obj;

fail:
	if (obj)
		drm_gem_object_unreference(obj);

	return ERR_PTR(ret);
}

struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		uint32_t size, struct sg_table *sgt)
{
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!iommu_present(&platform_bus_type)) {
		dev_err(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(size);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
	if (ret)
		goto fail;

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	msm_obj->sgt = sgt;
	msm_obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
	if (!msm_obj->pages) {
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
	if (ret)
		goto fail;

	return obj;

fail:
	if (obj)
		drm_gem_object_unreference_unlocked(obj);

	return ERR_PTR(ret);
}