/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

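/*
 * GEM object lifecycle for the msm driver: objects are backed either by
 * shmem pages or by a VRAM carveout (when no IOMMU is present).  This file
 * covers allocation/import/free, mmap and faulting, per-address-space iova
 * management, kernel vmaps, madvise/purge, and fencing.
 */
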
static void msm_gem_vunmap_locked(struct drm_gem_object *obj);


static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}

static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	return !msm_obj->vram_node;
}

/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	spin_lock(&priv->vram.lock);
	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
	spin_unlock(&priv->vram.lock);
	if (ret) {
		kvfree(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = phys_to_page(paddr);
		paddr += PAGE_SIZE;
	}

	return p;
}

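/* allocate and pin the backing pages on first use, building the sg table
 * and flushing non-cached buffers for the device:
 */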
static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->pages = p;

		msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			void *ptr = ERR_CAST(msm_obj->sgt);

			DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
			msm_obj->sgt = NULL;
			return ptr;
		}

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_map_sg(dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	}

	return msm_obj->pages;
}

static void put_pages_vram(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	spin_lock(&priv->vram.lock);
	drm_mm_remove_node(msm_obj->vram_node);
	spin_unlock(&priv->vram.lock);

	kvfree(msm_obj->pages);
}

static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		if (msm_obj->sgt) {
			/* For non-cached buffers, ensure the new
			 * pages are clean because display controller,
			 * GPU, etc. are not coherent:
			 */
			if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
				dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
					     msm_obj->sgt->nents,
					     DMA_BIDIRECTIONAL);

			sg_free_table(msm_obj->sgt);
			kfree(msm_obj->sgt);
		}

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else
			put_pages_vram(obj);

		msm_obj->pages = NULL;
	}
}

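/* get pages under msm_obj->lock, refusing objects that userspace has
 * marked purgeable:
 */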
struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **p;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return ERR_PTR(-EBUSY);
	}

	p = get_pages(obj);
	mutex_unlock(&msm_obj->lock);
	return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
	/* when we start tracking the pin count, then do something here */
}

int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (msm_obj->flags & MSM_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(obj->filp);
		vma->vm_pgoff = 0;
		vma->vm_file  = obj->filp;

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return msm_gem_mmap_obj(vma->vm_private_data, vma);
}

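/* fault handler: pin the backing pages and insert the pfn for the
 * faulting page into the vma:
 */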
vm_fault_t msm_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int err;
	vm_fault_t ret;

	/*
	 * vm_ops.open/drm_gem_mmap_obj and close get and put
	 * a reference on obj. So, we don't need to hold one here.
	 */
	err = mutex_lock_interruptible(&msm_obj->lock);
	if (err) {
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return VM_FAULT_SIGBUS;
	}

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = vmf_error(PTR_ERR(pages));
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	ret = vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
out_unlock:
	mutex_unlock(&msm_obj->lock);
out:
	return ret;
}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);
	offset = mmap_offset(obj);
	mutex_unlock(&msm_obj->lock);
	return offset;
}

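/* per-address-space vma tracking; msm_obj->lock must be held: */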
static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
	if (!vma)
		return ERR_PTR(-ENOMEM);

	vma->aspace = aspace;

	list_add_tail(&vma->list, &msm_obj->vmas);

	return vma;
}

static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace == aspace)
			return vma;
	}

	return NULL;
}

static void del_vma(struct msm_gem_vma *vma)
{
	if (!vma)
		return;

	list_del(&vma->list);
	kfree(vma);
}

/* Called with msm_obj->lock locked */
static void
put_iova(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma, *tmp;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
		msm_gem_unmap_vma(vma->aspace, vma);
		del_vma(vma);
	}
}

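/* look up (or create) the vma for the given address space, without
 * mapping any pages into it:
 */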
static int msm_gem_get_iova_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;
	int ret = 0;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	vma = lookup_vma(obj, aspace);

	if (!vma) {
		vma = add_vma(obj, aspace);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		ret = msm_gem_init_vma(aspace, vma, obj->size >> PAGE_SHIFT);
		if (ret) {
			del_vma(vma);
			return ret;
		}
	}

	*iova = vma->iova;
	return 0;
}

static int msm_gem_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;
	struct page **pages;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
		return -EBUSY;

	vma = lookup_vma(obj, aspace);
	if (WARN_ON(!vma))
		return -EINVAL;

	pages = get_pages(obj);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	return msm_gem_map_vma(aspace, vma, msm_obj->sgt,
			obj->size >> PAGE_SHIFT);
}


/* get iova, taking a reference.  Should have a matching put */
int msm_gem_get_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	u64 local;
	int ret;

	mutex_lock(&msm_obj->lock);

	ret = msm_gem_get_iova_locked(obj, aspace, &local);

	if (!ret)
		ret = msm_gem_pin_iova(obj, aspace);

	if (!ret)
		*iova = local;

	mutex_unlock(&msm_obj->lock);
	return ret;
}

/* get iova without taking a reference, used in places where you have
 * already done a 'msm_gem_get_iova()'.
 */
uint64_t msm_gem_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	mutex_lock(&msm_obj->lock);
	vma = lookup_vma(obj, aspace);
	mutex_unlock(&msm_obj->lock);
	WARN_ON(!vma);

	return vma ? vma->iova : 0;
}

void msm_gem_put_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	// XXX TODO ..
	// NOTE: probably don't need a _locked() version.. we wouldn't
	// normally unmap here, but instead just mark that it could be
	// unmapped (if the iova refcnt drops to zero), but then later
	// if another _get_iova_locked() fails we can start unmapping
	// things that are no longer needed..
}

int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size  = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_put_unlocked(obj);

fail:
	return ret;
}

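/* map the object into kernel address space on first use, refcounted
 * via vmap_count:
 */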
static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv > madv)) {
		DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
			msm_obj->madv, madv);
		mutex_unlock(&msm_obj->lock);
		return ERR_PTR(-EBUSY);
	}

	/* increment vmap_count *before* vmap() call, so shrinker can
	 * check vmap_count (is_vunmapable()) outside of msm_obj->lock.
	 * This guarantees that we won't try to msm_gem_vunmap() this
	 * same object from within the vmap() call (while we already
	 * hold msm_obj->lock)
	 */
	msm_obj->vmap_count++;

	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
		if (msm_obj->vaddr == NULL) {
			ret = -ENOMEM;
			goto fail;
		}
	}

	mutex_unlock(&msm_obj->lock);
	return msm_obj->vaddr;

fail:
	msm_obj->vmap_count--;
	mutex_unlock(&msm_obj->lock);
	return ERR_PTR(ret);
}

void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
	return get_vaddr(obj, MSM_MADV_WILLNEED);
}

/*
 * Don't use this!  It is for the very special case of dumping
 * submits from GPU hangs or faults, where the bo may already
 * be MSM_MADV_DONTNEED, but we know the buffer is still on the
 * active list.
 */
void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
{
	return get_vaddr(obj, __MSM_MADV_PURGED);
}

void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);
	WARN_ON(msm_obj->vmap_count < 1);
	msm_obj->vmap_count--;
	mutex_unlock(&msm_obj->lock);
}

/* Update madvise status, returns true if not purged, else
 * false or -errno.
 */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	madv = msm_obj->madv;

	mutex_unlock(&msm_obj->lock);

	return (madv != __MSM_MADV_PURGED);
}

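/* drop the backing pages, iovas and kernel mapping of a purgeable
 * object, and truncate the shmem file so the memory goes back to the
 * system immediately:
 */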
void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
	WARN_ON(!is_purgeable(msm_obj));
	WARN_ON(obj->import_attach);

	mutex_lock_nested(&msm_obj->lock, subclass);

	put_iova(obj);

	msm_gem_vunmap_locked(obj);

	put_pages(obj);

	msm_obj->madv = __MSM_MADV_PURGED;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
			0, (loff_t)-1);

	mutex_unlock(&msm_obj->lock);
}

static void msm_gem_vunmap_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
		return;

	vunmap(msm_obj->vaddr);
	msm_obj->vaddr = NULL;
}

void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock_nested(&msm_obj->lock, subclass);
	msm_gem_vunmap_locked(obj);
	mutex_unlock(&msm_obj->lock);
}

/* must be called before _move_to_active().. */
int msm_gem_sync_object(struct drm_gem_object *obj,
		struct msm_fence_context *fctx, bool exclusive)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	int i, ret;

	fobj = reservation_object_get_list(msm_obj->resv);
	if (!fobj || (fobj->shared_count == 0)) {
		fence = reservation_object_get_excl(msm_obj->resv);
		/* don't need to wait on our own fences, since ring is fifo */
		if (fence && (fence->context != fctx->context)) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	if (!exclusive || !fobj)
		return 0;

	for (i = 0; i < fobj->shared_count; i++) {
		fence = rcu_dereference_protected(fobj->shared[i],
						reservation_object_held(msm_obj->resv));
		if (fence->context != fctx->context) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	return 0;
}

void msm_gem_move_to_active(struct drm_gem_object *obj,
		struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
	msm_obj->gpu = gpu;
	if (exclusive)
		reservation_object_add_excl_fence(msm_obj->resv, fence);
	else
		reservation_object_add_shared_fence(msm_obj->resv, fence);
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &gpu->active_list);
}

void msm_gem_move_to_inactive(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	msm_obj->gpu = NULL;
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
}

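/* wait (up to the given timeout) for pending fences before CPU access: */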
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	bool write = !!(op & MSM_PREP_WRITE);
	unsigned long remain =
		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
	long ret;

	ret = reservation_object_wait_timeout_rcu(msm_obj->resv, write,
						  true,  remain);
	if (ret == 0)
		return remain == 0 ? -EBUSY : -ETIMEDOUT;
	else if (ret < 0)
		return ret;

	/* TODO cache maintenance */

	return 0;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}

#ifdef CONFIG_DEBUG_FS
static void describe_fence(struct dma_fence *fence, const char *type,
		struct seq_file *m)
{
	if (!dma_fence_is_signaled(fence))
		seq_printf(m, "\t%9s: %s %s seq %u\n", type,
				fence->ops->get_driver_name(fence),
				fence->ops->get_timeline_name(fence),
				fence->seqno);
}

void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct reservation_object *robj = msm_obj->resv;
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	struct msm_gem_vma *vma;
	uint64_t off = drm_vma_node_start(&obj->vma_node);
	const char *madv;

	mutex_lock(&msm_obj->lock);

	switch (msm_obj->madv) {
	case __MSM_MADV_PURGED:
		madv = " purged";
		break;
	case MSM_MADV_DONTNEED:
		madv = " purgeable";
		break;
	case MSM_MADV_WILLNEED:
	default:
		madv = "";
		break;
	}

	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p\t",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, msm_obj->vaddr);

	/* FIXME: we need to print the address space here too */
	list_for_each_entry(vma, &msm_obj->vmas, list)
		seq_printf(m, " %08llx", vma->iova);

	seq_printf(m, " %zu%s\n", obj->size, madv);

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();

	mutex_unlock(&msm_obj->lock);
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_object *msm_obj;
	int count = 0;
	size_t size = 0;

	list_for_each_entry(msm_obj, list, mm_list) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_printf(m, "   ");
		msm_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

/* don't call directly!  Use drm_gem_object_put() and friends */
void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* object should not be on active list: */
	WARN_ON(is_active(msm_obj));

	list_del(&msm_obj->mm_list);

	mutex_lock(&msm_obj->lock);

	put_iova(obj);

	if (obj->import_attach) {
		if (msm_obj->vaddr)
			dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		if (msm_obj->pages)
			kvfree(msm_obj->pages);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		msm_gem_vunmap_locked(obj);
		put_pages(obj);
	}

	if (msm_obj->resv == &msm_obj->_resv)
		reservation_object_fini(msm_obj->resv);

	drm_gem_object_release(obj);

	mutex_unlock(&msm_obj->lock);
	kfree(msm_obj);
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle)
{
	struct drm_gem_object *obj;
	int ret;

	obj = msm_gem_new(dev, size, flags);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(obj);

	return ret;
}

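/* common initialization shared by the allocation and import paths: */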
static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct reservation_object *resv,
		struct drm_gem_object **obj,
		bool struct_mutex_locked)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	default:
		DRM_DEV_ERROR(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	mutex_init(&msm_obj->lock);

	msm_obj->flags = flags;
	msm_obj->madv = MSM_MADV_WILLNEED;

	if (resv) {
		msm_obj->resv = resv;
	} else {
		msm_obj->resv = &msm_obj->_resv;
		reservation_object_init(msm_obj->resv);
	}

	INIT_LIST_HEAD(&msm_obj->submit_entry);
	INIT_LIST_HEAD(&msm_obj->vmas);

	if (struct_mutex_locked) {
		WARN_ON(!mutex_is_locked(&dev->struct_mutex));
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
	} else {
		mutex_lock(&dev->struct_mutex);
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
		mutex_unlock(&dev->struct_mutex);
	}

	*obj = &msm_obj->base;

	return 0;
}

static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags, bool struct_mutex_locked)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_gem_object *obj = NULL;
	bool use_vram = false;
	int ret;

	size = PAGE_ALIGN(size);

	if (!iommu_present(&platform_bus_type))
		use_vram = true;
	else if ((flags & MSM_BO_STOLEN) && priv->vram.size)
		use_vram = true;

	if (WARN_ON(use_vram && !priv->vram.size))
		return ERR_PTR(-EINVAL);

	/* Disallow zero sized objects as they make the underlying
	 * infrastructure grumpy
	 */
	if (size == 0)
		return ERR_PTR(-EINVAL);

	ret = msm_gem_new_impl(dev, size, flags, NULL, &obj, struct_mutex_locked);
	if (ret)
		goto fail;

	if (use_vram) {
		struct msm_gem_vma *vma;
		struct page **pages;
		struct msm_gem_object *msm_obj = to_msm_bo(obj);

		mutex_lock(&msm_obj->lock);

		vma = add_vma(obj, NULL);
		mutex_unlock(&msm_obj->lock);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto fail;
		}

		to_msm_bo(obj)->vram_node = &vma->node;

		drm_gem_private_object_init(dev, obj, size);

		pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		vma->iova = physaddr(obj);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
	}

	return obj;

fail:
	drm_gem_object_put_unlocked(obj);
	return ERR_PTR(ret);
}

struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, true);
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, false);
}

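/* wrap an imported dma-buf's sg_table in a GEM object (only possible
 * when an IOMMU is present):
 */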
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	uint32_t size;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!iommu_present(&platform_bus_type)) {
		DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(dmabuf->size);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj, false);
	if (ret)
		goto fail;

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	mutex_lock(&msm_obj->lock);
	msm_obj->sgt = sgt;
	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!msm_obj->pages) {
		mutex_unlock(&msm_obj->lock);
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
	if (ret) {
		mutex_unlock(&msm_obj->lock);
		goto fail;
	}

	mutex_unlock(&msm_obj->lock);
	return obj;

fail:
	drm_gem_object_put_unlocked(obj);
	return ERR_PTR(ret);
}

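/* convenience: allocate a buffer, pin it at an iova in the given address
 * space, and map it into the kernel:
 */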
static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova, bool locked)
{
	void *vaddr;
	struct drm_gem_object *obj = _msm_gem_new(dev, size, flags, locked);
	int ret;

	if (IS_ERR(obj))
		return ERR_CAST(obj);

	if (iova) {
		ret = msm_gem_get_iova(obj, aspace, iova);
		if (ret)
			goto err;
	}

	vaddr = msm_gem_get_vaddr(obj);
	if (IS_ERR(vaddr)) {
		msm_gem_put_iova(obj, aspace);
		ret = PTR_ERR(vaddr);
		goto err;
	}

	if (bo)
		*bo = obj;

	return vaddr;
err:
	if (locked)
		drm_gem_object_put(obj);
	else
		drm_gem_object_put_unlocked(obj);

	return ERR_PTR(ret);
}

void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, false);
}

void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, true);
}

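/* undo msm_gem_kernel_new(): drop the kernel mapping, the iova and the
 * object reference:
 */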
void msm_gem_kernel_put(struct drm_gem_object *bo,
		struct msm_gem_address_space *aspace, bool locked)
{
	if (IS_ERR_OR_NULL(bo))
		return;

	msm_gem_put_vaddr(bo);
	msm_gem_put_iova(bo, aspace);

	if (locked)
		drm_gem_object_put(bo);
	else
		drm_gem_object_put_unlocked(bo);
}